author    Niall Sheridan <nsheridan@gmail.com>  2016-12-28 21:18:36 +0000
committer Niall Sheridan <nsheridan@gmail.com>  2016-12-28 21:18:36 +0000
commit    73ef85bc5db590c22689e11be20737a3dd88168f (patch)
tree      fe393a6f0776bca1889b2113ab341a2922e25d10 /vendor
parent    9e573e571fe878ed32947cae5a6d43cb5d72d3bb (diff)
Update dependencies
Diffstat (limited to 'vendor')
-rw-r--r--vendor/cloud.google.com/go/internal/retry.go55
-rw-r--r--vendor/cloud.google.com/go/storage/bucket.go3
-rw-r--r--vendor/cloud.google.com/go/storage/copy.go16
-rw-r--r--vendor/cloud.google.com/go/storage/doc.go2
-rw-r--r--vendor/cloud.google.com/go/storage/invoke.go19
-rw-r--r--vendor/cloud.google.com/go/storage/storage.go105
-rw-r--r--vendor/cloud.google.com/go/storage/writer.go6
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go6
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/client/client.go8
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/config.go33
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go14
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go133
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go1996
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go66
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go369
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go301
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go334
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request.go125
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go21
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go9
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go1
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/session.go57
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go24
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go33
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/types.go14
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/version.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go70
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json82
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go95
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go64
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go14
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go4
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go7
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go37
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/api.go7621
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go58
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/service.go17
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go5
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/api.go427
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/service.go17
-rw-r--r--vendor/github.com/go-ini/ini/README.md39
-rw-r--r--vendor/github.com/go-ini/ini/README_ZH.md47
-rw-r--r--vendor/github.com/go-ini/ini/ini.go38
-rw-r--r--vendor/github.com/go-ini/ini/parser.go39
-rw-r--r--vendor/github.com/go-ini/ini/section.go17
-rw-r--r--vendor/github.com/go-sql-driver/mysql/AUTHORS3
-rw-r--r--vendor/github.com/go-sql-driver/mysql/CHANGELOG.md7
-rw-r--r--vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md21
-rw-r--r--vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md9
-rw-r--r--vendor/github.com/go-sql-driver/mysql/README.md63
-rw-r--r--vendor/github.com/go-sql-driver/mysql/driver.go20
-rw-r--r--vendor/github.com/go-sql-driver/mysql/dsn.go18
-rw-r--r--vendor/github.com/go-sql-driver/mysql/errors.go3
-rw-r--r--vendor/github.com/go-sql-driver/mysql/infile.go3
-rw-r--r--vendor/github.com/go-sql-driver/mysql/packets.go111
-rw-r--r--vendor/github.com/go-sql-driver/mysql/statement.go5
-rw-r--r--vendor/github.com/golang/protobuf/proto/encode.go11
-rw-r--r--vendor/github.com/golang/protobuf/proto/extensions.go1
-rw-r--r--vendor/github.com/golang/protobuf/proto/text_parser.go6
-rw-r--r--vendor/github.com/google/go-github/github/activity_events.go12
-rw-r--r--vendor/github.com/google/go-github/github/activity_watching.go7
-rw-r--r--vendor/github.com/google/go-github/github/admin.go98
-rw-r--r--vendor/github.com/google/go-github/github/authorizations.go9
-rw-r--r--vendor/github.com/google/go-github/github/doc.go15
-rw-r--r--vendor/github.com/google/go-github/github/event_types.go65
-rw-r--r--vendor/github.com/google/go-github/github/github.go47
-rw-r--r--vendor/github.com/google/go-github/github/issues.go5
-rw-r--r--vendor/github.com/google/go-github/github/licenses.go31
-rw-r--r--vendor/github.com/google/go-github/github/messages.go8
-rw-r--r--vendor/github.com/google/go-github/github/projects.go417
-rw-r--r--vendor/github.com/google/go-github/github/pulls.go52
-rw-r--r--vendor/github.com/google/go-github/github/pulls_reviews.go19
-rw-r--r--vendor/github.com/google/go-github/github/repos.go130
-rw-r--r--vendor/github.com/google/go-github/github/repos_commits.go18
-rw-r--r--vendor/github.com/google/go-github/github/repos_contents.go8
-rw-r--r--vendor/github.com/google/go-github/github/repos_deployments.go21
-rw-r--r--vendor/github.com/google/go-github/github/repos_forks.go6
-rw-r--r--vendor/github.com/google/go-github/github/repos_pages.go29
-rw-r--r--vendor/github.com/google/go-github/github/repos_projects.go417
-rw-r--r--vendor/github.com/google/go-github/github/repos_stats.go29
-rw-r--r--vendor/github.com/googleapis/gax-go/README.md15
-rw-r--r--vendor/github.com/googleapis/gax-go/gax.go8
-rw-r--r--vendor/github.com/gorilla/csrf/README.md2
-rw-r--r--vendor/github.com/gorilla/csrf/csrf.go22
-rw-r--r--vendor/github.com/gorilla/handlers/recovery.go15
-rw-r--r--vendor/github.com/gorilla/sessions/README.md2
-rw-r--r--vendor/github.com/hashicorp/go-multierror/append.go8
-rw-r--r--vendor/github.com/hashicorp/go-multierror/format.go6
-rw-r--r--vendor/github.com/hashicorp/hcl/decoder.go14
-rw-r--r--vendor/github.com/hashicorp/hcl/hcl/parser/parser.go5
-rw-r--r--vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go8
-rw-r--r--vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go19
-rw-r--r--vendor/github.com/hashicorp/vault/api/client.go9
-rw-r--r--vendor/github.com/hashicorp/vault/api/logical.go19
-rw-r--r--vendor/github.com/kr/fs/LICENSE27
-rw-r--r--vendor/github.com/kr/fs/Readme3
-rw-r--r--vendor/github.com/kr/fs/filesystem.go36
-rw-r--r--vendor/github.com/kr/fs/walk.go95
-rw-r--r--vendor/github.com/magiconair/properties/CHANGELOG.md4
-rw-r--r--vendor/github.com/magiconair/properties/properties.go6
-rw-r--r--vendor/github.com/mattn/go-sqlite3/README.md4
-rw-r--r--vendor/github.com/mattn/go-sqlite3/backup.go14
-rw-r--r--vendor/github.com/mattn/go-sqlite3/callback.go4
-rw-r--r--vendor/github.com/mattn/go-sqlite3/error.go10
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c8382
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h563
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3.go235
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go69
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go2
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go1
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_type.go57
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3ext.h8
-rw-r--r--vendor/github.com/mattn/go-sqlite3/tracecallback.go14
-rw-r--r--vendor/github.com/mattn/go-sqlite3/tracecallback_noimpl.go1
-rw-r--r--vendor/github.com/mitchellh/go-homedir/homedir.go4
-rw-r--r--vendor/github.com/mitchellh/mapstructure/mapstructure.go37
-rw-r--r--vendor/github.com/pelletier/go-toml/keysparsing.go11
-rw-r--r--vendor/github.com/pelletier/go-toml/lexer.go24
-rw-r--r--vendor/github.com/pelletier/go-toml/parser.go71
-rw-r--r--vendor/github.com/pelletier/go-toml/tomltree_conversions.go45
-rw-r--r--vendor/github.com/pkg/sftp/CONTRIBUTORS2
-rw-r--r--vendor/github.com/pkg/sftp/LICENSE9
-rw-r--r--vendor/github.com/pkg/sftp/README.md27
-rw-r--r--vendor/github.com/pkg/sftp/attrs.go237
-rw-r--r--vendor/github.com/pkg/sftp/attrs_stubs.go11
-rw-r--r--vendor/github.com/pkg/sftp/attrs_unix.go17
-rw-r--r--vendor/github.com/pkg/sftp/client.go1128
-rw-r--r--vendor/github.com/pkg/sftp/conn.go122
-rw-r--r--vendor/github.com/pkg/sftp/debug.go9
-rw-r--r--vendor/github.com/pkg/sftp/packet.go901
-rw-r--r--vendor/github.com/pkg/sftp/release.go5
-rw-r--r--vendor/github.com/pkg/sftp/server.go607
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_darwin.go21
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_impl.go25
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_linux.go22
-rw-r--r--vendor/github.com/pkg/sftp/server_statvfs_stubs.go11
-rw-r--r--vendor/github.com/pkg/sftp/server_stubs.go12
-rw-r--r--vendor/github.com/pkg/sftp/server_unix.go143
-rw-r--r--vendor/github.com/pkg/sftp/sftp.go217
-rw-r--r--vendor/github.com/spf13/afero/README.md10
-rw-r--r--vendor/github.com/spf13/afero/afero.go2
-rw-r--r--vendor/github.com/spf13/afero/cacheOnReadFs.go3
-rw-r--r--vendor/github.com/spf13/afero/memmap.go38
-rw-r--r--vendor/github.com/spf13/afero/sftp.go128
-rw-r--r--vendor/github.com/spf13/afero/sftp/file.go95
-rw-r--r--vendor/github.com/spf13/afero/sftp_test_go286
-rw-r--r--vendor/github.com/spf13/cast/caste.go13
-rw-r--r--vendor/github.com/spf13/pflag/README.md4
-rw-r--r--vendor/github.com/spf13/viper/viper.go8
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertion_forward.go6
-rw-r--r--vendor/github.com/stretchr/testify/assert/assertions.go115
-rw-r--r--vendor/golang.org/x/crypto/ssh/keys.go25
-rw-r--r--vendor/golang.org/x/crypto/ssh/server.go2
-rw-r--r--vendor/golang.org/x/net/context/context.go102
-rw-r--r--vendor/golang.org/x/net/context/go18.go22
-rw-r--r--vendor/golang.org/x/net/context/pre_go18.go109
-rw-r--r--vendor/golang.org/x/net/http2/frame.go29
-rw-r--r--vendor/golang.org/x/net/http2/go18.go13
-rw-r--r--vendor/golang.org/x/net/http2/not_go18.go15
-rw-r--r--vendor/golang.org/x/net/http2/server.go271
-rw-r--r--vendor/golang.org/x/net/http2/transport.go86
-rw-r--r--vendor/golang.org/x/net/http2/write.go9
-rw-r--r--vendor/golang.org/x/net/http2/writesched.go31
-rw-r--r--vendor/golang.org/x/net/http2/writesched_priority.go10
-rw-r--r--vendor/golang.org/x/net/trace/trace.go2
-rw-r--r--vendor/golang.org/x/oauth2/google/appengine.go3
-rw-r--r--vendor/golang.org/x/oauth2/google/appengine_hook.go1
-rw-r--r--vendor/golang.org/x/oauth2/google/appenginevm_hook.go1
-rw-r--r--vendor/golang.org/x/oauth2/google/default.go113
-rw-r--r--vendor/golang.org/x/oauth2/google/google.go81
-rw-r--r--vendor/golang.org/x/oauth2/google/sdk.go12
-rw-r--r--vendor/golang.org/x/oauth2/internal/token.go2
-rw-r--r--vendor/golang.org/x/oauth2/jwt/jwt.go4
-rw-r--r--vendor/golang.org/x/sys/unix/asm_linux_mipsx.s31
-rw-r--r--vendor/golang.org/x/sys/unix/flock_linux_32bit.go2
-rwxr-xr-xvendor/golang.org/x/sys/unix/mkerrors.sh2
-rwxr-xr-xvendor/golang.org/x/sys/unix/mksysnum_linux.pl12
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_bsd.go18
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux.go44
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_amd64.go5
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go13
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go241
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_unix.go5
-rw-r--r--vendor/golang.org/x/sys/unix/syscall_unix_gc.go15
-rw-r--r--vendor/golang.org/x/sys/unix/types_linux.go6
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_386.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mips.go1814
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go2020
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go19
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_386.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go1829
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go1829
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go15
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go359
-rw-r--r--vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go359
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_386.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips.go642
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go642
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go8
-rw-r--r--vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go8
-rw-r--r--vendor/golang.org/x/text/internal/gen/code.go30
-rw-r--r--vendor/google.golang.org/api/gensupport/resumable.go32
-rw-r--r--vendor/google.golang.org/api/gensupport/retry.go8
-rw-r--r--vendor/google.golang.org/api/googleapi/transport/apikey.go38
-rw-r--r--vendor/google.golang.org/api/googleapi/types.go20
-rw-r--r--vendor/google.golang.org/api/internal/settings.go1
-rw-r--r--vendor/google.golang.org/api/oauth2/v2/oauth2-api.json8
-rw-r--r--vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go52
-rw-r--r--vendor/google.golang.org/api/option/option.go10
-rw-r--r--vendor/google.golang.org/api/storage/v1/storage-api.json9
-rw-r--r--vendor/google.golang.org/api/storage/v1/storage-gen.go460
-rw-r--r--vendor/google.golang.org/api/transport/dial.go10
-rw-r--r--vendor/google.golang.org/appengine/internal/api.go3
-rw-r--r--vendor/google.golang.org/grpc/README.md25
-rw-r--r--vendor/google.golang.org/grpc/call.go69
-rw-r--r--vendor/google.golang.org/grpc/clientconn.go86
-rwxr-xr-xvendor/google.golang.org/grpc/codegen.sh2
-rwxr-xr-xvendor/google.golang.org/grpc/coverage.sh3
-rw-r--r--vendor/google.golang.org/grpc/credentials/oauth/oauth.go2
-rw-r--r--vendor/google.golang.org/grpc/metadata/metadata.go2
-rw-r--r--vendor/google.golang.org/grpc/rpc_util.go74
-rw-r--r--vendor/google.golang.org/grpc/server.go154
-rw-r--r--vendor/google.golang.org/grpc/stats/handlers.go146
-rw-r--r--vendor/google.golang.org/grpc/stats/stats.go223
-rw-r--r--vendor/google.golang.org/grpc/stream.go146
-rw-r--r--vendor/google.golang.org/grpc/tap/tap.go54
-rw-r--r--vendor/google.golang.org/grpc/transport/control.go34
-rw-r--r--vendor/google.golang.org/grpc/transport/handler_server.go2
-rw-r--r--vendor/google.golang.org/grpc/transport/http2_client.go94
-rw-r--r--vendor/google.golang.org/grpc/transport/http2_server.go87
-rw-r--r--vendor/google.golang.org/grpc/transport/transport.go27
-rw-r--r--vendor/vendor.json652
257 files changed, 31941 insertions, 10134 deletions
diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go
new file mode 100644
index 0000000..79995be
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/retry.go
@@ -0,0 +1,55 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ gax "github.com/googleapis/gax-go"
+
+ "golang.org/x/net/context"
+)
+
+// Retry calls the supplied function f repeatedly according to the provided
+// backoff parameters. It returns when one of the following occurs:
+// When f's first return value is true, Retry immediately returns with f's second
+// return value.
+// When the provided context is done, Retry returns with ctx.Err().
+func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
+ return retry(ctx, bo, f, gax.Sleep)
+}
+
+func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
+ sleep func(context.Context, time.Duration) error) error {
+ var lastErr error
+ for {
+ stop, err := f()
+ if stop {
+ return err
+ }
+ // Remember the last "real" error from f.
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
+ lastErr = err
+ }
+ p := bo.Pause()
+ if cerr := sleep(ctx, p); cerr != nil {
+ if lastErr != nil {
+ return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
+ }
+ return cerr
+ }
+ }
+}
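The new internal.Retry helper above factors the backoff loop out of the storage package. A minimal sketch of how another package inside cloud.google.com/go might drive it (the package is internal, so it cannot be imported from outside that module; doCall and isRetryable are hypothetical stand-ins for a real RPC and its error classifier):

    package example // assumes a package located inside cloud.google.com/go

    import (
        "time"

        gax "github.com/googleapis/gax-go"
        "golang.org/x/net/context"

        "cloud.google.com/go/internal"
    )

    // callWithRetry retries doCall until it succeeds, fails permanently, or
    // ctx is done; transient failures wait for the next backoff pause.
    func callWithRetry(ctx context.Context, doCall func() error, isRetryable func(error) bool) error {
        bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: 5 * time.Second, Multiplier: 2}
        return internal.Retry(ctx, bo, func() (stop bool, err error) {
            err = doCall()
            if err == nil || !isRetryable(err) {
                return true, err // stop: success or a permanent error
            }
            return false, err // retry; Retry remembers err as the last "real" error
        })
    }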
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index a2be0f4..f87fe33 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -246,6 +246,9 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
return err
})
if err != nil {
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ err = ErrBucketNotExist
+ }
return "", err
}
for _, item := range resp.Items {
diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go
index 6adb566..1f28455 100644
--- a/vendor/cloud.google.com/go/storage/copy.go
+++ b/vendor/cloud.google.com/go/storage/copy.go
@@ -110,6 +110,12 @@ func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
return nil, err
}
+ if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
+ return nil, err
+ }
+ if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
+ return nil, err
+ }
var res *raw.RewriteResponse
var err error
err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
@@ -123,6 +129,10 @@ func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
+//
+// The encryption key for the destination object will be used to decrypt all
+// source objects and encrypt the destination object. It is an error
+// to specify an encryption key for any of the source objects.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
return &Composer{dst: dst, srcs: srcs}
}
@@ -158,6 +168,9 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
if src.bucket != c.dst.bucket {
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
}
+ if src.encryptionKey != nil {
+ return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
+ }
srcObj := &raw.ComposeRequestSourceObjects{
Name: src.object,
}
@@ -171,6 +184,9 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
+ if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
+ return nil, err
+ }
var obj *raw.Object
var err error
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
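Per the ComposerFrom documentation added above, only the destination handle may carry an encryption key; it is used to decrypt the sources and encrypt the result. A small sketch against the public storage API, with bucket and object names and the key material as placeholders:

    package example

    import (
        "cloud.google.com/go/storage"
        "golang.org/x/net/context"
    )

    // composeEncrypted concatenates two source objects into one destination
    // object protected by a customer-supplied AES-256 key.
    func composeEncrypted(ctx context.Context, client *storage.Client, key []byte) error {
        bkt := client.Bucket("my-bucket")
        dst := bkt.Object("combined").Key(key) // the destination key covers sources and result
        // Source handles must not carry their own keys, or Run returns an error.
        _, err := dst.ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2")).Run(ctx)
        return err
    }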
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index c23f2c8..cf6496b 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -23,7 +23,7 @@ All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
-Note: This package is experimental and may make backwards-incompatible changes.
+Note: This package is in beta. Some backwards-incompatible changes may occur.
Creating a Client
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index 03b98f4..e8fc924 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -15,6 +15,7 @@
package storage
import (
+ "cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
@@ -23,24 +24,20 @@ import (
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
- var backoff gax.Backoff // use defaults for gax exponential backoff
- for {
- err := call()
+ return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
+ err = call()
if err == nil {
- return nil
+ return true, nil
}
e, ok := err.(*googleapi.Error)
if !ok {
- return err
+ return true, err
}
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
- if err := gax.Sleep(ctx, backoff.Pause()); err != nil {
- return err
- }
- continue
+ return false, nil
}
- return err
- }
+ return true, err
+ })
}
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 9dccf5a..bafb136 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -272,12 +272,13 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
- c *Client
- bucket string
- object string
- acl ACLHandle
- gen int64 // a negative value indicates latest
- conds *Conditions
+ c *Client
+ bucket string
+ object string
+ acl ACLHandle
+ gen int64 // a negative value indicates latest
+ conds *Conditions
+ encryptionKey []byte // AES-256 key
}
// ACL provides access to the object's access control list.
@@ -309,6 +310,17 @@ func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
return &o2
}
+// Key returns a new ObjectHandle that uses the supplied encryption
+// key to encrypt and decrypt the object's contents.
+//
+// Encryption key must be a 32-byte AES-256 key.
+// See https://cloud.google.com/storage/docs/encryption for details.
+func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
+ o2 := *o
+ o2.encryptionKey = encryptionKey
+ return &o2
+}
+
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
@@ -319,6 +331,9 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
return nil, err
}
+ if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
+ return nil, err
+ }
var obj *raw.Object
var err error
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
@@ -389,6 +404,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err
}
+ if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
+ return nil, err
+ }
var obj *raw.Object
var err error
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
@@ -486,6 +504,9 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
+ if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
+ return nil, err
+ }
var res *http.Response
err = runWithRetry(ctx, func() error { res, err = o.c.hc.Do(req); return err })
if err != nil {
@@ -616,7 +637,7 @@ func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
-func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
+func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL)
return &raw.Object{
Bucket: bucket,
@@ -716,6 +737,13 @@ type ObjectAttrs struct {
// metadata does not change this property. This field is read-only.
Updated time.Time
+ // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
+ // customer-supplied encryption key for the object. It is empty if there is
+ // no customer-supplied encryption key.
+ // See // https://cloud.google.com/storage/docs/encryption for more about
+ // encryption in Google Cloud Storage.
+ CustomerKeySHA256 string
+
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
@@ -754,26 +782,31 @@ func newObject(o *raw.Object) *ObjectAttrs {
if err == nil && len(d) == 4 {
crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
}
+ var sha256 string
+ if o.CustomerEncryption != nil {
+ sha256 = o.CustomerEncryption.KeySha256
+ }
return &ObjectAttrs{
- Bucket: o.Bucket,
- Name: o.Name,
- ContentType: o.ContentType,
- ContentLanguage: o.ContentLanguage,
- CacheControl: o.CacheControl,
- ACL: acl,
- Owner: owner,
- ContentEncoding: o.ContentEncoding,
- Size: int64(o.Size),
- MD5: md5,
- CRC32C: crc32c,
- MediaLink: o.MediaLink,
- Metadata: o.Metadata,
- Generation: o.Generation,
- MetaGeneration: o.Metageneration,
- StorageClass: o.StorageClass,
- Created: convertTime(o.TimeCreated),
- Deleted: convertTime(o.TimeDeleted),
- Updated: convertTime(o.Updated),
+ Bucket: o.Bucket,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentLanguage: o.ContentLanguage,
+ CacheControl: o.CacheControl,
+ ACL: acl,
+ Owner: owner,
+ ContentEncoding: o.ContentEncoding,
+ Size: int64(o.Size),
+ MD5: md5,
+ CRC32C: crc32c,
+ MediaLink: o.MediaLink,
+ Metadata: o.Metadata,
+ Generation: o.Generation,
+ MetaGeneration: o.Metageneration,
+ StorageClass: o.StorageClass,
+ CustomerKeySHA256: sha256,
+ Created: convertTime(o.TimeCreated),
+ Deleted: convertTime(o.TimeDeleted),
+ Updated: convertTime(o.Updated),
}
}
@@ -1018,4 +1051,24 @@ func (c composeSourceObj) IfGenerationMatch(gen int64) {
}
}
+func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
+ if key == nil {
+ return nil
+ }
+ // TODO(jbd): Ask the API team to return a more user-friendly error
+ // and avoid doing this check at the client level.
+ if len(key) != 32 {
+ return errors.New("storage: not a 32-byte AES-256 key")
+ }
+ var cs string
+ if copySource {
+ cs = "copy-source-"
+ }
+ headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
+ headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
+ keyHash := sha256.Sum256(key)
+ headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
+ return nil
+}
+
// TODO(jbd): Add storage.objects.watch.
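The new Key method and setEncryptionHeaders above wire a customer-supplied AES-256 key into reads, writes, attrs, and updates as x-goog-encryption-* headers. A sketch of a write/read round trip with such a key; names are placeholders and error handling is abbreviated:

    package example

    import (
        "io/ioutil"

        "cloud.google.com/go/storage"
        "golang.org/x/net/context"
    )

    // roundTrip writes and re-reads an object encrypted with a 32-byte key.
    func roundTrip(ctx context.Context, client *storage.Client, key []byte) ([]byte, error) {
        obj := client.Bucket("my-bucket").Object("secret.txt").Key(key) // key must be 32 bytes
        w := obj.NewWriter(ctx)
        if _, err := w.Write([]byte("hello")); err != nil {
            return nil, err
        }
        if err := w.Close(); err != nil {
            return nil, err
        }
        r, err := obj.NewReader(ctx) // the same key is sent for decryption
        if err != nil {
            return nil, err
        }
        defer r.Close()
        return ioutil.ReadAll(r)
    }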
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
index 8f98156..64d7543 100644
--- a/vendor/cloud.google.com/go/storage/writer.go
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -85,7 +85,11 @@ func (w *Writer) open() error {
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)
-
+ if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
+ w.err = err
+ pr.CloseWithError(w.err)
+ return
+ }
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
index fc38172..710eb43 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -61,6 +61,12 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, "<binary> len %d", v.Len())
+ break
+ }
+
nl, id, id2 := "", "", ""
if v.Len() > 3 {
nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 7c0e7d9..aeeada0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -11,9 +11,11 @@ import (
// A Config provides configuration to a service client instance.
type Config struct {
- Config *aws.Config
- Handlers request.Handlers
- Endpoint, SigningRegion string
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint string
+ SigningRegion string
+ SigningName string
}
// ConfigProvider provides a generic way for a service client to receive
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 34c2bab..d58b812 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -5,6 +5,7 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
)
// UseServiceDefaultRetries instructs the config to use the service's own
@@ -48,6 +49,10 @@ type Config struct {
// endpoint for a client.
Endpoint *string
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
@@ -182,6 +187,19 @@ type Config struct {
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
SleepDelay func(time.Duration)
+
+ // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
+ // Will default to false. This would only be used for empty directory names in s3 requests.
+ //
+ // Example:
+ // sess, err := session.NewSession(&aws.Config{DisableRestProtocolURICleaning: aws.Bool(true))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
}
// NewConfig returns a new Config pointer that can be chained with builder
@@ -222,6 +240,13 @@ func (c *Config) WithEndpoint(endpoint string) *Config {
return c
}
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
// WithRegion sets a config Region value returning a Config pointer for
// chaining.
func (c *Config) WithRegion(region string) *Config {
@@ -344,6 +369,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.Endpoint = other.Endpoint
}
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
if other.Region != nil {
dst.Region = other.Region
}
@@ -403,6 +432,10 @@ func mergeInConfig(dst *Config, other *Config) {
if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay
}
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
}
// Copy will return a shallow copy of the Config object. If any additional
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 8e12f82..8a7bafc 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -71,7 +71,7 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
-// is built and signed signficantly before it is sent. Or signficant delays
+// is built and signed signficantly before it is sent. Or significant delays
// occur whne retrying requests that would cause the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
Name: "core.ValidateReqSigHandler",
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
index aa9d689..c397495 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -111,7 +111,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
}, nil
}
-// A ec2RoleCredRespBody provides the shape for unmarshalling credential
+// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 8dbbf67..0ef5504 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -19,8 +19,8 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/endpoints"
)
// A Defaults provides a collection of default values for SDK clients.
@@ -56,7 +56,8 @@ func Config() *aws.Config {
WithMaxRetries(aws.UseServiceDefaultRetries).
WithLogger(aws.NewDefaultLogger()).
WithLogLevel(aws.LogOff).
- WithSleepDelay(time.Sleep)
+ WithSleepDelay(time.Sleep).
+ WithEndpointResolver(endpoints.DefaultResolver())
}
// Handlers returns the default request handlers.
@@ -120,11 +121,14 @@ func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) cred
}
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
- aws.StringValue(cfg.Region), true, false)
+ resolver := cfg.EndpointResolver
+ if resolver == nil {
+ resolver = endpoints.DefaultResolver()
+ }
+ e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
return &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
+ Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
ExpiryWindow: 5 * time.Minute,
}
}
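ec2RoleProvider above now resolves the metadata endpoint through the new endpoints.Resolver interface rather than the old private endpoints package. A standalone sketch of the same lookup pattern for another service, using identifiers from the generated defaults; only the URL and SigningRegion fields shown in the diff are relied on:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        resolver := endpoints.DefaultResolver()
        e, err := resolver.EndpointFor(endpoints.S3ServiceID, endpoints.UsWest2RegionID)
        if err != nil {
            panic(err)
        }
        fmt.Println(e.URL, e.SigningRegion) // resolved endpoint URL and signing region
    }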
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
index e5755d1..984407a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -133,7 +133,7 @@ func (c *EC2Metadata) Available() bool {
return true
}
-// An EC2IAMInfo provides the shape for unmarshalling
+// An EC2IAMInfo provides the shape for unmarshaling
// an IAM info from the metadata API
type EC2IAMInfo struct {
Code string
@@ -142,7 +142,7 @@ type EC2IAMInfo struct {
InstanceProfileID string
}
-// An EC2InstanceIdentityDocument provides the shape for unmarshalling
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000..74f72de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,133 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions are the options for how the endpoints model definition
+// are decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// a endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model an error will be returned.
+//
+// Casting the return value of this func to a EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ s, ok := p.Services["s3"]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services["s3"] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
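DecodeModel above accepts any io.Reader holding a version 3 endpoints document, so a vendored or customized model can be loaded at runtime instead of the generated defaults. A sketch, with the file path as a placeholder:

    package example

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    // loadResolver builds a Resolver from an endpoints model file on disk.
    func loadResolver(path string) (endpoints.Resolver, error) {
        f, err := os.Open(path)
        if err != nil {
            return nil, err
        }
        defer f.Close()
        return endpoints.DecodeModel(f, func(o *endpoints.DecodeModelOptions) {
            o.SkipCustomizations = true // keep the model exactly as written on disk
        })
    }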
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 0000000..987d928
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,1996 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// Service identifiers
+const (
+ AcmServiceID = "acm" // Acm.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ AppstreamServiceID = "appstream" // Appstream.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ BudgetsServiceID = "budgets" // Budgets.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ConfigServiceID = "config" // Config.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ S3ServiceID = "s3" // S3.
+ SdbServiceID = "sdb" // Sdb.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Casting the return value of this func to a EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3-eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3-us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "Shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "http", "https", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ },
+ Services: services{
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "http", "https", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 0000000..a0e9bc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.Name)
+// for id, _ := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.Name)
+// for id, _ := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for how
+// endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session, or service client.
+//
+// In addition the ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000..3adec13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,369 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+	// DisableSSL forces the endpoint to be resolved as HTTP
+	// instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+	// StrictMatching is not enabled, a dualstack endpoint for the service will
+	// be returned. This endpoint may not be valid. If StrictMatching is
+	// enabled, only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+	// Enables strict matching of services and regions when resolving endpoints.
+	// If the partition doesn't enumerate the exact service and region, an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
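+
+// A minimal sketch of passing these functional options to a Resolver's
+// EndpointFor call; the service and region values are only illustrative:
+//
+//    resolved, err := endpoints.DefaultResolver().EndpointFor(
+//        "s3", "us-west-2",
+//        endpoints.DisableSSLOption,
+//        endpoints.StrictMatchingOption,
+//    )
+//    if err != nil {
+//        // handle an unknown service/region pair
+//    }
+//    fmt.Println(resolved.URL) // an http:// URL, since SSL was disabled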
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
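+
+// A few illustrative inputs and outputs (hypothetical hostnames):
+//
+//    AddScheme("example.com", false)         // "https://example.com"
+//    AddScheme("example.com", true)          // "http://example.com"
+//    AddScheme("https://example.com", true)  // unchanged: "https://example.com"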
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p *Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata, an UnknownServiceError
+// error will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved, an error will be returned. This
+// mode is useful to ensure the resolved endpoint is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p *Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p *Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
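+
+// A minimal sketch of limiting resolution to a single partition, assuming the
+// code generated AwsPartition accessor for the AWS Standard partition; the
+// service and region here are only illustrative:
+//
+//    p := endpoints.AwsPartition()
+//    resolved, err := p.EndpointFor("dynamodb", "eu-west-1",
+//        endpoints.StrictMatchingOption)
+//    if err != nil {
+//        // the service/region pair is not modeled in this partition
+//    }
+//    _ = resolved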
+
+// A Region provides information about a region, and ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r *Region) ID() string { return r.id }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a map of all services that are known to be in this region.
+func (r *Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s *Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+func (s *Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about an endpoint, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e *Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier of the service the endpoint belongs to.
+func (e *Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
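+
+// As an illustration, resolving a plain regional entry such as DynamoDB in
+// us-west-2 from the generated defaults is expected to produce roughly:
+//
+//    ResolvedEndpoint{
+//        URL:           "https://dynamodb.us-west-2.amazonaws.com",
+//        SigningRegion: "us-west-2",
+//        SigningName:   "dynamodb",
+//        SigningMethod: "v4",
+//    }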
+
+// So that the Error interface type can be included as an anonymous field
+// in the error types below and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when, in StrictMatching mode, the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError.
+//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError {
+// return EndpointNotFoundError{
+// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil),
+// Partition: p,
+// Service: s,
+// Region: r,
+// }
+//}
+//
+//// Error returns string representation of the error.
+//func (e EndpointNotFoundError) Error() string {
+// extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+// e.Partition, e.Service, e.Region)
+// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+//}
+//
+//// String returns the string representation of the error.
+//func (e EndpointNotFoundError) String() string {
+// return e.Error()
+//}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when, in StrictMatching mode, the
+// service is valid but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000..6522ce9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,301 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+	// With loose matching, fall back to the first partition's format
+	// when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
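+
+// As a sketch of the loose-matching fallback, resolving a service for a
+// hypothetical region that matches no partition's region pattern falls back
+// to the first partition's defaults (using the generated defaultPartitions value):
+//
+//    resolved, _ := defaultPartitions.EndpointFor("s3", "xx-fake-1")
+//    // resolved.URL is expected to be "https://s3.xx-fake-1.amazonaws.com",
+//    // which may not be a real endpoint.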
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if !hasService {
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opt.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+	// Unable to find any matching endpoint; return a
+	// blank endpoint that will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+	// SignatureVersions is used to select the SigningMethod when resolving.
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
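+
+// For example, getByPriority([]string{"http", "https"}, protocolPriority,
+// defaultProtocol) returns "https"; an empty slice returns the default, and a
+// slice with no entry from the priority list falls back to its first element.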
+
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+	// Swap in the dualstack hostname if enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+ if len(signingName) == 0 {
+ signingName = service
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
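+
+// As an illustration using the generated defaults above, the "iam" service's
+// "aws-global" entry carries a CredentialScope region, so it is expected to
+// resolve to roughly:
+//
+//    ResolvedEndpoint{
+//        URL:           "https://iam.amazonaws.com",
+//        SigningRegion: "us-east-1", // from the endpoint's CredentialScope
+//        SigningName:   "iam",
+//        SigningMethod: "v4",
+//    }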
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
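+
+// A small sketch of the merge order used by resolve: later values override
+// earlier ones field by field, but only when they are set.
+//
+//    var merged endpoint
+//    merged.mergeIn(endpoint{Hostname: "{service}.{region}.{dnsSuffix}", Protocols: []string{"https"}})
+//    merged.mergeIn(endpoint{Protocols: []string{"http", "https"}})
+//    // merged.Hostname keeps the template; merged.Protocols is now {"http", "https"}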
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 0000000..1e7369d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,334 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// CodeGenOptions are the options for generating the endpoints
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the model cannot be decoded or the code cannot be generated.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
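+
+// For example, toSymbol("us-west-2") yields "UsWest2" and toSymbol("aws-us-gov")
+// yields "AwsUsGov", which is how the generated partition, region, and service
+// constants get their exported names.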
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
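+
+// For example, with the three partitions modeled here this is expected to
+// return "AWS Standard, AWS China, and AWS GovCloud (US)", which is
+// interpolated into the DefaultResolver documentation in the template below.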
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" . }}
+
+ {{ range $_, $partition := . }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ template "service consts" . }}
+
+ {{ template "endpoint resolvers" . }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Casting the return value of this func to an EnumPartitions will
+ // allow you to get a list of the partitions in the order the endpoints
+ // will be resolved in.
+ //
+ // resolver := endpoints.DefaultResolver()
+ // partitions := resolver.(endpoints.EnumPartitions).Partitions()
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
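
CodeGenModel above drives the v3Tmpl templates with a decoded endpoints model. A rough sketch of how a small generator command might call it; the input and output file names are assumptions, and the program must be built with the codegen build tag for CodeGenModel to be compiled in:

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    // Run with: go run -tags codegen gen_endpoints.go
    // The codegen tag is required because v3model_codegen.go is behind it.
    func main() {
        model, err := os.Open("endpoints.json") // input model path is an assumption
        if err != nil {
            log.Fatal(err)
        }
        defer model.Close()

        out, err := os.Create("defaults.go") // output file name is an assumption
        if err != nil {
            log.Fatal(err)
        }
        defer out.Close()

        // Decode the model and render the default resolver Go source.
        if err := endpoints.CodeGenModel(model, out); err != nil {
            log.Fatal(err)
        }
    }
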
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 8ef9715..77312bb 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
+ "net"
"net/http"
"net/url"
"reflect"
@@ -55,6 +56,8 @@ type Operation struct {
HTTPMethod string
HTTPPath string
*Paginator
+
+ BeforePresignFn func(r *Request) error
}
// Paginator keeps track of pagination configuration for an API operation.
@@ -149,6 +152,15 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) {
func (r *Request) Presign(expireTime time.Duration) (string, error) {
r.ExpireTime = expireTime
r.NotHoist = false
+
+ if r.Operation.BeforePresignFn != nil {
+ r = r.copy()
+ err := r.Operation.BeforePresignFn(r)
+ if err != nil {
+ return "", err
+ }
+ }
+
r.Sign()
if r.Error != nil {
return "", r.Error
@@ -234,7 +246,82 @@ func (r *Request) ResetBody() {
}
r.safeBody = newOffsetReader(r.Body, r.BodyStart)
- r.HTTPRequest.Body = r.safeBody
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of whether the Request.Body was empty or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent, which is incorrect under the new rules.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := computeBodyLength(r.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to compute request body size", err)
+ return
+ }
+
+ if l == 0 {
+ r.HTTPRequest.Body = noBodyReader
+ } else if l > 0 {
+ r.HTTPRequest.Body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if a aws.ReaderSeekerCloser was used with
+ // a io.Reader that was not also an io.Seeker.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ r.HTTPRequest.Body = noBodyReader
+ default:
+ r.HTTPRequest.Body = r.safeBody
+ }
+ }
+}
+
+// computeBodyLength attempts to compute the length of the body of the reader
+// using the io.Seeker interface. If the value is not seekable, because it is
+// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
+// If no error occurs the length of the body will be returned.
+func computeBodyLength(r io.ReadSeeker) (int64, error) {
+ seekable := true
+ // Determine if the seeker is actually seekable. ReaderSeekerCloser
+ // hides the fact that an io.Reader might not actually be seekable.
+ switch v := r.(type) {
+ case aws.ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ case *aws.ReaderSeekerCloser:
+ seekable = v.IsSeeker()
+ }
+ if !seekable {
+ return -1, nil
+ }
+
+ curOffset, err := r.Seek(0, 1)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := r.Seek(0, 2)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = r.Seek(curOffset, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
}
// GetBody will return an io.ReadSeeker of the Request's underlying
@@ -286,7 +373,7 @@ func (r *Request) Send() error {
r.Handlers.Send.Run(r)
if r.Error != nil {
- if strings.Contains(r.Error.Error(), "net/http: request canceled") {
+ if !shouldRetryCancel(r) {
return r.Error
}
@@ -334,6 +421,17 @@ func (r *Request) Send() error {
return nil
}
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
// AddToUserAgent adds the string to the end of the request's current user agent.
func AddToUserAgent(r *Request, s string) {
curUA := r.HTTPRequest.Header.Get("User-Agent")
@@ -342,3 +440,26 @@ func AddToUserAgent(r *Request, s string) {
}
r.HTTPRequest.Header.Set("User-Agent", s)
}
+
+func shouldRetryCancel(r *Request) bool {
+ awsErr, ok := r.Error.(awserr.Error)
+ timeoutErr := false
+ errStr := r.Error.Error()
+ if ok {
+ err := awsErr.OrigErr()
+ netErr, netOK := err.(net.Error)
+ timeoutErr = netOK && netErr.Temporary()
+ if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+ errStr = urlErr.Err.Error()
+ }
+ }
+
+ // There can be two types of canceled errors here: a net.Error and a
+ // plain error. If the request timed out, we want to continue the retry
+ // process. Otherwise, return the canceled error.
+ return timeoutErr ||
+ (errStr != "net/http: request canceled" &&
+ errStr != "net/http: request canceled while waiting for connection")
+
+}
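
computeBodyLength above measures the bytes remaining in a seekable body by recording the current offset, seeking to the end, and restoring the offset (the SDK code uses the numeric whence values 0/1/2, presumably for compatibility with Go releases that predate the io.Seek* constants). A standalone sketch of the same seek arithmetic, not the SDK code itself:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // remainingLength reports how many bytes are left in a seekable body by
    // seeking to the end and then restoring the original offset.
    func remainingLength(r io.ReadSeeker) (int64, error) {
        cur, err := r.Seek(0, io.SeekCurrent)
        if err != nil {
            return 0, err
        }
        end, err := r.Seek(0, io.SeekEnd)
        if err != nil {
            return 0, err
        }
        if _, err := r.Seek(cur, io.SeekStart); err != nil {
            return 0, err
        }
        return end - cur, nil
    }

    func main() {
        body := strings.NewReader("hello world")
        body.Seek(6, io.SeekStart) // pretend 6 bytes were already consumed
        n, _ := remainingLength(body)
        fmt.Println(n) // 5
    }
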
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 0000000..1323af9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,21 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// noBodyReader is an empty reader that will trigger the Go HTTP client to not
+// include a body in the HTTP request.
+var noBodyReader = noBody{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000..8b963f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package request
+
+import "net/http"
+
+// noBodyReader is the http.NoBody value, instructing the Go HTTP client to not
+// include a body in the HTTP request.
+var noBodyReader = http.NoBody
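
request_1_7.go and request_1_8.go pick the empty-body sentinel at compile time with build tags: Go 1.8 and later gets http.NoBody, older releases get a local stand-in. A generic sketch of that per-version file-pair pattern; the package, file, and variable names here are hypothetical:

    // sentinel_pre18.go (compiled only when building with Go older than 1.8)
    // +build !go1.8

    package body

    import "io"

    type noBody struct{}

    func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
    func (noBody) Close() error             { return nil }

    // Empty signals "no request body" on Go versions before 1.8.
    var Empty io.ReadCloser = noBody{}

    // sentinel_go18.go (compiled only on Go 1.8 and newer)
    // +build go1.8

    package body

    import (
        "io"
        "net/http"
    )

    // Empty signals "no request body"; on Go 1.8+ it is http.NoBody.
    var Empty io.ReadCloser = http.NoBody
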
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index 8cc8b01..ebd60cc 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -38,6 +38,7 @@ var throttleCodes = map[string]struct{}{
"RequestThrottled": {},
"LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
"TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
}
// credsExpiredCodes is a collection of error codes which signify the credentials
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 602f4e1..c9427e9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -10,8 +10,8 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/endpoints"
)
// A Session provides a central location to create service clients from and
@@ -34,7 +34,7 @@ type Session struct {
// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
// method could now encounter an error when loading the configuration. When
// The environment variable is set, and an error occurs, New will return a
-// session that will fail all requests reporting the error that occured while
+// session that will fail all requests reporting the error that occurred while
// loading the session. Use NewSession to get the error when creating the
// session.
//
@@ -44,7 +44,7 @@ type Session struct {
// shared config, and shared credentials will be taken from the shared
// credentials file.
//
-// Deprecated: Use NewSession functiions to create sessions instead. NewSession
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
// has the same functionality as New except an error can be returned when the
// func is called instead of waiting to receive an error until a request is made.
func New(cfgs ...*aws.Config) *Session {
@@ -59,7 +59,7 @@ func New(cfgs ...*aws.Config) *Session {
// needs to be replicated if an error occurs while creating
// the session.
msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
- "Use session.NewSession to handle errors occuring during session creation."
+ "Use session.NewSession to handle errors occurring during session creation."
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
@@ -89,7 +89,7 @@ func New(cfgs ...*aws.Config) *Session {
// to be built with retrieving credentials with AssumeRole set in the config.
//
// See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created. Such as specifing the
+// control through code how the Session will be created. Such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
envCfg := loadEnvConfig()
@@ -124,7 +124,7 @@ type Options struct {
// Provides config values for the SDK to use when creating service clients
// and making API requests to services. Any value set in with this field
// will override the associated value provided by the SDK defaults,
- // environment or config files where relevent.
+ // environment or config files where relevant.
//
// If not set, configuration values from from SDK defaults, environment,
// config will be used.
@@ -222,6 +222,11 @@ func oldNewSession(cfgs ...*aws.Config) *Session {
// Apply the passed in configs so the configuration can be applied to the
// default credential chain
cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
cfg.Credentials = defaults.CredChain(cfg, handlers)
// Reapply any passed in configs to override credentials if set
@@ -375,19 +380,39 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ // Backwards compatibility: the error will be eaten if the user calls ClientConfig
+ // directly. All SDK services will use clientConfigWithErr internally.
+ cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+ return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
s = s.Copy(cfgs...)
- endpoint, signingRegion := endpoints.NormalizeEndpoint(
- aws.StringValue(s.Config.Endpoint),
- serviceName,
- aws.StringValue(s.Config.Region),
- aws.BoolValue(s.Config.DisableSSL),
- aws.BoolValue(s.Config.UseDualStack),
- )
+
+ var resolved endpoints.ResolvedEndpoint
+ var err error
+
+ region := aws.StringValue(s.Config.Region)
+
+ if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+ resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ } else {
+ resolved, err = s.Config.EndpointResolver.EndpointFor(
+ serviceName, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+ },
+ )
+ }
return client.Config{
Config: s.Config,
Handlers: s.Handlers,
- Endpoint: endpoint,
- SigningRegion: signingRegion,
- }
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningName: resolved.SigningName,
+ }, err
}
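
With this change, ClientConfig resolves service endpoints through aws.Config.EndpointResolver instead of the old private endpoints map. A minimal sketch of resolving an endpoint the same way, directly against the default resolver; the region string is chosen only for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Resolve the S3 endpoint for us-west-2 the same way clientConfigWithErr
        // does when aws.Config.Endpoint is not set.
        resolved, err := endpoints.DefaultResolver().EndpointFor(
            endpoints.S3ServiceID, "us-west-2",
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(resolved.URL, resolved.SigningRegion)
    }
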
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
deleted file mode 100644
index 7966041..0000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path_1_4.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !go1.5
-
-package v4
-
-import (
- "net/url"
- "strings"
-)
-
-func getURIPath(u *url.URL) string {
- var uri string
-
- if len(u.Opaque) > 0 {
- uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
- } else {
- uri = u.Path
- }
-
- if len(uri) == 0 {
- uri = "/"
- }
-
- return uri
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 986530b..98bfe74 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -42,6 +42,14 @@
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
// call URL.EscapedPath() if Opaque is not set.
//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre-1.8 that fails to make HTTP/2 requests using an absolute URL in the
+// HTTP message. URL.Opaque generally will force Go to make requests with an
+// absolute URL. URL.RawPath does not do this, but RawPath must be a valid
+// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
+//
// Test `TestStandaloneSign` provides a complete example of using the signer
// outside of the SDK and pre-escaping the URI path.
package v4
@@ -79,8 +87,9 @@ const (
var ignoredHeaders = rules{
blacklist{
mapRule{
- "Authorization": struct{}{},
- "User-Agent": struct{}{},
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
},
},
}
@@ -171,6 +180,16 @@ type Signer struct {
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
+ // Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+ // sent in the request. You must ensure that the underlying data of the Body
+ // values are the same.
+ DisableRequestBodyOverwrite bool
+
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
@@ -300,6 +319,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
DisableURIPathEscaping: v4.DisableURIPathEscaping,
}
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
if ctx.isRequestSigned() {
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
@@ -317,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
// If the request is not presigned the body should be attached to it. This
// prevents the confusion of wanting to send a signed request without
// the body the request was signed for attached.
- if !ctx.isPresign {
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
var reader io.ReadCloser
if body != nil {
var ok bool
@@ -412,6 +435,10 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
// S3 service should not have any escaping applied
v4.DisableURIPathEscaping = true
}
+ // Prevents setting the HTTPRequest's Body, since the Body could be
+ // wrapped in a custom io.Closer that we do not want to be stomped
+ // on by the signer.
+ v4.DisableRequestBodyOverwrite = true
})
signingTime := req.Time
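
DisableRequestBodyOverwrite lets callers that sign requests themselves keep their own Request.Body wrapper intact. A hedged sketch of standalone SigV4 signing with that option; the credentials, service name, and URL below are placeholders:

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        // Ask the signer to leave req.Body alone; it will still hash the
        // io.ReadSeeker passed to Sign for the payload signature.
        signer := v4.NewSigner(creds, func(s *v4.Signer) {
            s.DisableRequestBodyOverwrite = true
        })

        body := bytes.NewReader([]byte(`{"hello":"world"}`))
        req, _ := http.NewRequest("POST", "https://service.us-east-1.amazonaws.com/", body)

        if _, err := signer.Sign(req, body, "service", "us-east-1", time.Now()); err != nil {
            panic(err)
        }
        fmt.Println(req.Header.Get("Authorization"))
    }
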
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index fa014b4..9ca685e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -5,7 +5,13 @@ import (
"sync"
)
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. It should
+// only be used with an io.Reader that is also an io.Seeker; using it with a
+// non-seekable reader may cause request signature errors, or request bodies
+// not being sent for GET, HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with an io.ReadSeeker. If using for
+// S3 PutObject to stream content, use s3manager.Uploader instead.
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
return ReaderSeekerCloser{r}
}
@@ -44,6 +50,12 @@ func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
return int64(0), nil
}
+// IsSeeker returns whether the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
// Close closes the ReaderSeekerCloser.
//
// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
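
IsSeeker is what ResetBody and computeBodyLength in request.go use to decide whether a wrapped reader can be measured. A small sketch of the two cases; the pipe reader stands in for any non-seekable stream:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // A bytes.Reader is an io.Seeker, so the wrapper reports true and the
        // SDK can compute the request body length.
        seekable := aws.ReadSeekCloser(bytes.NewReader([]byte("payload")))
        fmt.Println(seekable.IsSeeker()) // true

        // A pipe reader is not seekable; for GET/HEAD/DELETE the SDK then
        // sends the request without a body rather than risk a chunked body.
        pr, _ := io.Pipe()
        notSeekable := aws.ReadSeekCloser(pr)
        fmt.Println(notSeekable.IsSeeker()) // false
    }
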
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index b01cd70..33622b3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.4.22"
+const SDKVersion = "1.6.8"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
deleted file mode 100644
index 19d9756..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Package endpoints validates regional endpoints for services.
-package endpoints
-
-//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
-//go:generate gofmt -s -w endpoints_map.go
-
-import (
- "fmt"
- "regexp"
- "strings"
-)
-
-// NormalizeEndpoint takes and endpoint and service API information to return a
-// normalized endpoint and signing region. If the endpoint is not an empty string
-// the service name and region will be used to look up the service's API endpoint.
-// If the endpoint is provided the scheme will be added if it is not present.
-func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
- if endpoint == "" {
- return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
- }
-
- return AddScheme(endpoint, disableSSL), ""
-}
-
-// EndpointForRegion returns an endpoint and its signing region for a service and region.
-// if the service and region pair are not found endpoint and signingRegion will be empty.
-func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
- dualStackField := ""
- if useDualStack {
- dualStackField = "/dualstack"
- }
-
- derivedKeys := []string{
- region + "/" + svcName + dualStackField,
- region + "/*" + dualStackField,
- "*/" + svcName + dualStackField,
- "*/*" + dualStackField,
- }
-
- for _, key := range derivedKeys {
- if val, ok := endpointsMap.Endpoints[key]; ok {
- ep := val.Endpoint
- ep = strings.Replace(ep, "{region}", region, -1)
- ep = strings.Replace(ep, "{service}", svcName, -1)
-
- endpoint = ep
- signingRegion = val.SigningRegion
- break
- }
- }
-
- return AddScheme(endpoint, disableSSL), signingRegion
-}
-
-// Regular expression to determine if the endpoint string is prefixed with a scheme.
-var schemeRE = regexp.MustCompile("^([^:]+)://")
-
-// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
-// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
-func AddScheme(endpoint string, disableSSL bool) string {
- if endpoint != "" && !schemeRE.MatchString(endpoint) {
- scheme := "https"
- if disableSSL {
- scheme = "http"
- }
- endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
- }
-
- return endpoint
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
deleted file mode 100644
index 5594f2e..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
+++ /dev/null
@@ -1,82 +0,0 @@
-{
- "version": 2,
- "endpoints": {
- "*/*": {
- "endpoint": "{service}.{region}.amazonaws.com"
- },
- "cn-north-1/*": {
- "endpoint": "{service}.{region}.amazonaws.com.cn",
- "signatureVersion": "v4"
- },
- "cn-north-1/ec2metadata": {
- "endpoint": "http://169.254.169.254/latest"
- },
- "us-gov-west-1/iam": {
- "endpoint": "iam.us-gov.amazonaws.com"
- },
- "us-gov-west-1/sts": {
- "endpoint": "sts.us-gov-west-1.amazonaws.com"
- },
- "us-gov-west-1/s3": {
- "endpoint": "s3-{region}.amazonaws.com"
- },
- "us-gov-west-1/ec2metadata": {
- "endpoint": "http://169.254.169.254/latest"
- },
- "*/budgets": {
- "endpoint": "budgets.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/cloudfront": {
- "endpoint": "cloudfront.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/cloudsearchdomain": {
- "endpoint": "",
- "signingRegion": "us-east-1"
- },
- "*/data.iot": {
- "endpoint": "",
- "signingRegion": "us-east-1"
- },
- "*/ec2metadata": {
- "endpoint": "http://169.254.169.254/latest"
- },
- "*/iam": {
- "endpoint": "iam.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/importexport": {
- "endpoint": "importexport.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/route53": {
- "endpoint": "route53.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/sts": {
- "endpoint": "sts.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/waf": {
- "endpoint": "waf.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "us-east-1/sdb": {
- "endpoint": "sdb.amazonaws.com",
- "signingRegion": "us-east-1"
- },
- "*/s3": {
- "endpoint": "s3-{region}.amazonaws.com"
- },
- "*/s3/dualstack": {
- "endpoint": "s3.dualstack.{region}.amazonaws.com"
- },
- "us-east-1/s3": {
- "endpoint": "s3.amazonaws.com"
- },
- "eu-central-1/s3": {
- "endpoint": "{service}.{region}.amazonaws.com"
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
deleted file mode 100644
index e79e678..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package endpoints
-
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
-
-type endpointStruct struct {
- Version int
- Endpoints map[string]endpointEntry
-}
-
-type endpointEntry struct {
- Endpoint string
- SigningRegion string
-}
-
-var endpointsMap = endpointStruct{
- Version: 2,
- Endpoints: map[string]endpointEntry{
- "*/*": {
- Endpoint: "{service}.{region}.amazonaws.com",
- },
- "*/budgets": {
- Endpoint: "budgets.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/cloudfront": {
- Endpoint: "cloudfront.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/cloudsearchdomain": {
- Endpoint: "",
- SigningRegion: "us-east-1",
- },
- "*/data.iot": {
- Endpoint: "",
- SigningRegion: "us-east-1",
- },
- "*/ec2metadata": {
- Endpoint: "http://169.254.169.254/latest",
- },
- "*/iam": {
- Endpoint: "iam.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/importexport": {
- Endpoint: "importexport.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/route53": {
- Endpoint: "route53.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/s3": {
- Endpoint: "s3-{region}.amazonaws.com",
- },
- "*/s3/dualstack": {
- Endpoint: "s3.dualstack.{region}.amazonaws.com",
- },
- "*/sts": {
- Endpoint: "sts.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "*/waf": {
- Endpoint: "waf.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "cn-north-1/*": {
- Endpoint: "{service}.{region}.amazonaws.com.cn",
- },
- "cn-north-1/ec2metadata": {
- Endpoint: "http://169.254.169.254/latest",
- },
- "eu-central-1/s3": {
- Endpoint: "{service}.{region}.amazonaws.com",
- },
- "us-east-1/s3": {
- Endpoint: "s3.amazonaws.com",
- },
- "us-east-1/sdb": {
- Endpoint: "sdb.amazonaws.com",
- SigningRegion: "us-east-1",
- },
- "us-gov-west-1/ec2metadata": {
- Endpoint: "http://169.254.169.254/latest",
- },
- "us-gov-west-1/iam": {
- Endpoint: "iam.us-gov.amazonaws.com",
- },
- "us-gov-west-1/s3": {
- Endpoint: "s3-{region}.amazonaws.com",
- },
- "us-gov-west-1/sts": {
- Endpoint: "sts.us-gov-west-1.amazonaws.com",
- },
- },
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 60ea0bd..f434ab7 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -76,6 +76,10 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
if field.PkgPath != "" {
continue // ignore unexported fields
}
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index 5f41251..20a41d4 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -14,6 +14,7 @@ import (
"strings"
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -46,14 +47,29 @@ var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
func Build(r *request.Request) {
if r.ParamsFilled() {
v := reflect.ValueOf(r.Params).Elem()
- buildLocationElements(r, v)
+ buildLocationElements(r, v, false)
buildBody(r, v)
}
}
-func buildLocationElements(r *request.Request, v reflect.Value) {
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
query := r.HTTPRequest.URL.Query()
+ // Set up the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
for i := 0; i < v.NumField(); i++ {
m := v.Field(i)
if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
@@ -72,6 +88,9 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
if !m.IsValid() {
continue
}
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
var err error
switch field.Tag.Get("location") {
@@ -83,6 +102,10 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
err = buildURI(r.HTTPRequest.URL, m, name)
case "querystring":
err = buildQueryString(query, m, name)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name)
+ }
}
r.Error = err
}
@@ -92,7 +115,9 @@ func buildLocationElements(r *request.Request, v reflect.Value) {
}
r.HTTPRequest.URL.RawQuery = query.Encode()
- updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
}
func buildBody(r *request.Request, v reflect.Value) {
@@ -156,10 +181,11 @@ func buildURI(u *url.URL, v reflect.Value, name string) error {
return awserr.New("SerializationError", "failed to encode REST request", err)
}
- uri := u.Path
- uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
- uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
- u.Path = uri
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
return nil
}
@@ -193,25 +219,17 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error {
return nil
}
-func updatePath(url *url.URL, urlPath string) {
- scheme, query := url.Scheme, url.RawQuery
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
- hasSlash := strings.HasSuffix(urlPath, "/")
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
- // clean up path
- urlPath = path.Clean(urlPath)
- if hasSlash && !strings.HasSuffix(urlPath, "/") {
- urlPath += "/"
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
}
-
- // get formatted URL minus scheme so we can build this into Opaque
- url.Scheme, url.Path, url.RawQuery = "", "", ""
- s := url.String()
- url.Scheme = scheme
- url.RawQuery = query
-
- // build opaque URI
- url.Opaque = s + urlPath
}
// EscapePath escapes part of a URL path in Amazon style
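
cleanPath replaces the old Opaque-URL rewriting with a plain path.Clean on both Path and RawPath while preserving a trailing slash. A standalone sketch of that behavior on the Path side:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // cleanURLPath mirrors the cleanPath logic above: collapse duplicate
    // slashes with path.Clean while keeping a trailing slash if one existed.
    func cleanURLPath(p string) string {
        hasSlash := strings.HasSuffix(p, "/")
        p = path.Clean(p)
        if hasSlash && !strings.HasSuffix(p, "/") {
            p += "/"
        }
        return p
    }

    func main() {
        fmt.Println(cleanURLPath("/bucket//key/")) // /bucket/key/
        fmt.Println(cleanURLPath("/bucket/./key")) // /bucket/key
    }
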
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 2cba1d9..9c00921 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -1,6 +1,7 @@
package rest
import (
+ "bytes"
"encoding/base64"
"fmt"
"io"
@@ -11,7 +12,6 @@ import (
"strings"
"time"
- "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -70,10 +70,16 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
}
default:
switch payload.Type().String() {
- case "io.ReadSeeker":
- payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
- case "aws.ReadSeekCloser", "io.ReadCloser":
+ case "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError",
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
default:
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
defer r.HTTPResponse.Body.Close()
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index 221029b..c74c191 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -127,6 +127,10 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
if field.PkgPath != "" {
continue // ignore unexported fields
}
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
mTag := field.Tag
if mTag.Get("location") != "" { // skip non-body members
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index 49f291a..64b6ddd 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -111,11 +111,8 @@ func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
elems := node.Children[name]
if elems == nil { // try to find the field in attributes
- for _, a := range node.Attr {
- if name == a.Name.Local {
- // turn this into a text node for de-serializing
- elems = []*XMLNode{{Text: a.Value}}
- }
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
index 72c198a..3112512 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -2,6 +2,7 @@ package xmlutil
import (
"encoding/xml"
+ "fmt"
"io"
"sort"
)
@@ -12,6 +13,9 @@ type XMLNode struct {
Children map[string][]*XMLNode `json:",omitempty"`
Text string `json:",omitempty"`
Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
}
// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
@@ -59,21 +63,54 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
slice = []*XMLNode{}
}
node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
if e != nil {
return out, e
}
node.Name = typed.Name
+ node.findNamespaces()
+ tempOut := *out
+ // Save into a temp variable, simply because out gets squashed during
+ // loop iterations
+ node.parent = &tempOut
slice = append(slice, node)
out.Children[name] = slice
case xml.EndElement:
if s != nil && s.Name.Local == typed.Name.Local { // matching end token
return out, nil
}
+ out = &XMLNode{}
}
}
return out, nil
}
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
// StructToXML writes an XMLNode to a xml.Encoder as tokens.
func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 9eec073..8ce3de0 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -40,6 +40,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
op := &request.Operation{
Name: opAbortMultipartUpload,
@@ -76,6 +77,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
// * NoSuchUpload
// The specified multipart upload does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
req, out := c.AbortMultipartUploadRequest(input)
err := req.Send()
@@ -108,6 +110,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
op := &request.Operation{
Name: opCompleteMultipartUpload,
@@ -135,6 +138,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation CompleteMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
req, out := c.CompleteMultipartUploadRequest(input)
err := req.Send()
@@ -167,6 +171,7 @@ const opCopyObject = "CopyObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
op := &request.Operation{
Name: opCopyObject,
@@ -200,6 +205,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
// The source object of the COPY operation is not in the active tier and is
// only stored in Amazon Glacier.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
req, out := c.CopyObjectRequest(input)
err := req.Send()
@@ -232,6 +238,7 @@ const opCreateBucket = "CreateBucket"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
op := &request.Operation{
Name: opCreateBucket,
@@ -268,6 +275,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// * BucketAlreadyOwnedByYou
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
req, out := c.CreateBucketRequest(input)
err := req.Send()
@@ -300,6 +308,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
op := &request.Operation{
Name: opCreateMultipartUpload,
@@ -333,6 +342,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation CreateMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
req, out := c.CreateMultipartUploadRequest(input)
err := req.Send()
@@ -365,6 +375,7 @@ const opDeleteBucket = "DeleteBucket"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
op := &request.Operation{
Name: opDeleteBucket,
@@ -395,12 +406,77 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucket for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
req, out := c.DeleteBucketRequest(input)
err := req.Send()
return out, err
}
+const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"
+
+// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketAnalyticsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketAnalyticsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketAnalyticsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketAnalyticsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opDeleteBucketCors = "DeleteBucketCors"
// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
@@ -427,6 +503,7 @@ const opDeleteBucketCors = "DeleteBucketCors"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
op := &request.Operation{
Name: opDeleteBucketCors,
@@ -456,12 +533,77 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
req, out := c.DeleteBucketCorsRequest(input)
err := req.Send()
return out, err
}
+const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
+
+// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketInventoryConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketInventoryConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInventoryConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketInventoryConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
@@ -488,6 +630,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
op := &request.Operation{
Name: opDeleteBucketLifecycle,
@@ -517,12 +660,77 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
req, out := c.DeleteBucketLifecycleRequest(input)
err := req.Send()
return out, err
}
+const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
+
+// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketMetricsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteBucketMetricsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketMetricsConfiguration,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &DeleteBucketMetricsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketMetricsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opDeleteBucketPolicy = "DeleteBucketPolicy"
// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
@@ -549,6 +757,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
op := &request.Operation{
Name: opDeleteBucketPolicy,
@@ -578,6 +787,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
req, out := c.DeleteBucketPolicyRequest(input)
err := req.Send()
@@ -610,6 +820,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
op := &request.Operation{
Name: opDeleteBucketReplication,
@@ -639,6 +850,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
req, out := c.DeleteBucketReplicationRequest(input)
err := req.Send()
@@ -671,6 +883,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
op := &request.Operation{
Name: opDeleteBucketTagging,
@@ -700,6 +913,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
req, out := c.DeleteBucketTaggingRequest(input)
err := req.Send()
@@ -732,6 +946,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
op := &request.Operation{
Name: opDeleteBucketWebsite,
@@ -761,6 +976,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
req, out := c.DeleteBucketWebsiteRequest(input)
err := req.Send()
@@ -793,6 +1009,7 @@ const opDeleteObject = "DeleteObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
op := &request.Operation{
Name: opDeleteObject,
@@ -822,12 +1039,74 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
req, out := c.DeleteObjectRequest(input)
err := req.Send()
return out, err
}
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObjectTagging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DeleteObjectTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the DeleteObjectTaggingRequest method.
+// req, resp := client.DeleteObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjectTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteObjectTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteObjectTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the tag-set from an existing object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
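For callers that only need the service response, the wrapper method can be invoked directly; as the comments above note, failures surface as awserr.Error values whose Code and Message methods carry the detail. A sketch under those assumptions, with placeholder region, bucket, and key values:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

        // Remove the tag-set from an object; the returned output is not needed here.
        _, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
            Bucket: aws.String("example-bucket"),  // placeholder
            Key:    aws.String("docs/report.csv"), // placeholder
        })
        if err != nil {
            // Both service and SDK failures satisfy awserr.Error; a type
            // assertion exposes the structured code and message.
            if aerr, ok := err.(awserr.Error); ok {
                log.Fatalf("DeleteObjectTagging: %s: %s", aerr.Code(), aerr.Message())
            }
            log.Fatal(err)
        }
        log.Println("tag-set removed")
    }
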
const opDeleteObjects = "DeleteObjects"
// DeleteObjectsRequest generates a "aws/request.Request" representing the
@@ -854,6 +1133,7 @@ const opDeleteObjects = "DeleteObjects"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
op := &request.Operation{
Name: opDeleteObjects,
@@ -882,6 +1162,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation DeleteObjects for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
req, out := c.DeleteObjectsRequest(input)
err := req.Send()
@@ -914,6 +1195,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
op := &request.Operation{
Name: opGetBucketAccelerateConfiguration,
@@ -941,6 +1223,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
req, out := c.GetBucketAccelerateConfigurationRequest(input)
err := req.Send()
@@ -973,6 +1256,7 @@ const opGetBucketAcl = "GetBucketAcl"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
op := &request.Operation{
Name: opGetBucketAcl,
@@ -1000,12 +1284,75 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
req, out := c.GetBucketAclRequest(input)
err := req.Send()
return out, err
}
+const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
+
+// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAnalyticsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAnalyticsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &GetBucketAnalyticsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketAnalyticsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opGetBucketCors = "GetBucketCors"
// GetBucketCorsRequest generates a "aws/request.Request" representing the
@@ -1032,6 +1379,7 @@ const opGetBucketCors = "GetBucketCors"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
op := &request.Operation{
Name: opGetBucketCors,
@@ -1059,12 +1407,75 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
req, out := c.GetBucketCorsRequest(input)
err := req.Send()
return out, err
}
+const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
+
+// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketInventoryConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketInventoryConfigurationRequest method.
+// req, resp := client.GetBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketInventoryConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &GetBucketInventoryConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketInventoryConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opGetBucketLifecycle = "GetBucketLifecycle"
// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
@@ -1091,6 +1502,7 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
if c.Client.Config.Logger != nil {
c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
@@ -1121,6 +1533,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
req, out := c.GetBucketLifecycleRequest(input)
err := req.Send()
@@ -1153,6 +1566,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
op := &request.Operation{
Name: opGetBucketLifecycleConfiguration,
@@ -1180,6 +1594,7 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
req, out := c.GetBucketLifecycleConfigurationRequest(input)
err := req.Send()
@@ -1212,6 +1627,7 @@ const opGetBucketLocation = "GetBucketLocation"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
op := &request.Operation{
Name: opGetBucketLocation,
@@ -1239,6 +1655,7 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLocation for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
req, out := c.GetBucketLocationRequest(input)
err := req.Send()
@@ -1271,6 +1688,7 @@ const opGetBucketLogging = "GetBucketLogging"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
op := &request.Operation{
Name: opGetBucketLogging,
@@ -1299,12 +1717,75 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
req, out := c.GetBucketLoggingRequest(input)
err := req.Send()
return out, err
}
+const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
+
+// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketMetricsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetBucketMetricsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetBucketMetricsConfigurationRequest method.
+// req, resp := client.GetBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketMetricsConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &GetBucketMetricsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketMetricsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opGetBucketNotification = "GetBucketNotification"
// GetBucketNotificationRequest generates a "aws/request.Request" representing the
@@ -1331,6 +1812,7 @@ const opGetBucketNotification = "GetBucketNotification"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
if c.Client.Config.Logger != nil {
c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
@@ -1361,6 +1843,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
req, out := c.GetBucketNotificationRequest(input)
err := req.Send()
@@ -1393,6 +1876,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
op := &request.Operation{
Name: opGetBucketNotificationConfiguration,
@@ -1420,6 +1904,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
req, out := c.GetBucketNotificationConfigurationRequest(input)
err := req.Send()
@@ -1452,6 +1937,7 @@ const opGetBucketPolicy = "GetBucketPolicy"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
op := &request.Operation{
Name: opGetBucketPolicy,
@@ -1479,6 +1965,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
req, out := c.GetBucketPolicyRequest(input)
err := req.Send()
@@ -1511,6 +1998,7 @@ const opGetBucketReplication = "GetBucketReplication"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
op := &request.Operation{
Name: opGetBucketReplication,
@@ -1538,6 +2026,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
req, out := c.GetBucketReplicationRequest(input)
err := req.Send()
@@ -1570,6 +2059,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
op := &request.Operation{
Name: opGetBucketRequestPayment,
@@ -1597,6 +2087,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
req, out := c.GetBucketRequestPaymentRequest(input)
err := req.Send()
@@ -1629,6 +2120,7 @@ const opGetBucketTagging = "GetBucketTagging"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
op := &request.Operation{
Name: opGetBucketTagging,
@@ -1656,6 +2148,7 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
req, out := c.GetBucketTaggingRequest(input)
err := req.Send()
@@ -1688,6 +2181,7 @@ const opGetBucketVersioning = "GetBucketVersioning"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
op := &request.Operation{
Name: opGetBucketVersioning,
@@ -1715,6 +2209,7 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
req, out := c.GetBucketVersioningRequest(input)
err := req.Send()
@@ -1747,6 +2242,7 @@ const opGetBucketWebsite = "GetBucketWebsite"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
op := &request.Operation{
Name: opGetBucketWebsite,
@@ -1774,6 +2270,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
req, out := c.GetBucketWebsiteRequest(input)
err := req.Send()
@@ -1806,6 +2303,7 @@ const opGetObject = "GetObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
op := &request.Operation{
Name: opGetObject,
@@ -1838,6 +2336,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
// * NoSuchKey
// The specified key does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
req, out := c.GetObjectRequest(input)
err := req.Send()
@@ -1870,6 +2369,7 @@ const opGetObjectAcl = "GetObjectAcl"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
op := &request.Operation{
Name: opGetObjectAcl,
@@ -1902,12 +2402,74 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
// * NoSuchKey
// The specified key does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
req, out := c.GetObjectAclRequest(input)
err := req.Send()
return out, err
}
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectTagging for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetObjectTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the GetObjectTaggingRequest method.
+// req, resp := client.GetObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &GetObjectTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
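Reading an object's tag-set back is symmetrical. The sketch below reuses an *s3.S3 client built as in the earlier examples; the TagSet, Key, and Value field names are assumptions mirroring the GET Object tagging response shape, and the imports listed in the comment are required:

    // import ("fmt"; "github.com/aws/aws-sdk-go/aws"; "github.com/aws/aws-sdk-go/service/s3")
    func printObjectTags(svc *s3.S3, bucket, key string) error {
        out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
            Bucket: aws.String(bucket),
            Key:    aws.String(key),
        })
        if err != nil {
            return err
        }
        for _, tag := range out.TagSet {
            // Tag fields are *string; StringValue dereferences them safely.
            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
        }
        return nil
    }
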
const opGetObjectTorrent = "GetObjectTorrent"
// GetObjectTorrentRequest generates a "aws/request.Request" representing the
@@ -1934,6 +2496,7 @@ const opGetObjectTorrent = "GetObjectTorrent"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
op := &request.Operation{
Name: opGetObjectTorrent,
@@ -1961,6 +2524,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation GetObjectTorrent for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
req, out := c.GetObjectTorrentRequest(input)
err := req.Send()
@@ -1993,6 +2557,7 @@ const opHeadBucket = "HeadBucket"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
op := &request.Operation{
Name: opHeadBucket,
@@ -2028,6 +2593,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
// * NoSuchBucket
// The specified bucket does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
req, out := c.HeadBucketRequest(input)
err := req.Send()
@@ -2060,6 +2626,7 @@ const opHeadObject = "HeadObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
op := &request.Operation{
Name: opHeadObject,
@@ -2094,12 +2661,196 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// * NoSuchKey
// The specified key does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
req, out := c.HeadObjectRequest(input)
err := req.Send()
return out, err
}
+const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
+
+// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketAnalyticsConfigurations for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListBucketAnalyticsConfigurations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketAnalyticsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &ListBucketAnalyticsConfigurationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListBucketAnalyticsConfigurationsOutput{}
+ req.Data = output
+ return
+}
+
+// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the analytics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketAnalyticsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketInventoryConfigurations for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListBucketInventoryConfigurations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketInventoryConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &ListBucketInventoryConfigurationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListBucketInventoryConfigurationsOutput{}
+ req.Data = output
+ return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketInventoryConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
+
+// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketMetricsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketMetricsConfigurations for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the ListBucketMetricsConfigurations method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
+ op := &request.Operation{
+ Name: opListBucketMetricsConfigurations,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &ListBucketMetricsConfigurationsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListBucketMetricsConfigurationsOutput{}
+ req.Data = output
+ return
+}
+
+// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the metrics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketMetricsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
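The new ListBucket*Configurations operations above are paged with a continuation token rather than the marker fields used by the object listings. Below is a sketch of draining the metrics-configuration list; the ContinuationToken, IsTruncated, NextContinuationToken, and MetricsConfigurationList field names are assumptions mirroring the underlying REST response, and the imports in the comment are required:

    // import ("fmt"; "github.com/aws/aws-sdk-go/aws"; "github.com/aws/aws-sdk-go/service/s3")
    func listAllMetricsConfigs(svc *s3.S3, bucket string) error {
        var token *string
        for {
            out, err := svc.ListBucketMetricsConfigurations(&s3.ListBucketMetricsConfigurationsInput{
                Bucket:            aws.String(bucket),
                ContinuationToken: token, // nil requests the first page
            })
            if err != nil {
                return err
            }
            for _, cfg := range out.MetricsConfigurationList {
                fmt.Println(aws.StringValue(cfg.Id))
            }
            // Stop when the service reports no further pages.
            if !aws.BoolValue(out.IsTruncated) {
                return nil
            }
            token = out.NextContinuationToken
        }
    }
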
const opListBuckets = "ListBuckets"
// ListBucketsRequest generates a "aws/request.Request" representing the
@@ -2126,6 +2877,7 @@ const opListBuckets = "ListBuckets"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
op := &request.Operation{
Name: opListBuckets,
@@ -2153,6 +2905,7 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request,
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListBuckets for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
req, out := c.ListBucketsRequest(input)
err := req.Send()
@@ -2185,6 +2938,7 @@ const opListMultipartUploads = "ListMultipartUploads"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
op := &request.Operation{
Name: opListMultipartUploads,
@@ -2218,6 +2972,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListMultipartUploads for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
req, out := c.ListMultipartUploadsRequest(input)
err := req.Send()
@@ -2275,6 +3030,7 @@ const opListObjectVersions = "ListObjectVersions"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
op := &request.Operation{
Name: opListObjectVersions,
@@ -2308,6 +3064,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListObjectVersions for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
req, out := c.ListObjectVersionsRequest(input)
err := req.Send()
@@ -2365,6 +3122,7 @@ const opListObjects = "ListObjects"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
op := &request.Operation{
Name: opListObjects,
@@ -2405,6 +3163,7 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
// * NoSuchBucket
// The specified bucket does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
req, out := c.ListObjectsRequest(input)
err := req.Send()
@@ -2462,6 +3221,7 @@ const opListObjectsV2 = "ListObjectsV2"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
op := &request.Operation{
Name: opListObjectsV2,
@@ -2503,6 +3263,7 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
// * NoSuchBucket
// The specified bucket does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
req, out := c.ListObjectsV2Request(input)
err := req.Send()
@@ -2560,6 +3321,7 @@ const opListParts = "ListParts"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
op := &request.Operation{
Name: opListParts,
@@ -2593,6 +3355,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation ListParts for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
req, out := c.ListPartsRequest(input)
err := req.Send()
@@ -2650,6 +3413,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
op := &request.Operation{
Name: opPutBucketAccelerateConfiguration,
@@ -2679,6 +3443,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
req, out := c.PutBucketAccelerateConfigurationRequest(input)
err := req.Send()
@@ -2711,6 +3476,7 @@ const opPutBucketAcl = "PutBucketAcl"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
op := &request.Operation{
Name: opPutBucketAcl,
@@ -2740,12 +3506,77 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
req, out := c.PutBucketAclRequest(input)
err := req.Send()
return out, err
}
+const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
+
+// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAnalyticsConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
+// req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAnalyticsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?analytics",
+ }
+
+ if input == nil {
+ input = &PutBucketAnalyticsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketAnalyticsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opPutBucketCors = "PutBucketCors"
// PutBucketCorsRequest generates a "aws/request.Request" representing the
@@ -2772,6 +3603,7 @@ const opPutBucketCors = "PutBucketCors"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
op := &request.Operation{
Name: opPutBucketCors,
@@ -2801,12 +3633,77 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
req, out := c.PutBucketCorsRequest(input)
err := req.Send()
return out, err
}
+const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
+
+// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketInventoryConfiguration for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the PutBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
+// req, resp := client.PutBucketInventoryConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketInventoryConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?inventory",
+ }
+
+ if input == nil {
+ input = &PutBucketInventoryConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketInventoryConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Adds an inventory configuration (identified by the inventory ID) to the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
const opPutBucketLifecycle = "PutBucketLifecycle"
// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
@@ -2833,6 +3730,7 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
if c.Client.Config.Logger != nil {
c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
@@ -2865,6 +3763,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
req, out := c.PutBucketLifecycleRequest(input)
err := req.Send()
@@ -2897,6 +3796,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
op := &request.Operation{
Name: opPutBucketLifecycleConfiguration,
@@ -2927,6 +3827,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
req, out := c.PutBucketLifecycleConfigurationRequest(input)
err := req.Send()
@@ -2959,6 +3860,7 @@ const opPutBucketLogging = "PutBucketLogging"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
op := &request.Operation{
Name: opPutBucketLogging,
@@ -2990,12 +3892,77 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
req, out := c.PutBucketLoggingRequest(input)
err := req.Send()
return out, err
}
+const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
+
+// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketMetricsConfiguration for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want to
+// access properties on the request object before or after sending the request.
+// If you just want the service response, call the PutBucketMetricsConfiguration
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
+// req, resp := client.PutBucketMetricsConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketMetricsConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?metrics",
+ }
+
+ if input == nil {
+ input = &PutBucketMetricsConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketMetricsConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
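// A short sketch of calling the new PutBucketMetricsConfiguration operation in
// its synchronous form. The MetricsConfiguration type is defined elsewhere in
// this file; the Id field used here is an assumption mirroring the request Id.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
//        Bucket: aws.String("my-bucket"),
//        Id:     aws.String("EntireBucket"),
//        MetricsConfiguration: &s3.MetricsConfiguration{
//            Id: aws.String("EntireBucket"), // must match the configuration ID above
//        },
//    })
//    if err != nil {
//        // inspect err with a type assertion to awserr.Error, as noted above
//    }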
const opPutBucketNotification = "PutBucketNotification"
// PutBucketNotificationRequest generates a "aws/request.Request" representing the
@@ -3022,6 +3989,7 @@ const opPutBucketNotification = "PutBucketNotification"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
if c.Client.Config.Logger != nil {
c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
@@ -3054,6 +4022,7 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
req, out := c.PutBucketNotificationRequest(input)
err := req.Send()
@@ -3086,6 +4055,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
op := &request.Operation{
Name: opPutBucketNotificationConfiguration,
@@ -3115,6 +4085,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
req, out := c.PutBucketNotificationConfigurationRequest(input)
err := req.Send()
@@ -3147,6 +4118,7 @@ const opPutBucketPolicy = "PutBucketPolicy"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
op := &request.Operation{
Name: opPutBucketPolicy,
@@ -3177,6 +4149,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
req, out := c.PutBucketPolicyRequest(input)
err := req.Send()
@@ -3209,6 +4182,7 @@ const opPutBucketReplication = "PutBucketReplication"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
op := &request.Operation{
Name: opPutBucketReplication,
@@ -3239,6 +4213,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
req, out := c.PutBucketReplicationRequest(input)
err := req.Send()
@@ -3271,6 +4246,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
op := &request.Operation{
Name: opPutBucketRequestPayment,
@@ -3304,6 +4280,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
req, out := c.PutBucketRequestPaymentRequest(input)
err := req.Send()
@@ -3336,6 +4313,7 @@ const opPutBucketTagging = "PutBucketTagging"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
op := &request.Operation{
Name: opPutBucketTagging,
@@ -3365,6 +4343,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
req, out := c.PutBucketTaggingRequest(input)
err := req.Send()
@@ -3397,6 +4376,7 @@ const opPutBucketVersioning = "PutBucketVersioning"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
op := &request.Operation{
Name: opPutBucketVersioning,
@@ -3427,6 +4407,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
req, out := c.PutBucketVersioningRequest(input)
err := req.Send()
@@ -3459,6 +4440,7 @@ const opPutBucketWebsite = "PutBucketWebsite"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
op := &request.Operation{
Name: opPutBucketWebsite,
@@ -3488,6 +4470,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
req, out := c.PutBucketWebsiteRequest(input)
err := req.Send()
@@ -3520,6 +4503,7 @@ const opPutObject = "PutObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
op := &request.Operation{
Name: opPutObject,
@@ -3547,6 +4531,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation PutObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
req, out := c.PutObjectRequest(input)
err := req.Send()
@@ -3579,6 +4564,7 @@ const opPutObjectAcl = "PutObjectAcl"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
op := &request.Operation{
Name: opPutObjectAcl,
@@ -3612,12 +4598,74 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
// * NoSuchKey
// The specified key does not exist.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
req, out := c.PutObjectAclRequest(input)
err := req.Send()
return out, err
}
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectTagging for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle with a custom handler, or when you want to
+// access properties on the request object before or after sending the request.
+// If you just want the service response, call the PutObjectTagging method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the PutObjectTaggingRequest method.
+// req, resp := client.PutObjectTaggingRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutObjectTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?tagging",
+ }
+
+ if input == nil {
+ input = &PutObjectTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set on an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
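// A minimal sketch of the new PutObjectTagging operation. The Tagging and Tag
// types are defined elsewhere in this file; the TagSet/Key/Value field names
// used here are assumptions.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
//        Bucket: aws.String("my-bucket"),
//        Key:    aws.String("reports/2016.csv"),
//        Tagging: &s3.Tagging{
//            TagSet: []*s3.Tag{
//                {Key: aws.String("project"), Value: aws.String("alpha")},
//            },
//        },
//    })
//    if err != nil {
//        // handle the awserr.Error
//    }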
const opRestoreObject = "RestoreObject"
// RestoreObjectRequest generates a "aws/request.Request" representing the
@@ -3644,6 +4692,7 @@ const opRestoreObject = "RestoreObject"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
op := &request.Operation{
Name: opRestoreObject,
@@ -3676,6 +4725,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// * ObjectAlreadyInActiveTierError
// This operation is not allowed against this storage tier
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
req, out := c.RestoreObjectRequest(input)
err := req.Send()
@@ -3708,6 +4758,7 @@ const opUploadPart = "UploadPart"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
op := &request.Operation{
Name: opUploadPart,
@@ -3741,6 +4792,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation UploadPart for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
req, out := c.UploadPartRequest(input)
err := req.Send()
@@ -3773,6 +4825,7 @@ const opUploadPartCopy = "UploadPartCopy"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
op := &request.Operation{
Name: opUploadPartCopy,
@@ -3800,6 +4853,7 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation UploadPartCopy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
req, out := c.UploadPartCopyRequest(input)
err := req.Send()
@@ -3808,6 +4862,7 @@ func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput,
// Specifies the days since the initiation of an Incomplete Multipart Upload
// that Lifecycle will wait before permanently removing all parts of the upload.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
type AbortIncompleteMultipartUpload struct {
_ struct{} `type:"structure"`
@@ -3826,6 +4881,13 @@ func (s AbortIncompleteMultipartUpload) GoString() string {
return s.String()
}
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
+ s.DaysAfterInitiation = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest
type AbortMultipartUploadInput struct {
_ struct{} `type:"structure"`
@@ -3877,6 +4939,31 @@ func (s *AbortMultipartUploadInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput
type AbortMultipartUploadOutput struct {
_ struct{} `type:"structure"`
@@ -3895,6 +4982,13 @@ func (s AbortMultipartUploadOutput) GoString() string {
return s.String()
}
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration
type AccelerateConfiguration struct {
_ struct{} `type:"structure"`
@@ -3912,6 +5006,13 @@ func (s AccelerateConfiguration) GoString() string {
return s.String()
}
+// SetStatus sets the Status field's value.
+func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy
type AccessControlPolicy struct {
_ struct{} `type:"structure"`
@@ -3951,6 +5052,327 @@ func (s *AccessControlPolicy) Validate() error {
return nil
}
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+ s.Owner = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator
+type AnalyticsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix to use when evaluating an AND predicate.
+ Prefix *string `type:"string"`
+
+ // The list of tags to use when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
+type AnalyticsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The filter used to describe a set of objects for analyses. A filter must
+ // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
+ // If no filter is provided, all objects will be considered in any analysis.
+ Filter *AnalyticsFilter `type:"structure"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // If present, it indicates that data related to access patterns will be collected
+ // and made available to analyze the tradeoffs between different storage classes.
+ //
+ // StorageClassAnalysis is a required field
+ StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.StorageClassAnalysis == nil {
+ invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.StorageClassAnalysis != nil {
+ if err := s.StorageClassAnalysis.Validate(); err != nil {
+ invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetStorageClassAnalysis sets the StorageClassAnalysis field's value.
+func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration {
+ s.StorageClassAnalysis = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination
+type AnalyticsExportDestination struct {
+ _ struct{} `type:"structure"`
+
+ // A destination signifying output to an S3 bucket.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsExportDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsExportDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsExportDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter
+type AnalyticsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating an
+ // analytics filter. The operator must have at least two predicates.
+ And *AnalyticsAndOperator `type:"structure"`
+
+ // The prefix to use when evaluating an analytics filter.
+ Prefix *string `type:"string"`
+
+ // The tag to use when evaluating an analytics filter.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s AnalyticsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
+ s.Tag = v
+ return s
+}
+
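// A sketch of building an analytics filter with the conjunction described above:
// the AND operator carries a prefix plus at least one tag, satisfying the
// two-or-more-predicates rule. The Tag Key/Value field names are assumptions.
//
//    filter := new(s3.AnalyticsFilter).SetAnd(
//        new(s3.AnalyticsAndOperator).
//            SetPrefix("logs/").
//            SetTags([]*s3.Tag{
//                {Key: aws.String("env"), Value: aws.String("prod")},
//            }),
//    )
//    if err := filter.Validate(); err != nil {
//        // invalid parameters are reported as request.ErrInvalidParams
//    }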
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination
+type AnalyticsS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // The account ID that owns the destination bucket. If no account ID is provided,
+ // the owner will not be validated prior to exporting data.
+ BucketAccountId *string `type:"string"`
+
+ // The file format used when exporting data to Amazon S3.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
+
+ // The prefix to use when exporting data. The exported data begins with this
+ // prefix.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AnalyticsS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+ s.BucketAccountId = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket
type Bucket struct {
_ struct{} `type:"structure"`
@@ -3971,6 +5393,19 @@ func (s Bucket) GoString() string {
return s.String()
}
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+ s.CreationDate = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+ s.Name = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration
type BucketLifecycleConfiguration struct {
_ struct{} `type:"structure"`
@@ -4011,6 +5446,13 @@ func (s *BucketLifecycleConfiguration) Validate() error {
return nil
}
+// SetRules sets the Rules field's value.
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus
type BucketLoggingStatus struct {
_ struct{} `type:"structure"`
@@ -4042,6 +5484,13 @@ func (s *BucketLoggingStatus) Validate() error {
return nil
}
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
+ s.LoggingEnabled = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration
type CORSConfiguration struct {
_ struct{} `type:"structure"`
@@ -4082,6 +5531,13 @@ func (s *CORSConfiguration) Validate() error {
return nil
}
+// SetCORSRules sets the CORSRules field's value.
+func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
+ s.CORSRules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule
type CORSRule struct {
_ struct{} `type:"structure"`
@@ -4135,6 +5591,37 @@ func (s *CORSRule) Validate() error {
return nil
}
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+ s.AllowedHeaders = v
+ return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+ s.AllowedOrigins = v
+ return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+ s.ExposeHeaders = v
+ return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+ s.MaxAgeSeconds = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration
type CloudFunctionConfiguration struct {
_ struct{} `type:"structure"`
@@ -4162,6 +5649,37 @@ func (s CloudFunctionConfiguration) GoString() string {
return s.String()
}
+// SetCloudFunction sets the CloudFunction field's value.
+func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration {
+ s.CloudFunction = &v
+ return s
+}
+
+// SetEvent sets the Event field's value.
+func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetInvocationRole sets the InvocationRole field's value.
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration {
+ s.InvocationRole = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
type CommonPrefix struct {
_ struct{} `type:"structure"`
@@ -4178,6 +5696,13 @@ func (s CommonPrefix) GoString() string {
return s.String()
}
+// SetPrefix sets the Prefix field's value.
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
type CompleteMultipartUploadInput struct {
_ struct{} `type:"structure" payload:"MultipartUpload"`
@@ -4231,6 +5756,37 @@ func (s *CompleteMultipartUploadInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+ s.MultipartUpload = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
type CompleteMultipartUploadOutput struct {
_ struct{} `type:"structure"`
@@ -4273,6 +5829,61 @@ func (s CompleteMultipartUploadOutput) GoString() string {
return s.String()
}
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
+ s.Location = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
type CompletedMultipartUpload struct {
_ struct{} `type:"structure"`
@@ -4289,6 +5900,13 @@ func (s CompletedMultipartUpload) GoString() string {
return s.String()
}
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+ s.Parts = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
type CompletedPart struct {
_ struct{} `type:"structure"`
@@ -4310,6 +5928,19 @@ func (s CompletedPart) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
type Condition struct {
_ struct{} `type:"structure"`
@@ -4340,6 +5971,19 @@ func (s Condition) GoString() string {
return s.String()
}
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
type CopyObjectInput struct {
_ struct{} `type:"structure"`
@@ -4457,6 +6101,15 @@ type CopyObjectInput struct {
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+ // The tag-set for the destination object; this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL
+ // query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
@@ -4495,6 +6148,205 @@ func (s *CopyObjectInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetMetadataDirective sets the MetadataDirective field's value.
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
+ s.MetadataDirective = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetTaggingDirective sets the TaggingDirective field's value.
+func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
+ s.TaggingDirective = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
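// The Set* methods above allow inputs to be built fluently. A sketch of copying
// an object while replacing its tag-set via the new Tagging and TaggingDirective
// fields (the tag-set is URL-query encoded, per the field comment); svc is an
// assumed *s3.S3 client and the bucket/key names are placeholders.
//
//    input := new(s3.CopyObjectInput).
//        SetBucket("dst-bucket").
//        SetKey("dst-key").
//        SetCopySource("src-bucket/src-key").
//        SetTagging("project=alpha&team=core").
//        SetTaggingDirective("REPLACE")
//    _, err := svc.CopyObject(input)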
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput
type CopyObjectOutput struct {
_ struct{} `type:"structure" payload:"CopyObjectResult"`
@@ -4541,6 +6393,61 @@ func (s CopyObjectOutput) GoString() string {
return s.String()
}
+// SetCopyObjectResult sets the CopyObjectResult field's value.
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
+ s.CopyObjectResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult
type CopyObjectResult struct {
_ struct{} `type:"structure"`
@@ -4559,6 +6466,19 @@ func (s CopyObjectResult) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
+ s.LastModified = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult
type CopyPartResult struct {
_ struct{} `type:"structure"`
@@ -4579,6 +6499,19 @@ func (s CopyPartResult) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
+ s.LastModified = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration
type CreateBucketConfiguration struct {
_ struct{} `type:"structure"`
@@ -4597,6 +6530,13 @@ func (s CreateBucketConfiguration) GoString() string {
return s.String()
}
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+ s.LocationConstraint = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
type CreateBucketInput struct {
_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
@@ -4648,6 +6588,55 @@ func (s *CreateBucketInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+ s.CreateBucketConfiguration = v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput
type CreateBucketOutput struct {
_ struct{} `type:"structure"`
@@ -4664,6 +6653,13 @@ func (s CreateBucketOutput) GoString() string {
return s.String()
}
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest
type CreateMultipartUploadInput struct {
_ struct{} `type:"structure"`
@@ -4780,6 +6776,139 @@ func (s *CreateMultipartUploadInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
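(Editorial note, not part of the vendored diff: every setter added here returns its receiver, so request inputs can be built by chaining. The sketch below assumes a default session and an illustrative bucket/key; only the Set* methods and CreateMultipartUpload call are taken from this SDK.)

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Build the S3 client from a default session (credentials/region from the environment).
	svc := s3.New(session.Must(session.NewSession()))

	// Chain the generated setters to build the multipart-upload request.
	input := (&s3.CreateMultipartUploadInput{}).
		SetBucket("example-bucket"). // illustrative bucket name
		SetKey("backups/archive.tar.gz").
		SetServerSideEncryption("AES256")

	out, err := svc.CreateMultipartUpload(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("upload id:", aws.StringValue(out.UploadId))
}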
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
type CreateMultipartUploadOutput struct {
_ struct{} `type:"structure"`
@@ -4832,6 +6961,67 @@ func (s CreateMultipartUploadOutput) GoString() string {
return s.String()
}
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
type Delete struct {
_ struct{} `type:"structure"`
@@ -4876,6 +7066,87 @@ func (s *Delete) Validate() error {
return nil
}
+// SetObjects sets the Objects field's value.
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
+ s.Objects = v
+ return s
+}
+
+// SetQuiet sets the Quiet field's value.
+func (s *Delete) SetQuiet(v bool) *Delete {
+ s.Quiet = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest
+type DeleteBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is deleted.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput
+type DeleteBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest
type DeleteBucketCorsInput struct {
_ struct{} `type:"structure"`
@@ -4906,6 +7177,13 @@ func (s *DeleteBucketCorsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput
type DeleteBucketCorsOutput struct {
_ struct{} `type:"structure"`
}
@@ -4920,6 +7198,7 @@ func (s DeleteBucketCorsOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest
type DeleteBucketInput struct {
_ struct{} `type:"structure"`
@@ -4950,6 +7229,81 @@ func (s *DeleteBucketInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest
+type DeleteBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput
+type DeleteBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest
type DeleteBucketLifecycleInput struct {
_ struct{} `type:"structure"`
@@ -4980,6 +7334,13 @@ func (s *DeleteBucketLifecycleInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput
type DeleteBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
}
@@ -4994,6 +7355,75 @@ func (s DeleteBucketLifecycleOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest
+type DeleteBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to delete.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput
+type DeleteBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput
type DeleteBucketOutput struct {
_ struct{} `type:"structure"`
}
@@ -5008,6 +7438,7 @@ func (s DeleteBucketOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest
type DeleteBucketPolicyInput struct {
_ struct{} `type:"structure"`
@@ -5038,6 +7469,13 @@ func (s *DeleteBucketPolicyInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput
type DeleteBucketPolicyOutput struct {
_ struct{} `type:"structure"`
}
@@ -5052,6 +7490,7 @@ func (s DeleteBucketPolicyOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest
type DeleteBucketReplicationInput struct {
_ struct{} `type:"structure"`
@@ -5082,6 +7521,13 @@ func (s *DeleteBucketReplicationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput
type DeleteBucketReplicationOutput struct {
_ struct{} `type:"structure"`
}
@@ -5096,6 +7542,7 @@ func (s DeleteBucketReplicationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest
type DeleteBucketTaggingInput struct {
_ struct{} `type:"structure"`
@@ -5126,6 +7573,13 @@ func (s *DeleteBucketTaggingInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput
type DeleteBucketTaggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -5140,6 +7594,7 @@ func (s DeleteBucketTaggingOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest
type DeleteBucketWebsiteInput struct {
_ struct{} `type:"structure"`
@@ -5170,6 +7625,13 @@ func (s *DeleteBucketWebsiteInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput
type DeleteBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
}
@@ -5184,6 +7646,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry
type DeleteMarkerEntry struct {
_ struct{} `type:"structure"`
@@ -5213,6 +7676,37 @@ func (s DeleteMarkerEntry) GoString() string {
return s.String()
}
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest
type DeleteObjectInput struct {
_ struct{} `type:"structure"`
@@ -5265,6 +7759,37 @@ func (s *DeleteObjectInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput
type DeleteObjectOutput struct {
_ struct{} `type:"structure"`
@@ -5291,6 +7816,110 @@ func (s DeleteObjectOutput) GoString() string {
return s.String()
}
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest
+type DeleteObjectTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The versionId of the object that the tag-set will be removed from.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput
+type DeleteObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was removed from.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest
type DeleteObjectsInput struct {
_ struct{} `type:"structure" payload:"Delete"`
@@ -5342,6 +7971,31 @@ func (s *DeleteObjectsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+ s.Delete = v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
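(Editorial note, not part of the vendored diff: Delete.SetObjects, Delete.SetQuiet and the DeleteObjectsInput setters above compose into a batch delete. A minimal sketch, assuming a default session; the bucket and keys are illustrative.)

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Build the Delete payload: a list of object identifiers, quiet mode on.
	del := (&s3.Delete{}).SetObjects([]*s3.ObjectIdentifier{
		{Key: aws.String("logs/2016-12-01.gz")},
		{Key: aws.String("logs/2016-12-02.gz")},
	}).SetQuiet(true)

	// Issue the batch delete and report per-object results.
	out, err := svc.DeleteObjects((&s3.DeleteObjectsInput{}).
		SetBucket("example-bucket").
		SetDelete(del))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", len(out.Deleted), "errors:", len(out.Errors))
}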
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput
type DeleteObjectsOutput struct {
_ struct{} `type:"structure"`
@@ -5364,6 +8018,25 @@ func (s DeleteObjectsOutput) GoString() string {
return s.String()
}
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+ s.Deleted = v
+ return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+ s.Errors = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject
type DeletedObject struct {
_ struct{} `type:"structure"`
@@ -5386,6 +8059,31 @@ func (s DeletedObject) GoString() string {
return s.String()
}
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination
type Destination struct {
_ struct{} `type:"structure"`
@@ -5422,6 +8120,19 @@ func (s *Destination) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+ s.Bucket = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Destination) SetStorageClass(v string) *Destination {
+ s.StorageClass = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error
type Error struct {
_ struct{} `type:"structure"`
@@ -5444,6 +8155,31 @@ func (s Error) GoString() string {
return s.String()
}
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+ s.Code = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+ s.Key = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+ s.Message = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
type ErrorDocument struct {
_ struct{} `type:"structure"`
@@ -5479,7 +8215,14 @@ func (s *ErrorDocument) Validate() error {
return nil
}
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
// Container for a key-value pair that defines the criteria for the filter rule.

+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
type FilterRule struct {
_ struct{} `type:"structure"`
@@ -5502,6 +8245,19 @@ func (s FilterRule) GoString() string {
return s.String()
}
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+ s.Name = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *FilterRule) SetValue(v string) *FilterRule {
+ s.Value = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest
type GetBucketAccelerateConfigurationInput struct {
_ struct{} `type:"structure"`
@@ -5534,6 +8290,13 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput
type GetBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -5551,6 +8314,13 @@ func (s GetBucketAccelerateConfigurationOutput) GoString() string {
return s.String()
}
+// SetStatus sets the Status field's value.
+func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest
type GetBucketAclInput struct {
_ struct{} `type:"structure"`
@@ -5581,6 +8351,13 @@ func (s *GetBucketAclInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput
type GetBucketAclOutput struct {
_ struct{} `type:"structure"`
@@ -5600,6 +8377,96 @@ func (s GetBucketAclOutput) GoString() string {
return s.String()
}
+// SetGrants sets the Grants field's value.
+func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
+ s.Owner = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest
+type GetBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which an analytics configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput
+type GetBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest
type GetBucketCorsInput struct {
_ struct{} `type:"structure"`
@@ -5630,6 +8497,13 @@ func (s *GetBucketCorsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput
type GetBucketCorsOutput struct {
_ struct{} `type:"structure"`
@@ -5646,6 +8520,90 @@ func (s GetBucketCorsOutput) GoString() string {
return s.String()
}
+// SetCORSRules sets the CORSRules field's value.
+func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
+ s.CORSRules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest
+type GetBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput
+type GetBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+ // Specifies the inventory configuration.
+ InventoryConfiguration *InventoryConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest
type GetBucketLifecycleConfigurationInput struct {
_ struct{} `type:"structure"`
@@ -5676,6 +8634,13 @@ func (s *GetBucketLifecycleConfigurationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput
type GetBucketLifecycleConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -5692,6 +8657,13 @@ func (s GetBucketLifecycleConfigurationOutput) GoString() string {
return s.String()
}
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest
type GetBucketLifecycleInput struct {
_ struct{} `type:"structure"`
@@ -5722,6 +8694,13 @@ func (s *GetBucketLifecycleInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput
type GetBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
@@ -5738,6 +8717,13 @@ func (s GetBucketLifecycleOutput) GoString() string {
return s.String()
}
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest
type GetBucketLocationInput struct {
_ struct{} `type:"structure"`
@@ -5768,6 +8754,13 @@ func (s *GetBucketLocationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput
type GetBucketLocationOutput struct {
_ struct{} `type:"structure"`
@@ -5784,6 +8777,13 @@ func (s GetBucketLocationOutput) GoString() string {
return s.String()
}
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
+ s.LocationConstraint = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest
type GetBucketLoggingInput struct {
_ struct{} `type:"structure"`
@@ -5814,6 +8814,13 @@ func (s *GetBucketLoggingInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput
type GetBucketLoggingOutput struct {
_ struct{} `type:"structure"`
@@ -5830,6 +8837,90 @@ func (s GetBucketLoggingOutput) GoString() string {
return s.String()
}
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
+ s.LoggingEnabled = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest
+type GetBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configuration to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput
+type GetBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+ // Specifies the metrics configuration.
+ MetricsConfiguration *MetricsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
+ s.MetricsConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest
type GetBucketNotificationConfigurationRequest struct {
_ struct{} `type:"structure"`
@@ -5862,6 +8953,13 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest
type GetBucketPolicyInput struct {
_ struct{} `type:"structure"`
@@ -5892,6 +8990,13 @@ func (s *GetBucketPolicyInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput
type GetBucketPolicyOutput struct {
_ struct{} `type:"structure" payload:"Policy"`
@@ -5909,6 +9014,13 @@ func (s GetBucketPolicyOutput) GoString() string {
return s.String()
}
+// SetPolicy sets the Policy field's value.
+func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
+ s.Policy = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest
type GetBucketReplicationInput struct {
_ struct{} `type:"structure"`
@@ -5939,6 +9051,13 @@ func (s *GetBucketReplicationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput
type GetBucketReplicationOutput struct {
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
@@ -5957,6 +9076,13 @@ func (s GetBucketReplicationOutput) GoString() string {
return s.String()
}
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest
type GetBucketRequestPaymentInput struct {
_ struct{} `type:"structure"`
@@ -5987,6 +9113,13 @@ func (s *GetBucketRequestPaymentInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput
type GetBucketRequestPaymentOutput struct {
_ struct{} `type:"structure"`
@@ -6004,6 +9137,13 @@ func (s GetBucketRequestPaymentOutput) GoString() string {
return s.String()
}
+// SetPayer sets the Payer field's value.
+func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
+ s.Payer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest
type GetBucketTaggingInput struct {
_ struct{} `type:"structure"`
@@ -6034,6 +9174,13 @@ func (s *GetBucketTaggingInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput
type GetBucketTaggingOutput struct {
_ struct{} `type:"structure"`
@@ -6051,6 +9198,13 @@ func (s GetBucketTaggingOutput) GoString() string {
return s.String()
}
+// SetTagSet sets the TagSet field's value.
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
type GetBucketVersioningInput struct {
_ struct{} `type:"structure"`
@@ -6081,6 +9235,13 @@ func (s *GetBucketVersioningInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
type GetBucketVersioningOutput struct {
_ struct{} `type:"structure"`
@@ -6103,6 +9264,19 @@ func (s GetBucketVersioningOutput) GoString() string {
return s.String()
}
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
type GetBucketWebsiteInput struct {
_ struct{} `type:"structure"`
@@ -6133,6 +9307,13 @@ func (s *GetBucketWebsiteInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput
type GetBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
@@ -6155,6 +9336,31 @@ func (s GetBucketWebsiteOutput) GoString() string {
return s.String()
}
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
+ s.RoutingRules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest
type GetObjectAclInput struct {
_ struct{} `type:"structure"`
@@ -6203,6 +9409,31 @@ func (s *GetObjectAclInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput
type GetObjectAclOutput struct {
_ struct{} `type:"structure"`
@@ -6226,6 +9457,25 @@ func (s GetObjectAclOutput) GoString() string {
return s.String()
}
+// SetGrants sets the Grants field's value.
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
+ s.Grants = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
+ s.Owner = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest
type GetObjectInput struct {
_ struct{} `type:"structure"`
@@ -6332,6 +9582,121 @@ func (s *GetObjectInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
+ s.ResponseCacheControl = &v
+ return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
+ s.ResponseContentDisposition = &v
+ return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
+ s.ResponseContentEncoding = &v
+ return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
+ s.ResponseContentLanguage = &v
+ return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
+ s.ResponseContentType = &v
+ return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
+ s.ResponseExpires = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
+ s.VersionId = &v
+ return s
+}
+
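(Editorial note, not part of the vendored diff: the GetObjectInput setters above cover ranged reads and response-header overrides. A minimal sketch, assuming a default session and an illustrative bucket/key/byte range.)

package main

import (
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Fetch the first 1 KiB of the object and override the returned Content-Type.
	out, err := svc.GetObject((&s3.GetObjectInput{}).
		SetBucket("example-bucket").
		SetKey("reports/summary.csv").
		SetRange("bytes=0-1023").
		SetResponseContentType("text/csv"))
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	// Stream the (partial) body to stdout.
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
}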
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
type GetObjectOutput struct {
_ struct{} `type:"structure" payload:"Body"`
@@ -6425,6 +9790,9 @@ type GetObjectOutput struct {
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+ // The number of tags, if any, on the object.
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
// Version of the object.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
@@ -6444,6 +9812,267 @@ func (s GetObjectOutput) GoString() string {
return s.String()
}
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
+ s.Body = v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
+ s.ContentRange = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
+ s.TagCount = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest
+type GetObjectTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput
+type GetObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // TagSet is a required field
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
+ s.TagSet = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
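[Editorial aside, not part of the vendored diff.] The GetObjectTagging input and output shapes above follow the fluent setter pattern introduced throughout this update. A minimal usage sketch, assuming the corresponding GetObjectTagging operation is generated elsewhere in this file; the region, bucket and key names are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Build the request with the generated fluent setters instead of
	// assigning pointer fields by hand.
	input := new(s3.GetObjectTaggingInput).
		SetBucket("example-bucket"). // placeholder bucket name
		SetKey("example-key")        // placeholder object key

	out, err := svc.GetObjectTagging(input) // operation assumed to exist alongside these shapes
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}

The setters take values and store addresses, so callers no longer need aws.String/aws.Int64 wrappers when constructing inputs.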
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest
type GetObjectTorrentInput struct {
_ struct{} `type:"structure"`
@@ -6489,6 +10118,25 @@ func (s *GetObjectTorrentInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput
type GetObjectTorrentOutput struct {
_ struct{} `type:"structure" payload:"Body"`
@@ -6509,6 +10157,58 @@ func (s GetObjectTorrentOutput) GoString() string {
return s.String()
}
+// SetBody sets the Body field's value.
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput {
+ s.Body = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters
+type GlacierJobParameters struct {
+ _ struct{} `type:"structure"`
+
+ // Glacier retrieval tier at which the restore will be processed.
+ //
+ // Tier is a required field
+ Tier *string `type:"string" required:"true" enum:"Tier"`
+}
+
+// String returns the string representation
+func (s GlacierJobParameters) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlacierJobParameters) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlacierJobParameters) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
+ if s.Tier == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tier"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTier sets the Tier field's value.
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
+ s.Tier = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant
type Grant struct {
_ struct{} `type:"structure"`
@@ -6543,6 +10243,19 @@ func (s *Grant) Validate() error {
return nil
}
+// SetGrantee sets the Grantee field's value.
+func (s *Grant) SetGrantee(v *Grantee) *Grant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *Grant) SetPermission(v string) *Grant {
+ s.Permission = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee
type Grantee struct {
_ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
@@ -6587,6 +10300,37 @@ func (s *Grantee) Validate() error {
return nil
}
+// SetDisplayName sets the DisplayName field's value.
+func (s *Grantee) SetDisplayName(v string) *Grantee {
+ s.DisplayName = &v
+ return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+ s.EmailAddress = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+ s.ID = &v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+ s.Type = &v
+ return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+ s.URI = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
type HeadBucketInput struct {
_ struct{} `type:"structure"`
@@ -6617,6 +10361,13 @@ func (s *HeadBucketInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
type HeadBucketOutput struct {
_ struct{} `type:"structure"`
}
@@ -6631,6 +10382,7 @@ func (s HeadBucketOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
type HeadObjectInput struct {
_ struct{} `type:"structure"`
@@ -6720,6 +10472,85 @@ func (s *HeadObjectInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput
type HeadObjectOutput struct {
_ struct{} `type:"structure"`
@@ -6826,6 +10657,157 @@ func (s HeadObjectOutput) GoString() string {
return s.String()
}
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
+ s.AcceptRanges = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
+ s.ContentType = &v
+ return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
+ s.Expires = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
+ s.LastModified = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument
type IndexDocument struct {
_ struct{} `type:"structure"`
@@ -6861,6 +10843,13 @@ func (s *IndexDocument) Validate() error {
return nil
}
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator
type Initiator struct {
_ struct{} `type:"structure"`
@@ -6882,7 +10871,344 @@ func (s Initiator) GoString() string {
return s.String()
}
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration
+type InventoryConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Contains information about where to publish the inventory results.
+ //
+ // Destination is a required field
+ Destination *InventoryDestination `type:"structure" required:"true"`
+
+ // Specifies an inventory filter. The inventory only includes objects that meet
+ // the filter's criteria.
+ Filter *InventoryFilter `type:"structure"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+
+ // Specifies which object version(s) to include in the inventory results.
+ //
+ // IncludedObjectVersions is a required field
+ IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+ // Specifies whether the inventory is enabled or disabled.
+ //
+ // IsEnabled is a required field
+ IsEnabled *bool `type:"boolean" required:"true"`
+
+ // Contains the optional fields that are included in the inventory results.
+ OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+ // Specifies the schedule for generating inventory results.
+ //
+ // Schedule is a required field
+ Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.IncludedObjectVersions == nil {
+ invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+ }
+ if s.IsEnabled == nil {
+ invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+ }
+ if s.Schedule == nil {
+ invalidParams.Add(request.NewErrParamRequired("Schedule"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Schedule != nil {
+ if err := s.Schedule.Validate(); err != nil {
+ invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+ s.Destination = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+ s.IncludedObjectVersions = &v
+ return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+ s.IsEnabled = &v
+ return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+ s.OptionalFields = v
+ return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
+ s.Schedule = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination
+type InventoryDestination struct {
+ _ struct{} `type:"structure"`
+
+ // Contains the bucket name, file format, bucket owner (optional), and prefix
+ // (optional) where inventory results are published.
+ //
+ // S3BucketDestination is a required field
+ S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
+ if s.S3BucketDestination == nil {
+ invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+ }
+ if s.S3BucketDestination != nil {
+ if err := s.S3BucketDestination.Validate(); err != nil {
+ invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
+ s.S3BucketDestination = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter
+type InventoryFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix that an object must have to be included in the inventory results.
+ //
+ // Prefix is a required field
+ Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination
+type InventoryS3BucketDestination struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the account that owns the destination bucket.
+ AccountId *string `type:"string"`
+
+ // The Amazon Resource Name (ARN) of the bucket where inventory results will
+ // be published.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Specifies the output format of the inventory results.
+ //
+ // Format is a required field
+ Format *string `type:"string" required:"true" enum:"InventoryFormat"`
+
+ // The prefix that is prepended to all inventory results.
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryS3BucketDestination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryS3BucketDestination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryS3BucketDestination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Format == nil {
+ invalidParams.Add(request.NewErrParamRequired("Format"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination {
+ s.AccountId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination {
+ s.Bucket = &v
+ return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
+ s.Format = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule
+type InventorySchedule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies how frequently inventory results are produced.
+ //
+ // Frequency is a required field
+ Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"`
+}
+
+// String returns the string representation
+func (s InventorySchedule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventorySchedule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventorySchedule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+ if s.Frequency == nil {
+ invalidParams.Add(request.NewErrParamRequired("Frequency"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+ s.Frequency = &v
+ return s
+}
+
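[Editorial aside, not part of the vendored diff.] The Inventory* shapes above carry several required fields (Destination, Id, IncludedObjectVersions, IsEnabled, Schedule), and their Validate methods also check the nested shapes. A sketch of assembling a configuration with the new setters and validating it locally; string literals stand in for the InventoryIncludedObjectVersions, InventoryFrequency and InventoryFormat enum constants referenced in the struct tags, and the bucket ARN is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	cfg := new(s3.InventoryConfiguration).
		SetId("weekly-report").           // placeholder configuration ID
		SetIsEnabled(true).
		SetIncludedObjectVersions("Current").
		SetSchedule(new(s3.InventorySchedule).SetFrequency("Weekly")).
		SetDestination(new(s3.InventoryDestination).
			SetS3BucketDestination(new(s3.InventoryS3BucketDestination).
				SetBucket("arn:aws:s3:::example-inventory-bucket"). // placeholder ARN
				SetFormat("CSV")))

	// Validate walks the nested Destination and Schedule shapes too, so a
	// missing Format or Frequency is reported here before any request is made.
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println("inventory configuration is structurally valid")
}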
// Container for object key name prefix and suffix filtering rules.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter
type KeyFilter struct {
_ struct{} `type:"structure"`
@@ -6901,7 +11227,14 @@ func (s KeyFilter) GoString() string {
return s.String()
}
+// SetFilterRules sets the FilterRules field's value.
+func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
+ s.FilterRules = v
+ return s
+}
+
// Container for specifying the AWS Lambda notification configuration.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration
type LambdaFunctionConfiguration struct {
_ struct{} `type:"structure"`
@@ -6949,6 +11282,31 @@ func (s *LambdaFunctionConfiguration) Validate() error {
return nil
}
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+ s.LambdaFunctionArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
type LifecycleConfiguration struct {
_ struct{} `type:"structure"`
@@ -6989,6 +11347,13 @@ func (s *LifecycleConfiguration) Validate() error {
return nil
}
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
type LifecycleExpiration struct {
_ struct{} `type:"structure"`
@@ -7017,6 +11382,25 @@ func (s LifecycleExpiration) GoString() string {
return s.String()
}
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+ s.Days = &v
+ return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+ s.ExpiredObjectDeleteMarker = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
type LifecycleRule struct {
_ struct{} `type:"structure"`
@@ -7026,6 +11410,10 @@ type LifecycleRule struct {
Expiration *LifecycleExpiration `type:"structure"`
+ // The Filter is used to identify objects that a Lifecycle Rule applies to.
+ // A Filter must have exactly one of Prefix, Tag, or And specified.
+ Filter *LifecycleRuleFilter `type:"structure"`
+
// Unique identifier for the rule. The value cannot be longer than 255 characters.
ID *string `type:"string"`
@@ -7038,10 +11426,9 @@ type LifecycleRule struct {
NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
- // Prefix identifying one or more objects to which the rule applies.
- //
- // Prefix is a required field
- Prefix *string `type:"string" required:"true"`
+ // Prefix identifying one or more objects to which the rule applies. This is
+ // deprecated; use Filter instead.
+ Prefix *string `deprecated:"true" type:"string"`
// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
// is not currently being applied.
@@ -7065,12 +11452,227 @@ func (s LifecycleRule) GoString() string {
// Validate inspects the fields of the type to determine if they are valid.
func (s *LifecycleRule) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
- if s.Prefix == nil {
- invalidParams.Add(request.NewErrParamRequired("Prefix"))
- }
if s.Status == nil {
invalidParams.Add(request.NewErrParamRequired("Status"))
}
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+ s.Expiration = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+ s.Filter = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+ s.NoncurrentVersionTransitions = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+ s.Status = &v
+ return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+ s.Transitions = v
+ return s
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
+type LifecycleRuleAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+
+ // All of these tags must exist in the object's tag set in order for the rule
+ // to apply.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
+ s.Tags = v
+ return s
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to.
+// A Filter must have exactly one of Prefix, Tag, or And specified.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter
+type LifecycleRuleFilter struct {
+ _ struct{} `type:"structure"`
+
+ // This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+ // more predicates. The Lifecycle Rule will apply to any object matching all
+ // of the predicates configured inside the And operator.
+ And *LifecycleRuleAndOperator `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string"`
+
+ // This tag must exist in the object's tag set in order for the rule to apply.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
+ s.Tag = v
+ return s
+}
+
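[Editorial aside, not part of the vendored diff.] With Prefix on LifecycleRule now deprecated in favour of Filter, a rule is expected to carry exactly one of Prefix, Tag, or And inside its filter. A sketch of building such a rule with the setters above; the SetKey/SetValue setters on s3.Tag are assumed to follow the same generated pattern, and the ID, prefix and tag values are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Expire objects under logs/ that also carry retention=short, using the
	// And operator so both predicates must match.
	rule := new(s3.LifecycleRule).
		SetID("expire-short-retention-logs").
		SetStatus("Enabled"). // "Enabled" / "Disabled" per the ExpirationStatus enum
		SetFilter(new(s3.LifecycleRuleFilter).
			SetAnd(new(s3.LifecycleRuleAndOperator).
				SetPrefix("logs/").
				SetTags([]*s3.Tag{
					new(s3.Tag).SetKey("retention").SetValue("short"), // setters assumed generated for Tag
				}))).
		SetExpiration(new(s3.LifecycleExpiration).SetDays(30))

	// Validate now only requires Status; the nested Filter (and its Tags)
	// are validated when present.
	if err := rule.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println("lifecycle rule is structurally valid")
}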
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest
+type ListBucketAnalyticsConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket from which analytics configurations are retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ContinuationToken that represents a placeholder from which this request
+ // should begin.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -7078,6 +11680,288 @@ func (s *LifecycleRule) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput
+type ListBucketAnalyticsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of analytics configurations for a bucket.
+ AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
+
+ // The ContinuationToken that represents where this request began.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of analytics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which indicates that
+ // there are more analytics configurations to list. The next request must include
+ // this NextContinuationToken. The token is obfuscated and is not a usable value.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput {
+ s.AnalyticsConfigurationList = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest
+type ListBucketInventoryConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the inventory configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker used to continue an inventory configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketInventoryConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput
+type ListBucketInventoryConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If sent in the request, the marker that is used as a starting point for this
+ // inventory configuration list response.
+ ContinuationToken *string `type:"string"`
+
+ // The list of inventory configurations for a bucket.
+ InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
+
+ // Indicates whether the returned list of inventory configurations is truncated
+ // in this response. A value of true indicates that the list is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // The marker used to continue this inventory configuration listing. Use the
+ // NextContinuationToken from this response to continue the listing in a subsequent
+ // request. The continuation token is an opaque value that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
+ s.InventoryConfigurationList = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest
+type ListBucketMetricsConfigurationsInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the bucket containing the metrics configurations to retrieve.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The marker that is used to continue a metrics configuration listing that
+ // has been truncated. Use the NextContinuationToken from a previously truncated
+ // list response to continue the listing. The continuation token is an opaque
+ // value that Amazon S3 understands.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketMetricsConfigurationsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput
+type ListBucketMetricsConfigurationsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The marker that is used as a starting point for this metrics configuration
+ // list response. This value is present if it was sent in the request.
+ ContinuationToken *string `type:"string"`
+
+ // Indicates whether the returned list of metrics configurations is complete.
+ // A value of true indicates that the list is not complete and the NextContinuationToken
+ // will be provided for a subsequent request.
+ IsTruncated *bool `type:"boolean"`
+
+ // The list of metrics configurations for a bucket.
+ MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+ // The marker used to continue a metrics configuration listing that has been
+ // truncated. Use the NextContinuationToken from a previously truncated list
+ // response to continue the listing. The continuation token is an opaque value
+ // that Amazon S3 understands.
+ NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+ return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+ s.MetricsConfigurationList = v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+ s.NextContinuationToken = &v
+ return s
+}
+
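[Editorial aside, not part of the vendored diff.] The three ListBucket*Configurations pairs above share the same continuation-token pagination protocol: while IsTruncated is true, the NextContinuationToken from one response becomes the ContinuationToken of the next request. A sketch of that loop for metrics configurations, assuming the ListBucketMetricsConfigurations operation is generated elsewhere in this file; the region and bucket name are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	input := new(s3.ListBucketMetricsConfigurationsInput).
		SetBucket("example-bucket") // placeholder bucket name

	for {
		out, err := svc.ListBucketMetricsConfigurations(input) // operation assumed to exist alongside these shapes
		if err != nil {
			log.Fatal(err)
		}
		for _, mc := range out.MetricsConfigurationList {
			fmt.Println(aws.StringValue(mc.Id)) // print each configuration's ID
		}
		// IsTruncated=true means the listing is incomplete and the
		// NextContinuationToken must be fed back into the next request.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		input.SetContinuationToken(aws.StringValue(out.NextContinuationToken))
	}
}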
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
type ListBucketsInput struct {
_ struct{} `type:"structure"`
}
@@ -7092,6 +11976,7 @@ func (s ListBucketsInput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
type ListBucketsOutput struct {
_ struct{} `type:"structure"`
@@ -7110,6 +11995,19 @@ func (s ListBucketsOutput) GoString() string {
return s.String()
}
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+ s.Buckets = v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+ s.Owner = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest
type ListMultipartUploadsInput struct {
_ struct{} `type:"structure"`
@@ -7169,6 +12067,49 @@ func (s *ListMultipartUploadsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput
type ListMultipartUploadsOutput struct {
_ struct{} `type:"structure"`
@@ -7223,6 +12164,79 @@ func (s ListMultipartUploadsOutput) GoString() string {
return s.String()
}
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+ s.MaxUploads = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.NextUploadIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+ s.UploadIdMarker = &v
+ return s
+}
+
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+ s.Uploads = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest
type ListObjectVersionsInput struct {
_ struct{} `type:"structure"`
@@ -7277,6 +12291,49 @@ func (s *ListObjectVersionsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput
type ListObjectVersionsOutput struct {
_ struct{} `type:"structure"`
@@ -7326,6 +12383,85 @@ func (s ListObjectVersionsOutput) GoString() string {
return s.String()
}
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+ s.DeleteMarkers = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+ s.KeyMarker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+ s.NextKeyMarker = &v
+ return s
+}
+
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.NextVersionIdMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+ s.VersionIdMarker = &v
+ return s
+}
+
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+ s.Versions = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest
type ListObjectsInput struct {
_ struct{} `type:"structure"`
@@ -7382,6 +12518,49 @@ func (s *ListObjectsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
type ListObjectsOutput struct {
_ struct{} `type:"structure"`
@@ -7426,6 +12605,67 @@ func (s ListObjectsOutput) GoString() string {
return s.String()
}
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+ s.Contents = v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+ s.Name = &v
+ return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+ s.NextMarker = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+ s.Prefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
type ListObjectsV2Input struct {
_ struct{} `type:"structure"`
@@ -7490,6 +12730,61 @@ func (s *ListObjectsV2Input) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+ s.Bucket = &v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+ s.EncodingType = &v
+ return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+ s.FetchOwner = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+ s.Prefix = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+ s.StartAfter = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
type ListObjectsV2Output struct {
_ struct{} `type:"structure"`
@@ -7551,6 +12846,79 @@ func (s ListObjectsV2Output) GoString() string {
return s.String()
}
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+ s.CommonPrefixes = v
+ return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+ s.Contents = v
+ return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+ s.ContinuationToken = &v
+ return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+ s.Delimiter = &v
+ return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+ s.EncodingType = &v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+ s.KeyCount = &v
+ return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+ s.MaxKeys = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+ s.Name = &v
+ return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+ s.NextContinuationToken = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+ s.Prefix = &v
+ return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+ s.StartAfter = &v
+ return s
+}
+
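The generated Set* methods above return their receiver, so a request struct can be assembled as one chain instead of assigning aws.String/aws.Int64 values field by field. A minimal sketch of that pattern against ListObjectsV2, assuming credentials and region come from the usual session sources; the bucket name, prefix, and key limit are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Region and credentials are resolved from the environment or the
        // shared config files, as with any aws-sdk-go session.
        sess, err := session.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        svc := s3.New(sess)

        // Build the request with the generated fluent setters.
        input := new(s3.ListObjectsV2Input).
            SetBucket("example-bucket"). // placeholder bucket name
            SetPrefix("logs/").
            SetMaxKeys(100)

        out, err := svc.ListObjectsV2(input)
        if err != nil {
            log.Fatal(err)
        }
        for _, obj := range out.Contents {
            fmt.Println(*obj.Key, *obj.Size)
        }
    }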
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
type ListPartsInput struct {
_ struct{} `type:"structure"`
@@ -7611,6 +12979,43 @@ func (s *ListPartsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
type ListPartsOutput struct {
_ struct{} `type:"structure"`
@@ -7669,6 +13074,91 @@ func (s ListPartsOutput) GoString() string {
return s.String()
}
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+ s.AbortDate = &v
+ return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+ s.AbortRuleId = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+ s.Bucket = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+ s.Initiator = v
+ return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+ s.Key = &v
+ return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+ s.MaxParts = &v
+ return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
+ s.NextPartNumberMarker = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
+ s.Owner = v
+ return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
+ s.PartNumberMarker = &v
+ return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
+ s.Parts = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled
type LoggingEnabled struct {
_ struct{} `type:"structure"`
@@ -7717,6 +13207,197 @@ func (s *LoggingEnabled) Validate() error {
return nil
}
+// SetTargetBucket sets the TargetBucket field's value.
+func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
+ s.TargetBucket = &v
+ return s
+}
+
+// SetTargetGrants sets the TargetGrants field's value.
+func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
+ s.TargetGrants = v
+ return s
+}
+
+// SetTargetPrefix sets the TargetPrefix field's value.
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
+ s.TargetPrefix = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator
+type MetricsAndOperator struct {
+ _ struct{} `type:"structure"`
+
+ // The prefix used when evaluating an AND predicate.
+ Prefix *string `type:"string"`
+
+ // The list of tags used when evaluating an AND predicate.
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s MetricsAndOperator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsAndOperator) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsAndOperator) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"}
+ if s.Tags != nil {
+ for i, v := range s.Tags {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
+ s.Prefix = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
+ s.Tags = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration
+type MetricsConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies a metrics configuration filter. The metrics configuration will
+ // only include objects that meet the filter's criteria. A filter must be a
+ // prefix, a tag, or a conjunction (MetricsAndOperator).
+ Filter *MetricsFilter `type:"structure"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricsConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"}
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.Filter != nil {
+ if err := s.Filter.Validate(); err != nil {
+ invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter
+type MetricsFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A conjunction (logical AND) of predicates, which is used in evaluating a
+ // metrics filter. The operator must have at least two predicates, and an object
+ // must match all of the predicates in order for the filter to apply.
+ And *MetricsAndOperator `type:"structure"`
+
+ // The prefix used when evaluating a metrics filter.
+ Prefix *string `type:"string"`
+
+ // The tag used when evaluating a metrics filter.
+ Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricsFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsFilter) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsFilter) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"}
+ if s.And != nil {
+ if err := s.And.Validate(); err != nil {
+ invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.Tag != nil {
+ if err := s.Tag.Validate(); err != nil {
+ invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter {
+ s.And = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter {
+ s.Prefix = &v
+ return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
+ s.Tag = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload
type MultipartUpload struct {
_ struct{} `type:"structure"`
@@ -7748,11 +13429,48 @@ func (s MultipartUpload) GoString() string {
return s.String()
}
+// SetInitiated sets the Initiated field's value.
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
+ s.Initiated = &v
+ return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
+ s.Initiator = v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
+ s.Key = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
+ s.Owner = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
+ s.StorageClass = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
+ s.UploadId = &v
+ return s
+}
+
// Specifies when noncurrent object versions expire. Upon expiration, Amazon
// S3 permanently deletes the noncurrent object versions. You set this lifecycle
// configuration action on a bucket that has versioning enabled (or suspended)
// to request that Amazon S3 delete noncurrent object versions at a specific
// period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration
type NoncurrentVersionExpiration struct {
_ struct{} `type:"structure"`
@@ -7773,11 +13491,18 @@ func (s NoncurrentVersionExpiration) GoString() string {
return s.String()
}
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
+ s.NoncurrentDays = &v
+ return s
+}
+
// Container for the transition rule that describes when noncurrent objects
// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
// versioning-enabled (or versioning is suspended), you can set this action
// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
// or GLACIER storage class at a specific period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition
type NoncurrentVersionTransition struct {
_ struct{} `type:"structure"`
@@ -7801,8 +13526,21 @@ func (s NoncurrentVersionTransition) GoString() string {
return s.String()
}
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition {
+ s.NoncurrentDays = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition {
+ s.StorageClass = &v
+ return s
+}
+
// Container for specifying the notification configuration of the bucket. If
// this element is empty, notifications are turned off on the bucket.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration
type NotificationConfiguration struct {
_ struct{} `type:"structure"`
@@ -7863,6 +13601,25 @@ func (s *NotificationConfiguration) Validate() error {
return nil
}
+// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value.
+func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration {
+ s.LambdaFunctionConfigurations = v
+ return s
+}
+
+// SetQueueConfigurations sets the QueueConfigurations field's value.
+func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration {
+ s.QueueConfigurations = v
+ return s
+}
+
+// SetTopicConfigurations sets the TopicConfigurations field's value.
+func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration {
+ s.TopicConfigurations = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated
type NotificationConfigurationDeprecated struct {
_ struct{} `type:"structure"`
@@ -7883,8 +13640,27 @@ func (s NotificationConfigurationDeprecated) GoString() string {
return s.String()
}
+// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
+ s.CloudFunctionConfiguration = v
+ return s
+}
+
+// SetQueueConfiguration sets the QueueConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.QueueConfiguration = v
+ return s
+}
+
+// SetTopicConfiguration sets the TopicConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
+ s.TopicConfiguration = v
+ return s
+}
+
// Container for object key name filtering rules. For information about key
// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter
type NotificationConfigurationFilter struct {
_ struct{} `type:"structure"`
@@ -7902,6 +13678,13 @@ func (s NotificationConfigurationFilter) GoString() string {
return s.String()
}
+// SetKey sets the Key field's value.
+func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
+ s.Key = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object
type Object struct {
_ struct{} `type:"structure"`
@@ -7929,6 +13712,43 @@ func (s Object) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+ s.ETag = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+ s.StorageClass = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier
type ObjectIdentifier struct {
_ struct{} `type:"structure"`
@@ -7967,6 +13787,19 @@ func (s *ObjectIdentifier) Validate() error {
return nil
}
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion
type ObjectVersion struct {
_ struct{} `type:"structure"`
@@ -8004,6 +13837,55 @@ func (s ObjectVersion) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
+ s.ETag = &v
+ return s
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
+ s.IsLatest = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
+ s.Key = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
+ s.Owner = v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
+ s.Size = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner
type Owner struct {
_ struct{} `type:"structure"`
@@ -8022,6 +13904,19 @@ func (s Owner) GoString() string {
return s.String()
}
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+ s.ID = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part
type Part struct {
_ struct{} `type:"structure"`
@@ -8049,6 +13944,31 @@ func (s Part) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+ s.ETag = &v
+ return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+ s.LastModified = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+ s.PartNumber = &v
+ return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+ s.Size = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest
type PutBucketAccelerateConfigurationInput struct {
_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
@@ -8089,6 +14009,19 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error {
return nil
}
+// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
+ s.AccelerateConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput
type PutBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -8103,6 +14036,7 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest
type PutBucketAclInput struct {
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
@@ -8159,6 +14093,55 @@ func (s *PutBucketAclInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput
type PutBucketAclOutput struct {
_ struct{} `type:"structure"`
}
@@ -8173,6 +14156,94 @@ func (s PutBucketAclOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest
+type PutBucketAnalyticsConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+ // The configuration and any analyses for the analytics filter.
+ //
+ // AnalyticsConfiguration is a required field
+ AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
+
+ // The name of the bucket to which an analytics configuration is stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The identifier used to represent an analytics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
+ if s.AnalyticsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.AnalyticsConfiguration != nil {
+ if err := s.AnalyticsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
+ s.AnalyticsConfiguration = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput
+type PutBucketAnalyticsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest
type PutBucketCorsInput struct {
_ struct{} `type:"structure" payload:"CORSConfiguration"`
@@ -8214,6 +14285,19 @@ func (s *PutBucketCorsInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+ s.CORSConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput
type PutBucketCorsOutput struct {
_ struct{} `type:"structure"`
}
@@ -8228,6 +14312,94 @@ func (s PutBucketCorsOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest
+type PutBucketInventoryConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+ // The name of the bucket where the inventory configuration will be stored.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the inventory configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the inventory configuration.
+ //
+ // InventoryConfiguration is a required field
+ InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketInventoryConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.InventoryConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
+ }
+ if s.InventoryConfiguration != nil {
+ if err := s.InventoryConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
+ s.InventoryConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput
+type PutBucketInventoryConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest
type PutBucketLifecycleConfigurationInput struct {
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
@@ -8265,6 +14437,19 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput
type PutBucketLifecycleConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -8279,6 +14464,7 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest
type PutBucketLifecycleInput struct {
_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
@@ -8316,6 +14502,19 @@ func (s *PutBucketLifecycleInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
+ s.LifecycleConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput
type PutBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
}
@@ -8330,6 +14529,7 @@ func (s PutBucketLifecycleOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest
type PutBucketLoggingInput struct {
_ struct{} `type:"structure" payload:"BucketLoggingStatus"`
@@ -8371,6 +14571,19 @@ func (s *PutBucketLoggingInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+ s.BucketLoggingStatus = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput
type PutBucketLoggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -8385,6 +14598,94 @@ func (s PutBucketLoggingOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest
+type PutBucketMetricsConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+ // The name of the bucket for which the metrics configuration is set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The ID used to identify the metrics configuration.
+ //
+ // Id is a required field
+ Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+ // Specifies the metrics configuration.
+ //
+ // MetricsConfiguration is a required field
+ MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketMetricsConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Id == nil {
+ invalidParams.Add(request.NewErrParamRequired("Id"))
+ }
+ if s.MetricsConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
+ }
+ if s.MetricsConfiguration != nil {
+ if err := s.MetricsConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
+ s.Id = &v
+ return s
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
+ s.MetricsConfiguration = v
+ return s
+}
+
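Together with the MetricsConfiguration and MetricsFilter setters earlier in the file, these allow a bucket metrics configuration to be expressed as a single chained value. A sketch, assuming svc is an existing *s3.S3 client, that the generated PutBucketMetricsConfiguration method accompanies this input type, and that the bucket name and configuration ID are placeholders (log imported):

    // Count only objects under documents/ in the request metrics.
    cfg := new(s3.MetricsConfiguration).
        SetId("documents-metrics").
        SetFilter(new(s3.MetricsFilter).SetPrefix("documents/"))

    input := new(s3.PutBucketMetricsConfigurationInput).
        SetBucket("example-bucket").
        SetId("documents-metrics").
        SetMetricsConfiguration(cfg)

    if _, err := svc.PutBucketMetricsConfiguration(input); err != nil {
        log.Fatal(err)
    }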
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput
+type PutBucketMetricsConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest
type PutBucketNotificationConfigurationInput struct {
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
@@ -8429,6 +14730,19 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
+ s.NotificationConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput
type PutBucketNotificationConfigurationOutput struct {
_ struct{} `type:"structure"`
}
@@ -8443,6 +14757,7 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest
type PutBucketNotificationInput struct {
_ struct{} `type:"structure" payload:"NotificationConfiguration"`
@@ -8479,6 +14794,19 @@ func (s *PutBucketNotificationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
+ s.NotificationConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput
type PutBucketNotificationOutput struct {
_ struct{} `type:"structure"`
}
@@ -8493,6 +14821,7 @@ func (s PutBucketNotificationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest
type PutBucketPolicyInput struct {
_ struct{} `type:"structure" payload:"Policy"`
@@ -8531,6 +14860,19 @@ func (s *PutBucketPolicyInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
+ s.Policy = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput
type PutBucketPolicyOutput struct {
_ struct{} `type:"structure"`
}
@@ -8545,6 +14887,7 @@ func (s PutBucketPolicyOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest
type PutBucketReplicationInput struct {
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
@@ -8589,6 +14932,19 @@ func (s *PutBucketReplicationInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
+ s.ReplicationConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
type PutBucketReplicationOutput struct {
_ struct{} `type:"structure"`
}
@@ -8603,6 +14959,7 @@ func (s PutBucketReplicationOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
type PutBucketRequestPaymentInput struct {
_ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
@@ -8644,6 +15001,19 @@ func (s *PutBucketRequestPaymentInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
+ s.RequestPaymentConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput
type PutBucketRequestPaymentOutput struct {
_ struct{} `type:"structure"`
}
@@ -8658,6 +15028,7 @@ func (s PutBucketRequestPaymentOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest
type PutBucketTaggingInput struct {
_ struct{} `type:"structure" payload:"Tagging"`
@@ -8699,6 +15070,19 @@ func (s *PutBucketTaggingInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput
type PutBucketTaggingOutput struct {
_ struct{} `type:"structure"`
}
@@ -8713,6 +15097,7 @@ func (s PutBucketTaggingOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest
type PutBucketVersioningInput struct {
_ struct{} `type:"structure" payload:"VersioningConfiguration"`
@@ -8753,6 +15138,25 @@ func (s *PutBucketVersioningInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+ s.MFA = &v
+ return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+ s.VersioningConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
type PutBucketVersioningOutput struct {
_ struct{} `type:"structure"`
}
@@ -8767,6 +15171,7 @@ func (s PutBucketVersioningOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
type PutBucketWebsiteInput struct {
_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
@@ -8808,6 +15213,19 @@ func (s *PutBucketWebsiteInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
+ s.WebsiteConfiguration = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
type PutBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
}
@@ -8822,6 +15240,7 @@ func (s PutBucketWebsiteOutput) GoString() string {
return s.String()
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
type PutObjectAclInput struct {
_ struct{} `type:"structure" payload:"AccessControlPolicy"`
@@ -8896,6 +15315,73 @@ func (s *PutObjectAclInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
+ s.ACL = &v
+ return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
+ s.AccessControlPolicy = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
+ s.GrantWrite = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
type PutObjectAclOutput struct {
_ struct{} `type:"structure"`
@@ -8914,6 +15400,13 @@ func (s PutObjectAclOutput) GoString() string {
return s.String()
}
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
type PutObjectInput struct {
_ struct{} `type:"structure" payload:"Body"`
@@ -9006,6 +15499,9 @@ type PutObjectInput struct {
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+ // The tag-set for the object. The tag-set must be encoded as URL Query parameters
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
@@ -9041,6 +15537,157 @@ func (s *PutObjectInput) Validate() error {
return nil
}
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+ s.ACL = &v
+ return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+ s.CacheControl = &v
+ return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
+ s.ContentDisposition = &v
+ return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
+ s.ContentEncoding = &v
+ return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
+ s.ContentLanguage = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
+ s.ContentType = &v
+ return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
+ s.Expires = &v
+ return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
+ s.GrantFullControl = &v
+ return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
+ s.GrantRead = &v
+ return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
+ s.GrantReadACP = &v
+ return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
+ s.Metadata = v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+ s.Tagging = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
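The new Tagging header field and its setter slot into the same pattern: tags supplied this way travel with the upload itself rather than requiring a separate tagging call afterwards. A sketch assuming an existing *s3.S3 client named svc (imports: strings and service/s3); the bucket, key, and tag values are placeholders:

    func putWithTags(svc *s3.S3) error {
        input := new(s3.PutObjectInput).
            SetBucket("example-bucket").
            SetKey("greetings/hello.txt").
            SetBody(strings.NewReader("hello world")). // any io.ReadSeeker works
            SetContentType("text/plain").
            SetTagging("project=demo&stage=test") // URL-query encoding, per the field comment

        _, err := svc.PutObject(input)
        return err
    }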
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
type PutObjectOutput struct {
_ struct{} `type:"structure"`
@@ -9087,8 +15734,157 @@ func (s PutObjectOutput) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
+ s.Expiration = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
+type PutObjectTaggingInput struct {
+ _ struct{} `type:"structure" payload:"Tagging"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Tagging is a required field
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+ s.Key = &v
+ return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
// Container for specifying a configuration when you want Amazon S3 to publish
// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
type QueueConfiguration struct {
_ struct{} `type:"structure"`
@@ -9136,6 +15932,31 @@ func (s *QueueConfiguration) Validate() error {
return nil
}
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+ s.QueueArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
type QueueConfigurationDeprecated struct {
_ struct{} `type:"structure"`
@@ -9161,6 +15982,31 @@ func (s QueueConfigurationDeprecated) GoString() string {
return s.String()
}
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
type Redirect struct {
_ struct{} `type:"structure"`
@@ -9199,6 +16045,37 @@ func (s Redirect) GoString() string {
return s.String()
}
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+ s.HostName = &v
+ return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+ s.HttpRedirectCode = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+ s.Protocol = &v
+ return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+ s.ReplaceKeyPrefixWith = &v
+ return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+ s.ReplaceKeyWith = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
type RedirectAllRequestsTo struct {
_ struct{} `type:"structure"`
@@ -9235,8 +16112,21 @@ func (s *RedirectAllRequestsTo) Validate() error {
return nil
}
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
+
// Container for replication rules. You can add as many as 1,000 rules. Total
// replication configuration size can be up to 2 MB.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
type ReplicationConfiguration struct {
_ struct{} `type:"structure"`
@@ -9289,6 +16179,19 @@ func (s *ReplicationConfiguration) Validate() error {
return nil
}
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+ s.Role = &v
+ return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+ s.Rules = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule
type ReplicationRule struct {
_ struct{} `type:"structure"`
@@ -9345,6 +16248,31 @@ func (s *ReplicationRule) Validate() error {
return nil
}
+// SetDestination sets the Destination field's value.
+func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
+ s.Destination = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule {
+ s.ID = &v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
type RequestPaymentConfiguration struct {
_ struct{} `type:"structure"`
@@ -9377,6 +16305,13 @@ func (s *RequestPaymentConfiguration) Validate() error {
return nil
}
+// SetPayer sets the Payer field's value.
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
+ s.Payer = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest
type RestoreObjectInput struct {
_ struct{} `type:"structure" payload:"RestoreRequest"`
@@ -9431,6 +16366,37 @@ func (s *RestoreObjectInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+ s.RestoreRequest = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+ s.VersionId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
type RestoreObjectOutput struct {
_ struct{} `type:"structure"`
@@ -9449,6 +16415,13 @@ func (s RestoreObjectOutput) GoString() string {
return s.String()
}
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
type RestoreRequest struct {
_ struct{} `type:"structure"`
@@ -9456,6 +16429,9 @@ type RestoreRequest struct {
//
// Days is a required field
Days *int64 `type:"integer" required:"true"`
+
+ // Glacier related parameters pertaining to this job.
+ GlacierJobParameters *GlacierJobParameters `type:"structure"`
}
// String returns the string representation
@@ -9474,6 +16450,11 @@ func (s *RestoreRequest) Validate() error {
if s.Days == nil {
invalidParams.Add(request.NewErrParamRequired("Days"))
}
+ if s.GlacierJobParameters != nil {
+ if err := s.GlacierJobParameters.Validate(); err != nil {
+ invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -9481,6 +16462,19 @@ func (s *RestoreRequest) Validate() error {
return nil
}
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+ s.Days = &v
+ return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+ s.GlacierJobParameters = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
type RoutingRule struct {
_ struct{} `type:"structure"`
@@ -9521,6 +16515,19 @@ func (s *RoutingRule) Validate() error {
return nil
}
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+ s.Condition = v
+ return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+ s.Redirect = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
type Rule struct {
_ struct{} `type:"structure"`
@@ -9587,6 +16594,153 @@ func (s *Rule) Validate() error {
return nil
}
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
+ s.AbortIncompleteMultipartUpload = v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
+ s.Expiration = v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Rule) SetID(v string) *Rule {
+ s.ID = &v
+ return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
+ s.NoncurrentVersionExpiration = v
+ return s
+}
+
+// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
+func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
+ s.NoncurrentVersionTransition = v
+ return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Rule) SetPrefix(v string) *Rule {
+ s.Prefix = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Rule) SetStatus(v string) *Rule {
+ s.Status = &v
+ return s
+}
+
+// SetTransition sets the Transition field's value.
+func (s *Rule) SetTransition(v *Transition) *Rule {
+ s.Transition = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+type StorageClassAnalysis struct {
+ _ struct{} `type:"structure"`
+
+ // A container used to describe how data related to the storage class analysis
+ // should be exported.
+ DataExport *StorageClassAnalysisDataExport `type:"structure"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysis) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysis) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysis) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
+ if s.DataExport != nil {
+ if err := s.DataExport.Validate(); err != nil {
+ invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDataExport sets the DataExport field's value.
+func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
+ s.DataExport = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
+type StorageClassAnalysisDataExport struct {
+ _ struct{} `type:"structure"`
+
+ // The place to store the data for an analysis.
+ //
+ // Destination is a required field
+ Destination *AnalyticsExportDestination `type:"structure" required:"true"`
+
+ // The version of the output schema to use when exporting data. Must be V_1.
+ //
+ // OutputSchemaVersion is a required field
+ OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysisDataExport) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysisDataExport) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysisDataExport) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.OutputSchemaVersion == nil {
+ invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
+ s.Destination = v
+ return s
+}
+
+// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
+func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
+ s.OutputSchemaVersion = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
type Tag struct {
_ struct{} `type:"structure"`
@@ -9630,6 +16784,19 @@ func (s *Tag) Validate() error {
return nil
}
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
type Tagging struct {
_ struct{} `type:"structure"`
@@ -9670,6 +16837,13 @@ func (s *Tagging) Validate() error {
return nil
}
+// SetTagSet sets the TagSet field's value.
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
+ s.TagSet = v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
type TargetGrant struct {
_ struct{} `type:"structure"`
@@ -9704,8 +16878,21 @@ func (s *TargetGrant) Validate() error {
return nil
}
+// SetGrantee sets the Grantee field's value.
+func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
+ s.Grantee = v
+ return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+ s.Permission = &v
+ return s
+}
+
// Container for specifying the configuration when you want Amazon S3 to publish
// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
type TopicConfiguration struct {
_ struct{} `type:"structure"`
@@ -9753,6 +16940,31 @@ func (s *TopicConfiguration) Validate() error {
return nil
}
+// SetEvents sets the Events field's value.
+func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
+ s.Events = v
+ return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
+ s.Filter = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
+ s.Id = &v
+ return s
+}
+
+// SetTopicArn sets the TopicArn field's value.
+func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
+ s.TopicArn = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
type TopicConfigurationDeprecated struct {
_ struct{} `type:"structure"`
@@ -9780,6 +16992,31 @@ func (s TopicConfigurationDeprecated) GoString() string {
return s.String()
}
+// SetEvent sets the Event field's value.
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
+ s.Event = &v
+ return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
+ s.Events = v
+ return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
+ s.Id = &v
+ return s
+}
+
+// SetTopic sets the Topic field's value.
+func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
+ s.Topic = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
type Transition struct {
_ struct{} `type:"structure"`
@@ -9805,6 +17042,25 @@ func (s Transition) GoString() string {
return s.String()
}
+// SetDate sets the Date field's value.
+func (s *Transition) SetDate(v time.Time) *Transition {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+ s.Days = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+ s.StorageClass = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
type UploadPartCopyInput struct {
_ struct{} `type:"structure"`
@@ -9925,6 +17181,109 @@ func (s *UploadPartCopyInput) Validate() error {
return nil
}
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+ s.CopySource = &v
+ return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfMatch = &v
+ return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfModifiedSince = &v
+ return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+ s.CopySourceIfNoneMatch = &v
+ return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+ s.CopySourceIfUnmodifiedSince = &v
+ return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+ s.CopySourceRange = &v
+ return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerAlgorithm = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKey = &v
+ return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.CopySourceSSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
type UploadPartCopyOutput struct {
_ struct{} `type:"structure" payload:"CopyPartResult"`
@@ -9967,6 +17326,49 @@ func (s UploadPartCopyOutput) GoString() string {
return s.String()
}
+// SetCopyPartResult sets the CopyPartResult field's value.
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
+ s.CopyPartResult = v
+ return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+ s.CopySourceVersionId = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
type UploadPartInput struct {
_ struct{} `type:"structure" payload:"Body"`
@@ -10056,6 +17458,67 @@ func (s *UploadPartInput) Validate() error {
return nil
}
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+ s.Body = v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+ s.Bucket = &v
+ return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+ s.ContentLength = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
+ s.SSECustomerKey = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
+ s.UploadId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput
type UploadPartOutput struct {
_ struct{} `type:"structure"`
@@ -10095,6 +17558,43 @@ func (s UploadPartOutput) GoString() string {
return s.String()
}
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+ s.ETag = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration
type VersioningConfiguration struct {
_ struct{} `type:"structure"`
@@ -10117,6 +17617,19 @@ func (s VersioningConfiguration) GoString() string {
return s.String()
}
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+ s.MFADelete = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+ s.Status = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration
type WebsiteConfiguration struct {
_ struct{} `type:"structure"`
@@ -10174,6 +17687,35 @@ func (s *WebsiteConfiguration) Validate() error {
return nil
}
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+const (
+ // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value
+ AnalyticsS3ExportFileFormatCsv = "CSV"
+)
+
const (
// BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
BucketAccelerateStatusEnabled = "Enabled"
@@ -10308,6 +17850,47 @@ const (
)
const (
+ // InventoryFormatCsv is a InventoryFormat enum value
+ InventoryFormatCsv = "CSV"
+)
+
+const (
+ // InventoryFrequencyDaily is a InventoryFrequency enum value
+ InventoryFrequencyDaily = "Daily"
+
+ // InventoryFrequencyWeekly is a InventoryFrequency enum value
+ InventoryFrequencyWeekly = "Weekly"
+)
+
+const (
+ // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsAll = "All"
+
+ // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value
+ InventoryIncludedObjectVersionsCurrent = "Current"
+)
+
+const (
+ // InventoryOptionalFieldSize is a InventoryOptionalField enum value
+ InventoryOptionalFieldSize = "Size"
+
+ // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value
+ InventoryOptionalFieldLastModifiedDate = "LastModifiedDate"
+
+ // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value
+ InventoryOptionalFieldStorageClass = "StorageClass"
+
+ // InventoryOptionalFieldEtag is a InventoryOptionalField enum value
+ InventoryOptionalFieldEtag = "ETag"
+
+ // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value
+ InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded"
+
+ // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value
+ InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
+)
+
+const (
// MFADeleteEnabled is a MFADelete enum value
MFADeleteEnabled = "Enabled"
@@ -10461,6 +18044,30 @@ const (
)
const (
+ // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value
+ StorageClassAnalysisSchemaVersionV1 = "V_1"
+)
+
+const (
+ // TaggingDirectiveCopy is a TaggingDirective enum value
+ TaggingDirectiveCopy = "COPY"
+
+ // TaggingDirectiveReplace is a TaggingDirective enum value
+ TaggingDirectiveReplace = "REPLACE"
+)
+
+const (
+ // TierStandard is a Tier enum value
+ TierStandard = "Standard"
+
+ // TierBulk is a Tier enum value
+ TierBulk = "Bulk"
+
+ // TierExpedited is a Tier enum value
+ TierExpedited = "Expedited"
+)
+
+const (
// TransitionStorageClassGlacier is a TransitionStorageClass enum value
TransitionStorageClassGlacier = "GLACIER"
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index f05d1ea..ec3ffe4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -1,7 +1,6 @@
package s3
import (
- "bytes"
"fmt"
"net/url"
"regexp"
@@ -83,29 +82,31 @@ func updateEndpointForAccelerate(r *request.Request) {
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
r.Error = awserr.New("InvalidParameterException",
- fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
+ fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
nil)
return
}
- // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com
- r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
-
- if aws.BoolValue(r.Config.UseDualStack) {
- host := []byte(r.HTTPRequest.URL.Host)
+ parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+ if len(parts) < 3 {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+ r.HTTPRequest.URL.Host), nil)
+ return
+ }
- // Strip region from hostname
- if idx := bytes.Index(host, accelElem); idx >= 0 {
- start := idx + len(accelElem)
- if end := bytes.IndexByte(host[start:], '.'); end >= 0 {
- end += start + 1
- copy(host[start:], host[end:])
- host = host[:len(host)-(end-start)]
- r.HTTPRequest.URL.Host = string(host)
- }
+ if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+ parts[0] = "s3-accelerate"
+ }
+ for i := 1; i+1 < len(parts); i++ {
+ if parts[i] == aws.StringValue(r.Config.Region) {
+ parts = append(parts[:i], parts[i+1:]...)
+ break
}
}
+ r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
@@ -159,28 +160,3 @@ func moveBucketToHost(u *url.URL, bucket string) {
u.Path = "/"
}
}
-
-const s3HostPrefix = "s3"
-
-// replaceHostRegion replaces the S3 region string in the host with the
-// value provided. If v is empty the host prefix returned will be s3.
-func replaceHostRegion(host, v string) string {
- if !strings.HasPrefix(host, s3HostPrefix) {
- return host
- }
-
- suffix := host[len(s3HostPrefix):]
- for i := len(s3HostPrefix); i < len(host); i++ {
- if host[i] == '.' {
- // Trim until '.' leave the it in place.
- suffix = host[i:]
- break
- }
- }
-
- if len(v) == 0 {
- return fmt.Sprintf("s3%s", suffix)
- }
-
- return fmt.Sprintf("s3-%s%s", v, suffix)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 5833952..5e6f229 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -12,8 +12,9 @@ import (
)
// S3 is a client for Amazon S3.
-//The service client's operations are safe to be used concurrently.
+// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01
type S3 struct {
*client.Client
}
@@ -24,8 +25,11 @@ var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
-// A ServiceName is the name of the service the client will make API calls to.
-const ServiceName = "s3"
+// Service information constants
+const (
+ ServiceName = "s3" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
// New creates a new instance of the S3 client with a session.
// If additional configuration is needed for the client instance use the optional
@@ -38,17 +42,18 @@ const ServiceName = "s3"
// // Create a S3 client with additional configuration
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
- c := p.ClientConfig(ServiceName, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 {
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
svc := &S3{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
+ SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2006-03-01",
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
index ce65fcd..5a78fd3 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -5,7 +5,6 @@ import (
"io/ioutil"
"net/http"
- "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -17,8 +16,8 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
return
}
body := bytes.NewReader(b)
- r.HTTPResponse.Body = aws.ReadSeekCloser(body)
- defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0)
+ r.HTTPResponse.Body = ioutil.NopCloser(body)
+ defer body.Seek(0, 0)
if body.Len() == 0 {
// If there is no body don't attempt to parse the body.
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index e10ca8f..4459465 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -36,6 +36,7 @@ const opAssumeRole = "AssumeRole"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
op := &request.Operation{
Name: opAssumeRole,
@@ -169,6 +170,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
req, out := c.AssumeRoleRequest(input)
err := req.Send()
@@ -201,6 +203,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
op := &request.Operation{
Name: opAssumeRoleWithSAML,
@@ -327,6 +330,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
req, out := c.AssumeRoleWithSAMLRequest(input)
err := req.Send()
@@ -359,6 +363,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
op := &request.Operation{
Name: opAssumeRoleWithWebIdentity,
@@ -447,7 +452,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
// API, see the following resources:
//
-// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
//
@@ -514,6 +519,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
req, out := c.AssumeRoleWithWebIdentityRequest(input)
err := req.Send()
@@ -546,6 +552,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
op := &request.Operation{
Name: opDecodeAuthorizationMessage,
@@ -611,6 +618,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
req, out := c.DecodeAuthorizationMessageRequest(input)
err := req.Send()
@@ -643,6 +651,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
op := &request.Operation{
Name: opGetCallerIdentity,
@@ -671,6 +680,7 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
//
// See the AWS API reference guide for AWS Security Token Service's
// API operation GetCallerIdentity for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
req, out := c.GetCallerIdentityRequest(input)
err := req.Send()
@@ -703,6 +713,7 @@ const opGetFederationToken = "GetFederationToken"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
op := &request.Operation{
Name: opGetFederationToken,
@@ -761,7 +772,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
//
// * You cannot use these credentials to call any IAM APIs.
//
-// * You cannot call any STS APIs.
+// * You cannot call any STS APIs except GetCallerIdentity.
//
// Permissions
//
@@ -825,6 +836,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
req, out := c.GetFederationTokenRequest(input)
err := req.Send()
@@ -857,6 +869,7 @@ const opGetSessionToken = "GetSessionToken"
// fmt.Println(resp)
// }
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
op := &request.Operation{
Name: opGetSessionToken,
@@ -904,7 +917,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// * You cannot call any IAM APIs unless MFA authentication information is
// included in the request.
//
-// * You cannot call any STS API exceptAssumeRole.
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
// We recommend that you do not call GetSessionToken with root account credentials.
// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
@@ -938,12 +951,14 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
req, out := c.GetSessionTokenRequest(input)
err := req.Send()
return out, err
}
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
type AssumeRoleInput struct {
_ struct{} `type:"structure"`
@@ -970,10 +985,9 @@ type AssumeRoleInput struct {
// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
// in the IAM User Guide.
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@:\/-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:\/-
ExternalId *string `min:"2" type:"string"`
// An IAM policy in JSON format.
@@ -1017,10 +1031,9 @@ type AssumeRoleInput struct {
// requests using the temporary security credentials will expose the role session
// name to the external account in their CloudTrail logs.
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
//
// RoleSessionName is a required field
RoleSessionName *string `min:"2" type:"string" required:"true"`
@@ -1031,10 +1044,9 @@ type AssumeRoleInput struct {
// The value is either the serial number for a hardware device (such as GAHT12345678)
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
// The value provided by the MFA device, if the trust policy of the role being
@@ -1094,8 +1106,51 @@ func (s *AssumeRoleInput) Validate() error {
return nil
}
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+ s.ExternalId = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+ s.Policy = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+ s.TokenCode = &v
+ return s
+}
+
// Contains the response to a successful AssumeRole request, including temporary
// AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
type AssumeRoleOutput struct {
_ struct{} `type:"structure"`
@@ -1131,6 +1186,25 @@ func (s AssumeRoleOutput) GoString() string {
return s.String()
}
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
type AssumeRoleWithSAMLInput struct {
_ struct{} `type:"structure"`
@@ -1239,8 +1313,39 @@ func (s *AssumeRoleWithSAMLInput) Validate() error {
return nil
}
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
+
// Contains the response to a successful AssumeRoleWithSAML request, including
// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
type AssumeRoleWithSAMLOutput struct {
_ struct{} `type:"structure"`
@@ -1304,6 +1409,55 @@ func (s AssumeRoleWithSAMLOutput) GoString() string {
return s.String()
}
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+ s.Issuer = &v
+ return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+ s.NameQualifier = &v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+ s.Subject = &v
+ return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+ s.SubjectType = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
type AssumeRoleWithWebIdentityInput struct {
_ struct{} `type:"structure"`
@@ -1365,10 +1519,9 @@ type AssumeRoleWithWebIdentityInput struct {
// are associated with that user. This session name is included as part of the
// ARN and assumed role ID in the AssumedRoleUser response element.
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
//
// RoleSessionName is a required field
RoleSessionName *string `min:"2" type:"string" required:"true"`
@@ -1429,8 +1582,45 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error {
return nil
}
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+ s.Policy = &v
+ return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+ s.ProviderId = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+ s.WebIdentityToken = &v
+ return s
+}
+
// Contains the response to a successful AssumeRoleWithWebIdentity request,
// including temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
type AssumeRoleWithWebIdentityOutput struct {
_ struct{} `type:"structure"`
@@ -1485,8 +1675,45 @@ func (s AssumeRoleWithWebIdentityOutput) GoString() string {
return s.String()
}
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Provider = &v
+ return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SubjectFromWebIdentityToken = &v
+ return s
+}
+
// The identifiers for the temporary security credentials that the operation
// returns.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
type AssumedRoleUser struct {
_ struct{} `type:"structure"`
@@ -1516,7 +1743,20 @@ func (s AssumedRoleUser) GoString() string {
return s.String()
}
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+ s.Arn = &v
+ return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+ s.AssumedRoleId = &v
+ return s
+}
+
// AWS credentials for API authentication.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
type Credentials struct {
_ struct{} `type:"structure"`
@@ -1551,6 +1791,31 @@ func (s Credentials) GoString() string {
return s.String()
}
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+ s.SessionToken = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
type DecodeAuthorizationMessageInput struct {
_ struct{} `type:"structure"`
@@ -1586,9 +1851,16 @@ func (s *DecodeAuthorizationMessageInput) Validate() error {
return nil
}
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+ s.EncodedMessage = &v
+ return s
+}
+
// A document that contains additional information about the authorization status
// of a request from an encoded message that is returned in response to an AWS
// request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
type DecodeAuthorizationMessageOutput struct {
_ struct{} `type:"structure"`
@@ -1606,7 +1878,14 @@ func (s DecodeAuthorizationMessageOutput) GoString() string {
return s.String()
}
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+ s.DecodedMessage = &v
+ return s
+}
+
// Identifiers for the federated user that is associated with the credentials.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
type FederatedUser struct {
_ struct{} `type:"structure"`
@@ -1635,6 +1914,19 @@ func (s FederatedUser) GoString() string {
return s.String()
}
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+ s.Arn = &v
+ return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+ s.FederatedUserId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
type GetCallerIdentityInput struct {
_ struct{} `type:"structure"`
}
@@ -1651,6 +1943,7 @@ func (s GetCallerIdentityInput) GoString() string {
// Contains the response to a successful GetCallerIdentity request, including
// information about the entity making the request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
type GetCallerIdentityOutput struct {
_ struct{} `type:"structure"`
@@ -1678,6 +1971,25 @@ func (s GetCallerIdentityOutput) GoString() string {
return s.String()
}
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+ s.Account = &v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+ s.Arn = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+ s.UserId = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
type GetFederationTokenInput struct {
_ struct{} `type:"structure"`
@@ -1695,10 +2007,9 @@ type GetFederationTokenInput struct {
// the federated user name in a resource-based policy, such as in an Amazon
// S3 bucket policy.
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
//
// Name is a required field
Name *string `min:"2" type:"string" required:"true"`
@@ -1767,8 +2078,27 @@ func (s *GetFederationTokenInput) Validate() error {
return nil
}
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+ s.Name = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+ s.Policy = &v
+ return s
+}
+
// Contains the response to a successful GetFederationToken request, including
// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
type GetFederationTokenOutput struct {
_ struct{} `type:"structure"`
@@ -1803,6 +2133,25 @@ func (s GetFederationTokenOutput) GoString() string {
return s.String()
}
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+ s.FederatedUser = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
type GetSessionTokenInput struct {
_ struct{} `type:"structure"`
@@ -1822,10 +2171,9 @@ type GetSessionTokenInput struct {
// You can find the device for an IAM user by going to the AWS Management Console
// and viewing the user's security credentials.
//
- // The format for this parameter, as described by its regex pattern, is a string
- // of characters consisting of upper- and lower-case alphanumeric characters
- // with no spaces. You can also include underscores or any of the following
- // characters: =,.@-
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
SerialNumber *string `min:"9" type:"string"`
// The value provided by the MFA device, if MFA is required. If any policy requires
@@ -1868,8 +2216,27 @@ func (s *GetSessionTokenInput) Validate() error {
return nil
}
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
// Contains the response to a successful GetSessionToken request, including
// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
type GetSessionTokenOutput struct {
_ struct{} `type:"structure"`
@@ -1892,3 +2259,9 @@ func (s GetSessionTokenOutput) String() string {
func (s GetSessionTokenOutput) GoString() string {
return s.String()
}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
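The setters added above are generated fluent mutators: each one stores the value and returns the receiver, so request structs can be built by chaining. The sketch below is not part of this change set and uses placeholder region, ARN and token values; it only illustrates how the new `Set*` methods on `GetSessionTokenInput` compose with the STS client.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sts.New(sess)

	// Each generated Set* method assigns the value and returns the input
	// struct, so a request can be assembled in a single chained expression.
	input := new(sts.GetSessionTokenInput).
		SetDurationSeconds(3600).
		SetSerialNumber("arn:aws:iam::123456789012:mfa/example"). // placeholder MFA device
		SetTokenCode("123456")                                    // placeholder one-time code

	out, err := svc.GetSessionToken(input)
	if err != nil {
		fmt.Println("GetSessionToken failed:", err)
		return
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```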
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index a9b9b32..9c4bfb8 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -56,8 +56,9 @@ import (
// successfully made to STS, who made the request, when it was made, and so
// on. To learn more about CloudTrail, including how to turn it on and find
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
-//The service client's operations are safe to be used concurrently.
+// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15
type STS struct {
*client.Client
}
@@ -68,8 +69,11 @@ var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
-// A ServiceName is the name of the service the client will make API calls to.
-const ServiceName = "sts"
+// Service information constants
+const (
+ ServiceName = "sts" // Service endpoint prefix API calls made to.
+ EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance use the optional
@@ -82,17 +86,18 @@ const ServiceName = "sts"
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
- c := p.ClientConfig(ServiceName, cfgs...)
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
svc := &STS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
+ SigningName: signingName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2011-06-15",
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
index a939d75..22a4234 100644
--- a/vendor/github.com/go-ini/ini/README.md
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -9,7 +9,7 @@ Package ini provides INI file read and write functionality in Go.
## Feature
-- Load multiple data sources(`[]byte` or file) with overwrites.
+- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
- Read with recursion values.
- Read with parent-child sections.
- Read with auto-increment key names.
@@ -44,10 +44,10 @@ Please add `-u` flag to update in the future.
### Loading from data sources
-A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error.
+A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
```go
-cfg, err := ini.Load([]byte("raw data"), "filename")
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
```
Or start with an empty object:
@@ -106,6 +106,16 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
The values of those keys are always `true`, and when you save to a file, they will be kept in the same format as you read.
+#### Comment
+
+Take care that the following formats will be treated as comments:
+
+1. Line begins with `#` or `;`
+2. Words after `#` or `;`
+3. Words after the section name (i.e. words after `[some section name]`)
+
+If you want to save a value that contains `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
+
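A minimal sketch (not part of the upstream README) of the quoting rule above, assuming the package is imported as `ini`; the section and key names are made up:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// "color" is wrapped in backticks, so its "#" should be preserved;
	// in "plain" everything from "#" onwards is parsed as a comment.
	data := []byte("[ui]\ncolor = `#ff0000`\nplain = #ff0000\n")

	cfg, err := ini.Load(data)
	if err != nil {
		panic(err)
	}

	fmt.Printf("color=%q\n", cfg.Section("ui").Key("color").String()) // expected to keep the "#": #ff0000
	fmt.Printf("plain=%q\n", cfg.Section("ui").Key("plain").String()) // expected to be empty
}
```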
### Working with sections
To get a section, you would need to:
@@ -123,7 +133,7 @@ section, err := cfg.GetSection("")
When you're pretty sure the section exists, the following code could make your life easier:
```go
-section := cfg.Section("")
+section := cfg.Section("section name")
```
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
@@ -400,6 +410,12 @@ cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
+By default, spaces are used to align the `=` sign between keys and values. To disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
## Advanced Usage
### Recursive Values
@@ -447,6 +463,21 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
+### Unparseable Sections
+
+Sometimes you have sections that contain raw content rather than key-value pairs. To handle such a case, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------ end --- */
+```
+
### Auto-increment Key Names
If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and each section has an independent counter.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
index 2178e47..3b4fb66 100644
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -2,7 +2,7 @@
## Features
-- Load multiple data sources (`[]byte` or file) with overwrites.
+- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
- Read values recursively.
- Read with parent-child sections.
- Read with auto-increment key names.
@@ -37,10 +37,10 @@
### Loading from data sources
-A **Data Source** is either raw data of type `[]byte` or a file path of type `string`. You can load **as many data sources as you want**. Passing other types will simply return an error.
+A **Data Source** is either raw data of type `[]byte`, a file path of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
```go
-cfg, err := ini.Load([]byte("raw data"), "filename")
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
```
Or start with an empty object:
@@ -99,6 +99,16 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
The values of those keys are always `true`, and when saving to a file only the key names are written.
+#### Comments
+
+The following content will be treated as comments:
+
+1. Any line that begins with `#` or `;`
+2. Anything after `#` or `;`
+3. Text after a section name (i.e. anything after `[section name]`)
+
+If you want to use a value that contains `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
+
### Working with sections
To get a specific section:
@@ -116,7 +126,7 @@ section, err := cfg.GetSection("")
When you are pretty sure the section exists, the following shortcut can make your life easier:
```go
-section := cfg.Section("")
+section := cfg.Section("section name")
```
What happens if you guessed wrong and the section does not actually exist? No worries, it automatically creates and returns the corresponding section object to you.
@@ -393,9 +403,15 @@ cfg.WriteTo(writer)
cfg.WriteToIndent(writer, "\t")
```
-### Advanced Usage
+By default, spaces are used to align the `=` sign between keys and values for prettier output. The following code disables that:
-#### Recursive Values
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
When retrieving values, the special syntax `%(<name>)s` is applied, where `<name>` can be a key name in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding value; if the key does not exist, an empty string is used instead. Up to 99 levels of recursive nesting are supported.
@@ -415,7 +431,7 @@ cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
```
-#### Parent-child sections
+### Parent-child sections
You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key does not exist in the child section, it will be looked up in its parent sections until there are no more parents.
@@ -440,7 +456,22 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
```
-#### Auto-increment Key Names
+### Unparseable Sections
+
+If you run into special sections that contain free-form raw text instead of the usual key-value pairs, you can handle them with `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------ end --- */
+```
+
+### Auto-increment Key Names
If a key name in the data source is `-`, it is treated as the special syntax for auto-increment key names, with the counter starting from 1; each section has its own independent counter.
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index cd065e7..50abd81 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
+ "io/ioutil"
"os"
"regexp"
"runtime"
@@ -36,7 +37,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
- _VERSION = "1.21.1"
+ _VERSION = "1.23.0"
)
// Version returns current package version literal.
@@ -108,7 +109,16 @@ type sourceData struct {
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
- return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
}
// File represents a combination of one or more INI files in memory.
@@ -149,6 +159,8 @@ func parseDataSource(source interface{}) (dataSource, error) {
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
@@ -164,6 +176,9 @@ type LoadOptions struct {
// AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
// This type of keys are mostly used in my.cnf.
AllowBooleanKeys bool
+ // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
}
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
@@ -233,6 +248,18 @@ func (f *File) NewSection(name string) (*Section, error) {
return f.sections[name], nil
}
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
@@ -386,6 +413,13 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
}
+ if sec.isRawSection {
+ if _, err = buf.WriteString(sec.rawBody); err != nil {
+ return 0, err
+ }
+ continue
+ }
+
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters so
// we need to take that into account in our calculation.
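Not part of this diff: a minimal sketch of how the new `NewRawSection` and the raw-section branch in `WriteToIndent` are meant to work together; the section name and body are arbitrary examples.

```go
package main

import (
	"os"

	"github.com/go-ini/ini"
)

func main() {
	cfg := ini.Empty()

	// NewRawSection stores the body verbatim and marks the section as raw,
	// so WriteTo emits the body unchanged after the section header instead
	// of formatting key-value pairs.
	if _, err := cfg.NewRawSection("COMMENTS", "<raw, unparsed text>\n"); err != nil {
		panic(err)
	}

	if _, err := cfg.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```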
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
index dc6df87..b0aabe3 100644
--- a/vendor/github.com/go-ini/ini/parser.go
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -48,16 +48,31 @@ func newParser(r io.Reader) *parser {
}
}
-// BOM handles header of BOM-UTF8 format.
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
- mask, err := p.buf.Peek(3)
+ mask, err := p.buf.Peek(2)
if err != nil && err != io.EOF {
return err
- } else if len(mask) < 3 {
+ } else if len(mask) < 2 {
return nil
- } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
p.buf.Read(mask)
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ p.buf.Read(mask)
+ }
}
return nil
}
@@ -235,6 +250,7 @@ func (f *File) parse(reader io.Reader) (err error) {
section, _ := f.NewSection(DEFAULT_SECTION)
var line []byte
+ var inUnparseableSection bool
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
@@ -280,6 +296,21 @@ func (f *File) parse(reader io.Reader) (err error) {
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
continue
}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
index bbb73ca..45d2f3b 100644
--- a/vendor/github.com/go-ini/ini/section.go
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -28,10 +28,19 @@ type Section struct {
keys map[string]*Key
keyList []string
keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
}
func newSection(f *File, name string) *Section {
- return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
}
// Name returns name of Section.
@@ -39,6 +48,12 @@ func (s *Section) Name() string {
return s.name
}
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
// NewKey creates a new key to given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 466ac86..1003707 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -18,6 +18,7 @@ Chris Moos <chris at tech9computers.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
DisposaBoy <disposaboy at dby.me>
+Egor Smolyakov <egorsmkv at gmail.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hanno Braun <mail at hannobraun.com>
@@ -38,11 +39,13 @@ Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
+Olivier Mengué <dolmen at cpan.org>
Paul Bonser <misterpib at gmail.com>
Runrioter Wung <runrioter at gmail.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
+Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Zhenye Xie <xiezhenye at gmail.com>
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 617ad80..6bcad7e 100644
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,4 +1,4 @@
-## HEAD
+## Version 1.3 (2016-12-01)
Changes:
@@ -8,6 +8,8 @@ Changes:
- TLS ServerName defaults to the host (#283)
- Refactoring (#400, #410, #437)
- Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
New Features:
@@ -21,6 +23,7 @@ New Features:
- Support for JSON field type (#414)
- Support for multi-statements and multi-results (#411, #431)
- DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
Bugfixes:
@@ -38,6 +41,8 @@ Bugfixes:
- Fixed parsing of floats into float64 when placeholders are used (#434)
- Fixed DSN tests with Go 1.7+ (#459)
- Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
## Version 1.2 (2014-06-03)
diff --git a/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md
deleted file mode 100644
index d9771f1..0000000
--- a/vendor/github.com/go-sql-driver/mysql/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-### Issue description
-Tell us what should happen and what happens instead
-
-### Example code
-```go
-If possible, please enter some example code here to reproduce the issue.
-```
-
-### Error log
-```
-If you have an error log, please paste it here.
-```
-
-### Configuration
-*Driver version (or git SHA):*
-
-*Go version:* run `go version` in your console
-
-*Server version:* E.g. MySQL 5.6, MariaDB 10.0.20
-
-*Server OS:* E.g. Debian 8.1 (Jessie), Windows 10
diff --git a/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 6f5c7eb..0000000
--- a/vendor/github.com/go-sql-driver/mysql/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### Description
-Please explain the changes you made here.
-
-### Checklist
-- [ ] Code compiles correctly
-- [ ] Created tests which fail without the change (if possible)
-- [ ] All tests passing
-- [ ] Extended the README / documentation, if necessary
-- [ ] Added myself / the copyright holder to the AUTHORS file
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index a110cf1..6452038 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -1,13 +1,9 @@
# Go-MySQL-Driver
-A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases)
-
-[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql)
-
---------------------------------------
* [Features](#features)
* [Requirements](#requirements)
@@ -30,11 +26,11 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
- * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
@@ -47,14 +43,14 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
---------------------------------------
## Installation
-Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from your shell:
```bash
$ go get github.com/go-sql-driver/mysql
```
-Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
@@ -99,13 +95,13 @@ Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mys
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
-See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
#### Address
For TCP and UDP networks, addresses have the form `host:port`.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
@@ -135,6 +131,15 @@ Default: false
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowNativePasswords=true` allows the use of the MySQL native password authentication method.
+
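A minimal sketch (not from the upstream README) of a DSN that enables the flag; host, credentials and database name are placeholders:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder credentials and address; the relevant part is the
	// allowNativePasswords=true parameter at the end of the DSN.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?allowNativePasswords=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```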
##### `allowOldPasswords`
```
@@ -215,11 +220,11 @@ Valid Values: <escaped name>
Default: UTC
```
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `maxAllowedPacket`
```
@@ -292,7 +297,7 @@ Valid Values: true, false, skip-verify, <name>
Default: false
```
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
##### `writeTimeout`
@@ -306,13 +311,21 @@ I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms
##### System Variables
-All other parameters are interpreted as system variables:
- * `autocommit`: `"SET autocommit=<value>"`
- * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone=<value>"`
- * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation=<value>"`
- * `param`: `"SET <param>=<value>"`
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with '
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`)
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
-*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!*
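A small illustration (not from the upstream README) of how the quoting and escaping rules combine, using only the standard library; the DSN pieces are placeholders:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// String system variables must be wrapped in single quotes and then
	// query-escaped, which yields the %27...%27 form shown in the examples.
	param := "time_zone=" + url.QueryEscape("'Europe/Paris'")
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?" + param // placeholder credentials

	fmt.Println(param) // time_zone=%27Europe%2FParis%27
	fmt.Println(dsn)
}
```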
#### Examples
```
@@ -377,17 +390,17 @@ Files must be whitelisted by registering them with `mysql.RegisterLocalFile(file
To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need it anymore.
-See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
### `time.Time` support
The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
### Unicode support
@@ -422,7 +435,7 @@ That means:
* When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
* You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
-Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index 562ddef..0022d1f 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -134,9 +134,9 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
-func handleAuthResult(mc *mysqlConn, cipher []byte) error {
+func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
// Read Result Packet
- err := mc.readResultOK()
+ cipher, err := mc.readResultOK()
if err == nil {
return nil // auth successful
}
@@ -150,10 +150,17 @@ func handleAuthResult(mc *mysqlConn, cipher []byte) error {
// Retry with old authentication method. Note: there are edge cases
// where this should work but doesn't; this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
+
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if cipher == nil {
+ cipher = oldCipher
+ }
+
if err = mc.writeOldAuthPacket(cipher); err != nil {
return err
}
- err = mc.readResultOK()
+ _, err = mc.readResultOK()
} else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
// Retry with clear text password for
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
@@ -161,7 +168,12 @@ func handleAuthResult(mc *mysqlConn, cipher []byte) error {
if err = mc.writeClearAuthPacket(); err != nil {
return err
}
- err = mc.readResultOK()
+ _, err = mc.readResultOK()
+ } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
+ if err = mc.writeNativeAuthPacket(cipher); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
}
return err
}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index 896be9e..ac00dce 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -46,6 +46,7 @@ type Config struct {
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
AllowOldPasswords bool // Allows the old insecure password method
ClientFoundRows bool // Return number of matching rows instead of rows changed
ColumnsWithAlias bool // Prepend table alias to column names
@@ -101,6 +102,15 @@ func (cfg *Config) FormatDSN() string {
}
}
+ if cfg.AllowNativePasswords {
+ if hasParam {
+ buf.WriteString("&allowNativePasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowNativePasswords=true")
+ }
+ }
+
if cfg.AllowOldPasswords {
if hasParam {
buf.WriteString("&allowOldPasswords=true")
@@ -381,6 +391,14 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
// Use old authentication mode (pre MySQL 4.1)
case "allowOldPasswords":
var isBool bool
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 1543a80..857854e 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -22,8 +22,9 @@ var (
ErrInvalidConn = errors.New("invalid connection")
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
- ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
ErrPktSync = errors.New("commands out of sync. You can't run this command now")
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 0f975bb..547357c 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -173,7 +173,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
- return mc.readResultOK()
+ _, err = mc.readResultOK()
+ return err
}
mc.readPacket()
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index f06752b..aafe979 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -25,9 +25,9 @@ import (
// Read packet to buffer 'data'
func (mc *mysqlConn) readPacket() ([]byte, error) {
- var payload []byte
+ var prevData []byte
for {
- // Read packet header
+ // read packet header
data, err := mc.buf.readNext(4)
if err != nil {
errLog.Print(err)
@@ -35,16 +35,10 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
return nil, driver.ErrBadConn
}
- // Packet Length [24 bit]
+ // packet length [24 bit]
pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
- if pktLen < 1 {
- errLog.Print(ErrMalformPkt)
- mc.Close()
- return nil, driver.ErrBadConn
- }
-
- // Check Packet Sync [8 bit]
+ // check packet sync [8 bit]
if data[3] != mc.sequence {
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
@@ -53,7 +47,20 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
}
mc.sequence++
- // Read packet body [pktLen bytes]
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24 - 1) bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, driver.ErrBadConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
errLog.Print(err)
@@ -61,18 +68,17 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
return nil, driver.ErrBadConn
}
- isLastPacket := (pktLen < maxPacketSize)
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
- // Zero allocations for non-splitting packets
- if isLastPacket && payload == nil {
- return data, nil
+ return append(prevData, data...), nil
}
- payload = append(payload, data...)
-
- if isLastPacket {
- return payload, nil
- }
+ prevData = append(prevData, data...)
}
}
@@ -372,6 +378,26 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
return mc.writePacket(data)
}
+// Native password authentication method
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
+ scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+
+ // Calculate the packet length
+ pktLen := len(scrambleBuff)
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return driver.ErrBadConn
+ }
+
+ // Add the scramble
+ copy(data[4:], scrambleBuff)
+
+ return mc.writePacket(data)
+}
+
/******************************************************************************
* Command Packets *
******************************************************************************/
@@ -445,36 +471,43 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
******************************************************************************/
// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() error {
+func (mc *mysqlConn) readResultOK() ([]byte, error) {
data, err := mc.readPacket()
if err == nil {
// packet indicator
switch data[0] {
case iOK:
- return mc.handleOkPacket(data)
+ return nil, mc.handleOkPacket(data)
case iEOF:
if len(data) > 1 {
- plugin := string(data[1:bytes.IndexByte(data, 0x00)])
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ plugin := string(data[1:pluginEndIndex])
+ cipher := data[pluginEndIndex+1 : len(data)-1]
+
if plugin == "mysql_old_password" {
// using old_passwords
- return ErrOldPassword
+ return cipher, ErrOldPassword
} else if plugin == "mysql_clear_password" {
// using clear text password
- return ErrCleartextPassword
+ return cipher, ErrCleartextPassword
+ } else if plugin == "mysql_native_password" {
+ // using mysql default authentication method
+ return cipher, ErrNativePassword
} else {
- return ErrUnknownPlugin
+ return cipher, ErrUnknownPlugin
}
} else {
- return ErrOldPassword
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, ErrOldPassword
}
default: // Error otherwise
- return mc.handleErrorPacket(data)
+ return nil, mc.handleErrorPacket(data)
}
}
- return err
+ return nil, err
}
// Result Set Header Packet
@@ -674,11 +707,15 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if data[0] == iEOF && len(data) == 5 {
// server_status [2 bytes]
rows.mc.status = readStatus(data[3:])
- if err := rows.mc.discardResults(); err != nil {
- return err
+ err = rows.mc.discardResults()
+ if err == nil {
+ err = io.EOF
+ } else {
+ // connection unusable
+ rows.mc.Close()
}
rows.mc = nil
- return io.EOF
+ return err
}
if data[0] == iERR {
rows.mc = nil
@@ -1079,11 +1116,15 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
rows.mc.status = readStatus(data[3:])
- if err := rows.mc.discardResults(); err != nil {
- return err
+ err = rows.mc.discardResults()
+ if err == nil {
+ err = io.EOF
+ } else {
+ // connection unusable
+ rows.mc.Close()
}
rows.mc = nil
- return io.EOF
+ return err
}
rows.mc = nil
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index ead9a6b..7f9b045 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -24,7 +24,10 @@ type mysqlStmt struct {
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.netConn == nil {
- errLog.Print(ErrInvalidConn)
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index 68b9b30..2b30f84 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -1075,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
exts := structPointer_Extensions(base, p.field)
- if err := encodeExtensions(exts); err != nil {
+
+ v, mu := exts.extensionsRead()
+ if v == nil {
+ return nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ if err := encodeExtensionsMap(v); err != nil {
return err
}
- v, _ := exts.extensionsRead()
return o.enc_map_body(v)
}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index 6b9b363..eaad218 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -154,6 +154,7 @@ type ExtensionDesc struct {
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 4fd0531..61f83c1 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -592,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
- sv.Field(oop.Field).Set(nv)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go
index 6bf38b4..557001e 100644
--- a/vendor/github.com/google/go-github/github/activity_events.go
+++ b/vendor/github.com/google/go-github/github/activity_events.go
@@ -55,16 +55,24 @@ func (e *Event) Payload() (payload interface{}) {
payload = &IssueCommentEvent{}
case "IssuesEvent":
payload = &IssuesEvent{}
+ case "LabelEvent":
+ payload = &LabelEvent{}
case "MemberEvent":
payload = &MemberEvent{}
case "MembershipEvent":
payload = &MembershipEvent{}
+ case "MilestoneEvent":
+ payload = &MilestoneEvent{}
case "PageBuildEvent":
payload = &PageBuildEvent{}
+ case "PingEvent":
+ payload = &PingEvent{}
case "PublicEvent":
payload = &PublicEvent{}
case "PullRequestEvent":
payload = &PullRequestEvent{}
+ case "PullRequestReviewEvent":
+ payload = &PullRequestReviewEvent{}
case "PullRequestReviewCommentEvent":
payload = &PullRequestReviewCommentEvent{}
case "PushEvent":
@@ -136,7 +144,7 @@ func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOpti
// ListIssueEventsForRepository lists issue events for a repository.
//
// GitHub API docs: http://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository
-func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -148,7 +156,7 @@ func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *
return nil, nil, err
}
- events := new([]*Event)
+ events := new([]*IssueEvent)
resp, err := s.client.Do(req, events)
if err != nil {
return nil, resp, err
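Not part of this diff: a rough sketch of how a caller might branch on the additional payload types returned by `Event.Payload()`; it assumes these event structs expose an `Action *string` field, which may differ by library version.

```go
package eventsdemo

import (
	"fmt"

	"github.com/google/go-github/github"
)

// Describe branches on the concrete payload types that Event.Payload()
// can now return, including the newly added event kinds.
func Describe(event *github.Event) string {
	switch payload := event.Payload().(type) {
	case *github.LabelEvent:
		return "label event: " + strOrUnknown(payload.Action)
	case *github.MilestoneEvent:
		return "milestone event: " + strOrUnknown(payload.Action)
	case *github.PullRequestReviewEvent:
		return "pull request review: " + strOrUnknown(payload.Action)
	case *github.PingEvent:
		return "ping event"
	default:
		return fmt.Sprintf("unhandled payload type %T", payload)
	}
}

func strOrUnknown(s *string) string {
	if s == nil {
		return "unknown"
	}
	return *s
}
```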
diff --git a/vendor/github.com/google/go-github/github/activity_watching.go b/vendor/github.com/google/go-github/github/activity_watching.go
index 6bf91dd..9a27541 100644
--- a/vendor/github.com/google/go-github/github/activity_watching.go
+++ b/vendor/github.com/google/go-github/github/activity_watching.go
@@ -103,6 +103,10 @@ func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscr
// SetRepositorySubscription sets the subscription for the specified repository
// for the authenticated user.
//
+// To watch a repository, set subscription.Subscribed to true.
+// To ignore notifications made within a repository, set subscription.Ignored to true.
+// To stop watching a repository, use DeleteRepositorySubscription.
+//
// GitHub API Docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
@@ -124,6 +128,9 @@ func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscrip
// DeleteRepositorySubscription deletes the subscription for the specified
// repository for the authenticated user.
//
+// This is used to stop watching a repository. To control whether or not to
+// receive notifications from a repository, use SetRepositorySubscription.
+//
// GitHub API Docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
func (s *ActivityService) DeleteRepositorySubscription(owner, repo string) (*Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
diff --git a/vendor/github.com/google/go-github/github/admin.go b/vendor/github.com/google/go-github/github/admin.go
new file mode 100644
index 0000000..44d7a9f
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/admin.go
@@ -0,0 +1,98 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// AdminService handles communication with the admin related methods of the
+// GitHub API. These API routes are normally only accessible for GitHub
+// Enterprise installations.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/
+type AdminService service
+
+// TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group.
+type TeamLDAPMapping struct {
+ ID *int `json:"id,omitempty"`
+ LDAPDN *string `json:"ldap_dn,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Slug *string `json:"slug,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Privacy *string `json:"privacy,omitempty"`
+ Permission *string `json:"permission,omitempty"`
+
+ MembersURL *string `json:"members_url,omitempty"`
+ RepositoriesURL *string `json:"repositories_url,omitempty"`
+}
+
+func (m TeamLDAPMapping) String() string {
+ return Stringify(m)
+}
+
+// UserLDAPMapping represents the mapping between a GitHub user and an LDAP user.
+type UserLDAPMapping struct {
+ ID *int `json:"id,omitempty"`
+ LDAPDN *string `json:"ldap_dn,omitempty"`
+ Login *string `json:"login,omitempty"`
+ AvatarURL *string `json:"avatar_url,omitempty"`
+ GravatarID *string `json:"gravatar_id,omitempty"`
+ Type *string `json:"type,omitempty"`
+ SiteAdmin *bool `json:"site_admin,omitempty"`
+
+ URL *string `json:"url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ FollowingURL *string `json:"following_url,omitempty"`
+ FollowersURL *string `json:"followers_url,omitempty"`
+ GistsURL *string `json:"gists_url,omitempty"`
+ OrganizationsURL *string `json:"organizations_url,omitempty"`
+ ReceivedEventsURL *string `json:"received_events_url,omitempty"`
+ ReposURL *string `json:"repos_url,omitempty"`
+ StarredURL *string `json:"starred_url,omitempty"`
+ SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
+}
+
+func (m UserLDAPMapping) String() string {
+ return Stringify(m)
+}
+
+// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user
+func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) {
+ u := fmt.Sprintf("admin/ldap/users/%v/mapping", user)
+ req, err := s.client.NewRequest("PATCH", u, mapping)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(UserLDAPMapping)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
+
+// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group.
+//
+// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team
+func (s *AdminService) UpdateTeamLDAPMapping(team int, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) {
+ u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team)
+ req, err := s.client.NewRequest("PATCH", u, mapping)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ m := new(TeamLDAPMapping)
+ resp, err := s.client.Do(req, m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
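
A minimal sketch of the new AdminService against a GitHub Enterprise instance; the LDAP DNs, user login, and team ID are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // an Enterprise client with admin credentials is assumed

        user := &github.UserLDAPMapping{
            LDAPDN: github.String("uid=jdoe,ou=users,dc=example,dc=com"),
        }
        if _, _, err := client.Admin.UpdateUserLDAPMapping("jdoe", user); err != nil {
            log.Fatal(err)
        }

        team := &github.TeamLDAPMapping{
            LDAPDN: github.String("cn=engineering,ou=groups,dc=example,dc=com"),
        }
        m, _, err := client.Admin.UpdateTeamLDAPMapping(42, team)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(m)
    }
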
diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go
index 35ce9a9..d5a5e63 100644
--- a/vendor/github.com/google/go-github/github/authorizations.go
+++ b/vendor/github.com/google/go-github/github/authorizations.go
@@ -346,9 +346,6 @@ func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
grants := []*Grant{}
resp, err := s.client.Do(req, &grants)
if err != nil {
@@ -368,9 +365,6 @@ func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
grant := new(Grant)
resp, err := s.client.Do(req, grant)
if err != nil {
@@ -392,9 +386,6 @@ func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
return nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
return s.client.Do(req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go
index 2b61068..659dd82 100644
--- a/vendor/github.com/google/go-github/github/doc.go
+++ b/vendor/github.com/google/go-github/github/doc.go
@@ -86,6 +86,21 @@ To detect an API rate limit error, you can check if its type is *github.RateLimi
Learn more about GitHub rate limiting at
http://developer.github.com/v3/#rate-limiting.
+Accepted Status
+
+Some endpoints may return a 202 Accepted status code, meaning that the
+information required is not yet ready and was scheduled to be gathered on
+the GitHub side. Methods known to behave like this are documented
+accordingly.
+
+To detect this condition, you can check if the error's type is
+*github.AcceptedError:
+
+ stats, _, err := client.Repositories.ListContributorsStats(org, repo)
+ if _, ok := err.(*github.AcceptedError); ok {
+ log.Println("scheduled on GitHub side")
+ }
+
Conditional Requests
The GitHub API has good support for conditional requests which will help
diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go
index 16f89b0..a1ffa69 100644
--- a/vendor/github.com/google/go-github/github/event_types.go
+++ b/vendor/github.com/google/go-github/github/event_types.go
@@ -209,6 +209,22 @@ type IssuesEvent struct {
Sender *User `json:"sender,omitempty"`
}
+// LabelEvent is triggered when a repository's label is created, edited, or deleted.
+// The Webhook event name is "label"
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#labelevent
+type LabelEvent struct {
+ // Action is the action that was performed. Possible values are:
+ // "created", "edited", "deleted"
+ Action *string `json:"action,omitempty"`
+ Label *Label `json:"label,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+}
+
// MemberEvent is triggered when a user is added as a collaborator to a repository.
// The Webhook event name is "member".
//
@@ -243,6 +259,23 @@ type MembershipEvent struct {
Sender *User `json:"sender,omitempty"`
}
+// MilestoneEvent is triggered when a milestone is created, closed, opened, edited, or deleted.
+// The Webhook event name is "milestone".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#milestoneevent
+type MilestoneEvent struct {
+ // Action is the action that was performed. Possible values are:
+ // "created", "closed", "opened", "edited", "deleted"
+ Action *string `json:"action,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Changes *EditChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+}
+
// PageBuildEvent represents an attempted build of a GitHub Pages site, whether
// successful or not.
// The Webhook event name is "page_build".
@@ -262,6 +295,18 @@ type PageBuildEvent struct {
Sender *User `json:"sender,omitempty"`
}
+// PingEvent is triggered when a Webhook is added to GitHub.
+//
+// GitHub docs: https://developer.github.com/webhooks/#ping-event
+type PingEvent struct {
+ // Random string of GitHub zen.
+ Zen *string `json:"zen,omitempty"`
+ // The ID of the webhook that triggered the ping.
+ HookID *int `json:"hook_id,omitempty"`
+ // The webhook configuration.
+ Hook *Hook `json:"hook,omitempty"`
+}
+
// PublicEvent is triggered when a private repository is open sourced.
// According to GitHub: "Without a doubt: the best GitHub event."
// The Webhook event name is "public".
@@ -294,6 +339,26 @@ type PullRequestEvent struct {
Sender *User `json:"sender,omitempty"`
}
+// PullRequestReviewEvent is triggered when a review is submitted on a pull
+// request.
+// The Webhook event name is "pull_request_review".
+//
+// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent
+type PullRequestReviewEvent struct {
+ // Action is always "submitted".
+ Action *string `json:"action,omitempty"`
+ Review *PullRequestReview `json:"review,omitempty"`
+ PullRequest *PullRequest `json:"pull_request,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+
+ // The following field is only present when the webhook is triggered on
+ // a repository belonging to an organization.
+ Organization *Organization `json:"organization,omitempty"`
+}
+
// PullRequestReviewCommentEvent is triggered when a comment is created on a
// portion of the unified diff of a pull request.
// The Webhook event name is "pull_request_review_comment".
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go
index 465574b..3d7421f 100644
--- a/vendor/github.com/google/go-github/github/github.go
+++ b/vendor/github.com/google/go-github/github/github.go
@@ -42,6 +42,8 @@ const (
mediaTypeV3 = "application/vnd.github.v3+json"
defaultMediaType = "application/octet-stream"
mediaTypeV3SHA = "application/vnd.github.v3.sha"
+ mediaTypeV3Diff = "application/vnd.github.v3.diff"
+ mediaTypeV3Patch = "application/vnd.github.v3.patch"
mediaTypeOrgPermissionRepo = "application/vnd.github.v3.repository+json"
// Media Type values to access preview APIs
@@ -68,6 +70,7 @@ const (
mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview"
// https://developer.github.com/changes/2016-04-01-squash-api-preview/
+ // https://developer.github.com/changes/2016-09-26-pull-request-merge-api-update/
mediaTypeSquashPreview = "application/vnd.github.polaris-preview+json"
// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
@@ -79,9 +82,6 @@ const (
// https://developer.github.com/changes/2016-06-14-repository-invitations/
mediaTypeRepositoryInvitationsPreview = "application/vnd.github.swamp-thing-preview+json"
- // https://developer.github.com/changes/2016-04-21-oauth-authorizations-grants-api-preview/
- mediaTypeOAuthGrantAuthorizationsPreview = "application/vnd.github.damage-preview+json"
-
// https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/
mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json"
@@ -119,6 +119,7 @@ type Client struct {
// Services used for talking to different parts of the GitHub API.
Activity *ActivityService
+ Admin *AdminService
Authorizations *AuthorizationsService
Gists *GistsService
Git *GitService
@@ -126,6 +127,7 @@ type Client struct {
Integrations *IntegrationsService
Issues *IssuesService
Organizations *OrganizationsService
+ Projects *ProjectsService
PullRequests *PullRequestsService
Repositories *RepositoriesService
Search *SearchService
@@ -154,6 +156,22 @@ type UploadOptions struct {
Name string `url:"name,omitempty"`
}
+// RawType represents the type of raw format of a request instead of JSON.
+type RawType uint8
+
+const (
+ // Diff format.
+ Diff RawType = 1 + iota
+ // Patch format.
+ Patch
+)
+
+// RawOptions specifies parameters when the user wants to get the raw format of
+// a response instead of JSON.
+type RawOptions struct {
+ Type RawType
+}
+
// addOptions adds the parameters in opt as URL query parameters to s. opt
// must be a struct whose fields may contain "url" tags.
func addOptions(s string, opt interface{}) (string, error) {
@@ -190,6 +208,7 @@ func NewClient(httpClient *http.Client) *Client {
c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL}
c.common.client = c
c.Activity = (*ActivityService)(&c.common)
+ c.Admin = (*AdminService)(&c.common)
c.Authorizations = (*AuthorizationsService)(&c.common)
c.Gists = (*GistsService)(&c.common)
c.Git = (*GitService)(&c.common)
@@ -199,6 +218,7 @@ func NewClient(httpClient *http.Client) *Client {
c.Licenses = (*LicensesService)(&c.common)
c.Migrations = (*MigrationService)(&c.common)
c.Organizations = (*OrganizationsService)(&c.common)
+ c.Projects = (*ProjectsService)(&c.common)
c.PullRequests = (*PullRequestsService)(&c.common)
c.Reactions = (*ReactionsService)(&c.common)
c.Repositories = (*RepositoriesService)(&c.common)
@@ -499,6 +519,18 @@ func (r *RateLimitError) Error() string {
r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now()))
}
+// AcceptedError occurs when GitHub returns 202 Accepted response with an
+// empty body, which means a job was scheduled on the GitHub side to process
+// the information needed and cache it.
+// Technically, 202 Accepted is not a real error; it is used to indicate
+// that the results are not ready yet but should be available soon.
+// The request can be repeated after some time.
+type AcceptedError struct{}
+
+func (*AcceptedError) Error() string {
+ return "job scheduled on GitHub side; try again later"
+}
+
// AbuseRateLimitError occurs when GitHub returns 403 Forbidden response with the
// "documentation_url" field value equal to "https://developer.github.com/v3#abuse-rate-limits".
type AbuseRateLimitError struct {
@@ -562,14 +594,19 @@ func (e *Error) Error() string {
}
// CheckResponse checks the API response for errors, and returns them if
-// present. A response is considered an error if it has a status code outside
-// the 200 range. API error responses are expected to have either no response
+// present. A response is considered an error if it has a status code outside
+// the 200 range or equal to 202 Accepted.
+// API error responses are expected to have either no response
// body, or a JSON response body that maps to ErrorResponse. Any other
// response body will be silently ignored.
//
// The error type will be *RateLimitError for rate limit exceeded errors,
+// *AcceptedError for 202 Accepted status codes,
// and *TwoFactorAuthError for two-factor authentication errors.
func CheckResponse(r *http.Response) error {
+ if r.StatusCode == http.StatusAccepted {
+ return &AcceptedError{}
+ }
if c := r.StatusCode; 200 <= c && c <= 299 {
return nil
}
diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go
index 02c82cd..d8e7d41 100644
--- a/vendor/github.com/google/go-github/github/issues.go
+++ b/vendor/github.com/google/go-github/github/issues.go
@@ -17,6 +17,11 @@ import (
type IssuesService service
// Issue represents a GitHub issue on a repository.
+//
+// Note: As far as the GitHub API is concerned, every pull request is an issue,
+// but not every issue is a pull request. Some endpoints, events, and webhooks
+// may also return pull requests via this struct. If PullRequestLinks is nil,
+// this is an issue, and if PullRequestLinks is not nil, this is a pull request.
type Issue struct {
ID *int `json:"id,omitempty"`
Number *int `json:"number,omitempty"`
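
A minimal sketch of the PullRequestLinks check described in the note above; the owner/repo values are placeholders and IssuesService.ListByRepo is the standard listing call from this package:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)

        issues, _, err := client.Issues.ListByRepo("octocat", "Hello-World", nil)
        if err != nil {
            log.Fatal(err)
        }
        for _, issue := range issues {
            if issue.PullRequestLinks != nil {
                continue // this "issue" is really a pull request
            }
            fmt.Printf("#%d %s\n", *issue.Number, *issue.Title)
        }
    }
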
diff --git a/vendor/github.com/google/go-github/github/licenses.go b/vendor/github.com/google/go-github/github/licenses.go
index 35cd234..0b5e8b3 100644
--- a/vendor/github.com/google/go-github/github/licenses.go
+++ b/vendor/github.com/google/go-github/github/licenses.go
@@ -10,23 +10,44 @@ import "fmt"
// LicensesService handles communication with the license related
// methods of the GitHub API.
//
-// GitHub API docs: http://developer.github.com/v3/pulls/
+// GitHub API docs: https://developer.github.com/v3/licenses/
type LicensesService service
+// RepositoryLicense represents the license for a repository.
+type RepositoryLicense struct {
+ Name *string `json:"name,omitempty"`
+ Path *string `json:"path,omitempty"`
+
+ SHA *string `json:"sha,omitempty"`
+ Size *int `json:"size,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ GitURL *string `json:"git_url,omitempty"`
+ DownloadURL *string `json:"download_url,omitempty"`
+ Type *string `json:"type,omitempty"`
+ Content *string `json:"content,omitempty"`
+ Encoding *string `json:"encoding,omitempty"`
+ License *License `json:"license,omitempty"`
+}
+
+func (l RepositoryLicense) String() string {
+ return Stringify(l)
+}
+
// License represents an open source license.
type License struct {
Key *string `json:"key,omitempty"`
Name *string `json:"name,omitempty"`
URL *string `json:"url,omitempty"`
+ SPDXID *string `json:"spdx_id,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
Featured *bool `json:"featured,omitempty"`
Description *string `json:"description,omitempty"`
- Category *string `json:"category,omitempty"`
Implementation *string `json:"implementation,omitempty"`
- Required *[]string `json:"required,omitempty"`
- Permitted *[]string `json:"permitted,omitempty"`
- Forbidden *[]string `json:"forbidden,omitempty"`
+ Permissions *[]string `json:"permissions,omitempty"`
+ Conditions *[]string `json:"conditions,omitempty"`
+ Limitations *[]string `json:"limitations,omitempty"`
Body *string `json:"body,omitempty"`
}
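
Together with the RepositoriesService.License change later in this patch, the new RepositoryLicense type carries both the license file metadata and the detected License. A minimal sketch, with placeholder owner/repo:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)

        rl, _, err := client.Repositories.License("octocat", "Hello-World")
        if err != nil {
            log.Fatal(err)
        }
        if rl.License != nil && rl.License.SPDXID != nil {
            fmt.Println("SPDX ID:", *rl.License.SPDXID)
        }
        if rl.DownloadURL != nil {
            fmt.Println("raw license text:", *rl.DownloadURL)
        }
    }
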
diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go
index eedb190..5e167ee 100644
--- a/vendor/github.com/google/go-github/github/messages.go
+++ b/vendor/github.com/google/go-github/github/messages.go
@@ -49,10 +49,14 @@ var (
"integration_installation_repositories": "IntegrationInstallationRepositoriesEvent",
"issue_comment": "IssueCommentEvent",
"issues": "IssuesEvent",
+ "label": "LabelEvent",
"member": "MemberEvent",
"membership": "MembershipEvent",
+ "milestone": "MilestoneEvent",
"page_build": "PageBuildEvent",
+ "ping": "PingEvent",
"public": "PublicEvent",
+ "pull_request_review": "PullRequestReviewEvent",
"pull_request_review_comment": "PullRequestReviewCommentEvent",
"pull_request": "PullRequestEvent",
"push": "PushEvent",
@@ -168,9 +172,9 @@ func WebHookType(r *http.Request) string {
// event, err := github.ParseWebHook(github.WebHookType(r), payload)
// if err != nil { ... }
// switch event := event.(type) {
-// case CommitCommentEvent:
+// case *github.CommitCommentEvent:
// processCommitCommentEvent(event)
-// case CreateEvent:
+// case *github.CreateEvent:
// processCreateEvent(event)
// ...
// }
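
A minimal webhook-handler sketch built on the mapping above, assuming ValidatePayload, WebHookType, and ParseWebHook from this file; the secret, route, and handled event types are placeholders:

    package main

    import (
        "log"
        "net/http"

        "github.com/google/go-github/github"
    )

    func handler(w http.ResponseWriter, r *http.Request) {
        payload, err := github.ValidatePayload(r, []byte("my-webhook-secret"))
        if err != nil {
            http.Error(w, "bad signature", http.StatusBadRequest)
            return
        }
        event, err := github.ParseWebHook(github.WebHookType(r), payload)
        if err != nil {
            http.Error(w, "unknown event", http.StatusBadRequest)
            return
        }
        switch e := event.(type) {
        case *github.PingEvent:
            log.Println("webhook registered:", *e.Zen)
        case *github.LabelEvent:
            log.Println("label event:", *e.Action)
        case *github.PullRequestReviewEvent:
            log.Println("review submitted on PR", *e.PullRequest.Number)
        }
    }

    func main() {
        http.HandleFunc("/webhook", handler)
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
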
diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go
new file mode 100644
index 0000000..2330056
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/projects.go
@@ -0,0 +1,417 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "fmt"
+
+// ProjectsService provides access to the projects functions in the
+// GitHub API.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/
+type ProjectsService service
+
+// Project represents a GitHub Project.
+type Project struct {
+ ID *int `json:"id,omitempty"`
+ URL *string `json:"url,omitempty"`
+ OwnerURL *string `json:"owner_url,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Number *int `json:"number,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+
+ // The User object that generated the project.
+ Creator *User `json:"creator,omitempty"`
+}
+
+func (p Project) String() string {
+ return Stringify(p)
+}
+
+// GetProject gets a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#get-a-project
+func (s *ProjectsService) GetProject(id int) (*Project, *Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, err
+}
+
+// ProjectOptions specifies the parameters to the
+// RepositoriesService.CreateProject and
+// ProjectsService.UpdateProject methods.
+type ProjectOptions struct {
+ // The name of the project. (Required for creation; optional for update.)
+ Name string `json:"name,omitempty"`
+ // The body of the project. (Optional.)
+ Body string `json:"body,omitempty"`
+}
+
+// UpdateProject updates a repository project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#update-a-project
+func (s *ProjectsService) UpdateProject(id int, opt *ProjectOptions) (*Project, *Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, err
+}
+
+// DeleteProject deletes a GitHub Project from a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project
+func (s *ProjectsService) DeleteProject(id int) (*Response, error) {
+ u := fmt.Sprintf("projects/%v", id)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectColumn represents a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/projects/
+type ProjectColumn struct {
+ ID *int `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ProjectURL *string `json:"project_url,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+// ListProjectColumns lists the columns of a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns
+func (s *ProjectsService) ListProjectColumns(projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/%v/columns", projectID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ columns := []*ProjectColumn{}
+ resp, err := s.client.Do(req, &columns)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return columns, resp, err
+}
+
+// GetProjectColumn gets a column of a GitHub Project for a repo.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column
+func (s *ProjectsService) GetProjectColumn(id int) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// ProjectColumnOptions specifies the parameters to the
+// ProjectsService.CreateProjectColumn and
+// ProjectsService.UpdateProjectColumn methods.
+type ProjectColumnOptions struct {
+ // The name of the project column. (Required for creation and update.)
+ Name string `json:"name"`
+}
+
+// CreateProjectColumn creates a column for the specified (by number) project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column
+func (s *ProjectsService) CreateProjectColumn(projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/%v/columns", projectID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// UpdateProjectColumn updates a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column
+func (s *ProjectsService) UpdateProjectColumn(columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", columnID)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ column := &ProjectColumn{}
+ resp, err := s.client.Do(req, column)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return column, resp, err
+}
+
+// DeleteProjectColumn deletes a column from a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column
+func (s *ProjectsService) DeleteProjectColumn(columnID int) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/%v", columnID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectColumnMoveOptions specifies the parameters to the
+// ProjectsService.MoveProjectColumn method.
+type ProjectColumnMoveOptions struct {
+ // Position can be one of "first", "last", or "after:<column-id>", where
+ // <column-id> is the ID of a column in the same project. (Required.)
+ Position string `json:"position"`
+}
+
+// MoveProjectColumn moves a column within a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column
+func (s *ProjectsService) MoveProjectColumn(columnID int, opt *ProjectColumnMoveOptions) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/moves", columnID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectCard represents a card in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/projects/
+type ProjectCard struct {
+ ColumnURL *string `json:"column_url,omitempty"`
+ ContentURL *string `json:"content_url,omitempty"`
+ ID *int `json:"id,omitempty"`
+ Note *string `json:"note,omitempty"`
+ CreatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+}
+
+// ListProjectCards lists the cards in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards
+func (s *ProjectsService) ListProjectCards(columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/cards", columnID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ cards := []*ProjectCard{}
+ resp, err := s.client.Do(req, &cards)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return cards, resp, err
+}
+
+// GetProjectCard gets a card in a column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card
+func (s *ProjectsService) GetProjectCard(columnID int) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", columnID)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
+
+// ProjectCardOptions specifies the parameters to the
+// ProjectsService.CreateProjectCard and
+// ProjectsService.UpdateProjectCard methods.
+type ProjectCardOptions struct {
+ // The note of the card. Note and ContentID are mutually exclusive.
+ Note string `json:"note,omitempty"`
+ // The ID (not Number) of the Issue or Pull Request to associate with this card.
+ // Note and ContentID are mutually exclusive.
+ ContentID int `json:"content_id,omitempty"`
+ // The type of content to associate with this card. Possible values are: "Issue", "PullRequest".
+ ContentType string `json:"content_type,omitempty"`
+}
+
+// CreateProjectCard creates a card in the specified column of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card
+func (s *ProjectsService) CreateProjectCard(columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/%v/cards", columnID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
+
+// UpdateProjectCard updates a card of a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card
+func (s *ProjectsService) UpdateProjectCard(cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", cardID)
+ req, err := s.client.NewRequest("PATCH", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ card := &ProjectCard{}
+ resp, err := s.client.Do(req, card)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return card, resp, err
+}
+
+// DeleteProjectCard deletes a card from a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card
+func (s *ProjectsService) DeleteProjectCard(cardID int) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v", cardID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
+
+// ProjectCardMoveOptions specifies the parameters to the
+// ProjectsService.MoveProjectCard method.
+type ProjectCardMoveOptions struct {
+ // Position can be one of "top", "bottom", or "after:<card-id>", where
+ // <card-id> is the ID of a card in the same project.
+ Position string `json:"position"`
+ // ColumnID is the ID of a column in the same project. Note that ColumnID
+ // is required when using Position "after:<card-id>" when that card is in
+ // another column; otherwise it is optional.
+ ColumnID int `json:"column_id,omitempty"`
+}
+
+// MoveProjectCard moves a card within a GitHub Project.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card
+func (s *ProjectsService) MoveProjectCard(cardID int, opt *ProjectCardMoveOptions) (*Response, error) {
+ u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ return s.client.Do(req, nil)
+}
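
A minimal sketch of the new ProjectsService: create a column in an existing project, add a note card, and move it to the top. The project ID is a placeholder, and every call sends the projects preview Accept header shown above:

    package main

    import (
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // an authenticated client is assumed

        column, _, err := client.Projects.CreateProjectColumn(1234, &github.ProjectColumnOptions{Name: "In progress"})
        if err != nil {
            log.Fatal(err)
        }

        // Note and ContentID are mutually exclusive; this card is a plain note.
        card, _, err := client.Projects.CreateProjectCard(*column.ID, &github.ProjectCardOptions{Note: "Investigate flaky test"})
        if err != nil {
            log.Fatal(err)
        }

        if _, err := client.Projects.MoveProjectCard(*card.ID, &github.ProjectCardMoveOptions{Position: "top"}); err != nil {
            log.Fatal(err)
        }
    }
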
diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go
index 3c88365..51c6ccb 100644
--- a/vendor/github.com/google/go-github/github/pulls.go
+++ b/vendor/github.com/google/go-github/github/pulls.go
@@ -6,6 +6,7 @@
package github
import (
+ "bytes"
"fmt"
"time"
)
@@ -134,6 +135,32 @@ func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullR
return pull, resp, err
}
+// GetRaw gets raw (diff or patch) format of a pull request.
+func (s *PullRequestsService) GetRaw(owner string, repo string, number int, opt RawOptions) (string, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return "", nil, err
+ }
+
+ switch opt.Type {
+ case Diff:
+ req.Header.Set("Accept", mediaTypeV3Diff)
+ case Patch:
+ req.Header.Set("Accept", mediaTypeV3Patch)
+ default:
+ return "", nil, fmt.Errorf("unsupported raw type %d", opt.Type)
+ }
+
+ ret := new(bytes.Buffer)
+ resp, err := s.client.Do(req, ret)
+ if err != nil {
+ return "", resp, err
+ }
+
+ return ret.String(), resp, err
+}
+
// NewPullRequest represents a new pull request to be created.
type NewPullRequest struct {
Title *string `json:"title,omitempty"`
@@ -253,32 +280,41 @@ type PullRequestMergeResult struct {
// PullRequestOptions lets you define how a pull request will be merged.
type PullRequestOptions struct {
- Squash bool
+ CommitTitle string // Title for the automatic commit message. (Optional.)
+ SHA string // SHA that pull request head must match to allow merge. (Optional.)
+
+ // The merge method to use. Possible values include: "merge", "squash", and "rebase" with the default being merge. (Optional.)
+ MergeMethod string
}
type pullRequestMergeRequest struct {
- CommitMessage *string `json:"commit_message"`
- Squash *bool `json:"squash,omitempty"`
+ CommitMessage string `json:"commit_message"`
+ CommitTitle string `json:"commit_title,omitempty"`
+ MergeMethod string `json:"merge_method,omitempty"`
+ SHA string `json:"sha,omitempty"`
}
// Merge a pull request (Merge Button™).
+// commitMessage is extra detail to append to the automatic commit message.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade
func (s *PullRequestsService) Merge(owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
- pullRequestBody := &pullRequestMergeRequest{CommitMessage: &commitMessage}
+ pullRequestBody := &pullRequestMergeRequest{CommitMessage: commitMessage}
if options != nil {
- pullRequestBody.Squash = &options.Squash
+ pullRequestBody.CommitTitle = options.CommitTitle
+ pullRequestBody.MergeMethod = options.MergeMethod
+ pullRequestBody.SHA = options.SHA
}
req, err := s.client.NewRequest("PUT", u, pullRequestBody)
-
- // TODO: This header will be unnecessary when the API is no longer in preview.
- req.Header.Set("Accept", mediaTypeSquashPreview)
if err != nil {
return nil, nil, err
}
+ // TODO: This header will be unnecessary when the API is no longer in preview.
+ req.Header.Set("Accept", mediaTypeSquashPreview)
+
mergeResult := new(PullRequestMergeResult)
resp, err := s.client.Do(req, mergeResult)
if err != nil {
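
A minimal sketch of the two additions above: GetRaw with RawOptions to fetch a diff, and Merge with the new MergeMethod option. The owner, repo, and pull request number are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // an authenticated client is assumed for merging

        diff, _, err := client.PullRequests.GetRaw("octocat", "Hello-World", 42, github.RawOptions{Type: github.Diff})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("fetched %d bytes of diff\n", len(diff))

        opts := &github.PullRequestOptions{MergeMethod: "squash"}
        result, _, err := client.PullRequests.Merge("octocat", "Hello-World", 42, "Squash-merged via the API", opts)
        if err != nil {
            log.Fatal(err)
        }
        if result.Merged != nil && *result.Merged {
            fmt.Println("merged as", *result.SHA)
        }
    }
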
diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go
new file mode 100644
index 0000000..ae3cdd4
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/pulls_reviews.go
@@ -0,0 +1,19 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import "time"
+
+// PullRequestReview represents a review of a pull request.
+type PullRequestReview struct {
+ ID *int `json:"id,omitempty"`
+ User *User `json:"user,omitempty"`
+ Body *string `json:"body,omitempty"`
+ SubmittedAt *time.Time `json:"submitted_at,omitempty"`
+
+ // State can be "approved", "rejected", or "commented".
+ State *string `json:"state,omitempty"`
+}
diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go
index 98e4ac5..040cd31 100644
--- a/vendor/github.com/google/go-github/github/repos.go
+++ b/vendor/github.com/google/go-github/github/repos.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "fmt"
+ "strings"
+)
// RepositoriesService handles communication with the repository related
// methods of the GitHub API.
@@ -46,6 +49,9 @@ type Repository struct {
Source *Repository `json:"source,omitempty"`
Organization *Organization `json:"organization,omitempty"`
Permissions *map[string]bool `json:"permissions,omitempty"`
+ AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"`
+ AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"`
+ AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"`
// Only provided when using RepositoriesService.Get while in preview
License *License `json:"license,omitempty"`
@@ -286,7 +292,8 @@ func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, e
// TODO: remove custom Accept header when the license support fully launches
// https://developer.github.com/v3/licenses/#get-a-repositorys-license
- req.Header.Set("Accept", mediaTypeLicensesPreview)
+ acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeSquashPreview}
+ req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
repository := new(Repository)
resp, err := s.client.Do(req, repository)
@@ -330,6 +337,9 @@ func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (
return nil, nil, err
}
+ // TODO: Remove this preview header after API is fully vetted.
+ req.Header.Add("Accept", mediaTypeSquashPreview)
+
r := new(Repository)
resp, err := s.client.Do(req, r)
if err != nil {
@@ -491,29 +501,54 @@ func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptio
// Branch represents a repository branch
type Branch struct {
- Name *string `json:"name,omitempty"`
- Commit *Commit `json:"commit,omitempty"`
- Protection *Protection `json:"protection,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Commit *RepositoryCommit `json:"commit,omitempty"`
+ Protected *bool `json:"protected,omitempty"`
}
-// Protection represents a repository branch's protection
+// Protection represents a repository branch's protection.
type Protection struct {
- Enabled *bool `json:"enabled,omitempty"`
- RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks,omitempty"`
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ Restrictions *BranchRestrictions `json:"restrictions"`
}
-// RequiredStatusChecks represents the protection status of a individual branch
+// ProtectionRequest represents a request to create/edit a branch's protection.
+type ProtectionRequest struct {
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ Restrictions *BranchRestrictionsRequest `json:"restrictions"`
+}
+
+// RequiredStatusChecks represents the protection status of an individual branch.
type RequiredStatusChecks struct {
- // Who required status checks apply to.
- // Possible values are:
- // off
- // non_admins
- // everyone
- EnforcementLevel *string `json:"enforcement_level,omitempty"`
- // The list of status checks which are required
+ // Enforce required status checks for repository administrators.
+ IncludeAdmins *bool `json:"include_admins,omitempty"`
+ // Require branches to be up to date before merging.
+ Strict *bool `json:"strict,omitempty"`
+ // The list of status checks to require in order to merge into this
+ // branch.
Contexts *[]string `json:"contexts,omitempty"`
}
+// BranchRestrictions represents the restriction that only certain users or
+// teams may push to a branch.
+type BranchRestrictions struct {
+ // The list of user logins with push access.
+ Users []*User `json:"users,omitempty"`
+ // The list of team slugs with push access.
+ Teams []*Team `json:"teams,omitempty"`
+}
+
+// BranchRestrictionsRequest represents the request to create/edit the
+// restriction that only certain users or teams may push to a branch. It is
+// separate from BranchRestrictions above because the request structure is
+// different from the response structure.
+type BranchRestrictionsRequest struct {
+ // The list of user logins with push access.
+ Users *[]string `json:"users,omitempty"`
+ // The list of team slugs with push access.
+ Teams *[]string `json:"teams,omitempty"`
+}
+
// ListBranches lists branches for the specified repository.
//
// GitHub API docs: http://developer.github.com/v3/repos/#list-branches
@@ -529,6 +564,7 @@ func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListO
return nil, nil, err
}
+ // TODO: remove custom Accept header when this API fully launches
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
branches := new([]*Branch)
@@ -550,6 +586,7 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R
return nil, nil, err
}
+ // TODO: remove custom Accept header when this API fully launches
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
b := new(Branch)
@@ -561,42 +598,81 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R
return b, resp, err
}
-// EditBranch edits the branch (currently only Branch Protection)
+// GetBranchProtection gets the protection of a given branch.
//
-// GitHub API docs: https://developer.github.com/v3/repos/#enabling-and-disabling-branch-protection
-func (s *RepositoriesService) EditBranch(owner, repo, branchName string, branch *Branch) (*Branch, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branchName)
- req, err := s.client.NewRequest("PATCH", u, branch)
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-branch-protection
+func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*Protection, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
+ // TODO: remove custom Accept header when this API fully launches
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
- b := new(Branch)
- resp, err := s.client.Do(req, b)
+ p := new(Protection)
+ resp, err := s.client.Do(req, p)
if err != nil {
return nil, resp, err
}
- return b, resp, err
+ return p, resp, err
+}
+
+// UpdateBranchProtection updates the protection of a given branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-branch-protection
+func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("PUT", u, preq)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ p := new(Protection)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, err
+}
+
+// RemoveBranchProtection removes the protection of a given branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-branch-protection
+func (s *RepositoriesService) RemoveBranchProtection(owner, repo, branch string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ return s.client.Do(req, nil)
}
// License gets the contents of a repository's license if one is detected.
//
// GitHub API docs: https://developer.github.com/v3/licenses/#get-the-contents-of-a-repositorys-license
-func (s *RepositoriesService) License(owner, repo string) (*License, *Response, error) {
+func (s *RepositoriesService) License(owner, repo string) (*RepositoryLicense, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/license", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
- r := &Repository{}
+ r := &RepositoryLicense{}
resp, err := s.client.Do(req, r)
if err != nil {
return nil, resp, err
}
- return r.License, resp, err
+ return r, resp, err
}
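
A minimal sketch of the new branch-protection calls; the owner, repo, branch, status-check contexts, and restriction lists are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // an authenticated client with admin rights is assumed

        preq := &github.ProtectionRequest{
            RequiredStatusChecks: &github.RequiredStatusChecks{
                IncludeAdmins: github.Bool(true),
                Strict:        github.Bool(true),
                Contexts:      &[]string{"continuous-integration/travis-ci"},
            },
            Restrictions: &github.BranchRestrictionsRequest{
                Users: &[]string{"octocat"},
                Teams: &[]string{"justice-league"},
            },
        }

        protection, _, err := client.Repositories.UpdateBranchProtection("octocat", "Hello-World", "master", preq)
        if err != nil {
            log.Fatal(err)
        }
        if protection.RequiredStatusChecks != nil && protection.RequiredStatusChecks.Contexts != nil {
            fmt.Println("required contexts:", *protection.RequiredStatusChecks.Contexts)
        }

        // Protection can later be removed entirely:
        // client.Repositories.RemoveBranchProtection("octocat", "Hello-World", "master")
    }
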
diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go
index c1359f4..22e8fca 100644
--- a/vendor/github.com/google/go-github/github/repos_commits.go
+++ b/vendor/github.com/google/go-github/github/repos_commits.go
@@ -20,7 +20,6 @@ type RepositoryCommit struct {
Author *User `json:"author,omitempty"`
Committer *User `json:"committer,omitempty"`
Parents []Commit `json:"parents,omitempty"`
- Message *string `json:"message,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
URL *string `json:"url,omitempty"`
CommentsURL *string `json:"comments_url,omitempty"`
@@ -48,13 +47,16 @@ func (c CommitStats) String() string {
// CommitFile represents a file modified in a commit.
type CommitFile struct {
- SHA *string `json:"sha,omitempty"`
- Filename *string `json:"filename,omitempty"`
- Additions *int `json:"additions,omitempty"`
- Deletions *int `json:"deletions,omitempty"`
- Changes *int `json:"changes,omitempty"`
- Status *string `json:"status,omitempty"`
- Patch *string `json:"patch,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ Filename *string `json:"filename,omitempty"`
+ Additions *int `json:"additions,omitempty"`
+ Deletions *int `json:"deletions,omitempty"`
+ Changes *int `json:"changes,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Patch *string `json:"patch,omitempty"`
+ BlobURL *string `json:"blob_url,omitempty"`
+ RawURL *string `json:"raw_url,omitempty"`
+ ContentsURL *string `json:"contents_url,omitempty"`
}
func (c CommitFile) String() string {
diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go
index ebf4d04..7b08cf0 100644
--- a/vendor/github.com/google/go-github/github/repos_contents.go
+++ b/vendor/github.com/google/go-github/github/repos_contents.go
@@ -267,8 +267,12 @@ func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat a
} else {
resp, err = s.client.client.Transport.RoundTrip(req)
}
- if err != nil || resp.StatusCode != http.StatusFound {
- return nil, newResponse(resp), err
+ if err != nil {
+ return nil, nil, err
+ }
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusFound {
+ return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
return parsedURL, newResponse(resp), err
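
A minimal sketch around GetArchiveLink; with the change above, a non-302 reply now surfaces as an explicit error instead of a nil URL. The owner/repo are placeholders and github.Tarball is the package's archive-format constant:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)

        // The returned URL is a short-lived redirect target; fetch it promptly.
        u, _, err := client.Repositories.GetArchiveLink("octocat", "Hello-World", github.Tarball, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("download tarball from:", u.String())
    }
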
diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go
index f3272b0..4b40fbe 100644
--- a/vendor/github.com/google/go-github/github/repos_deployments.go
+++ b/vendor/github.com/google/go-github/github/repos_deployments.go
@@ -82,6 +82,27 @@ func (s *RepositoriesService) ListDeployments(owner, repo string, opt *Deploymen
return *deployments, resp, err
}
+// GetDeployment returns a single deployment of a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/deployments/
+// Note: GetDeployment uses the undocumented GitHub API endpoint /repos/:owner/:repo/deployments/:id.
+func (s *RepositoriesService) GetDeployment(owner, repo string, deploymentID int) (*Deployment, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ deployment := new(Deployment)
+ resp, err := s.client.Do(req, deployment)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return deployment, resp, err
+}
+
// CreateDeployment creates a new deployment for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment
diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go
index 7ef4cb3..c88f3d3 100644
--- a/vendor/github.com/google/go-github/github/repos_forks.go
+++ b/vendor/github.com/google/go-github/github/repos_forks.go
@@ -50,6 +50,12 @@ type RepositoryCreateForkOptions struct {
// CreateFork creates a fork of the specified repository.
//
+// This method might return an *AcceptedError and a status code of
+// 202. This is because GitHub returns 202 Accepted while it creates
+// the fork in a background task.
+// A follow-up request, after a delay of a second or so, should result
+// in a successful request.
+//
// GitHub API docs: https://developer.github.com/v3/repos/forks/#create-a-fork
func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go
index ccd24f3..ddd8301 100644
--- a/vendor/github.com/google/go-github/github/repos_pages.go
+++ b/vendor/github.com/google/go-github/github/repos_pages.go
@@ -30,13 +30,13 @@ type PagesBuild struct {
Commit *string `json:"commit,omitempty"`
Duration *int `json:"duration,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"created_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
}
// GetPagesInfo fetches information about a GitHub Pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site
-func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *Response, error) {
+func (s *RepositoriesService) GetPagesInfo(owner, repo string) (*Pages, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -58,7 +58,7 @@ func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *
// ListPagesBuilds lists the builds for a GitHub Pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds
-func (s *RepositoriesService) ListPagesBuilds(owner string, repo string) ([]*PagesBuild, *Response, error) {
+func (s *RepositoriesService) ListPagesBuilds(owner, repo string) ([]*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -77,7 +77,7 @@ func (s *RepositoriesService) ListPagesBuilds(owner string, repo string) ([]*Pag
// GetLatestPagesBuild fetches the latest build information for a GitHub pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build
-func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*PagesBuild, *Response, error) {
+func (s *RepositoriesService) GetLatestPagesBuild(owner, repo string) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -93,10 +93,29 @@ func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*P
return build, resp, err
}
+// GetPageBuild fetches the specific build information for a GitHub pages site.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-a-specific-pages-build
+func (s *RepositoriesService) GetPageBuild(owner, repo string, id int) (*PagesBuild, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ build := new(PagesBuild)
+ resp, err := s.client.Do(req, build)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return build, resp, err
+}
+
// RequestPageBuild requests a build of a GitHub Pages site without needing to push new commit.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
-func (s *RepositoriesService) RequestPageBuild(owner string, repo string) (*PagesBuild, *Response, error) {
+func (s *RepositoriesService) RequestPageBuild(owner, repo string) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
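
A minimal sketch combining the renamed helpers and the fixed UpdatedAt tag above: request a Pages build, then inspect the latest build. The repository is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // an authenticated client is assumed

        build, _, err := client.Repositories.RequestPageBuild("octocat", "octocat.github.io")
        if err != nil {
            log.Fatal(err)
        }
        if build.Status != nil {
            fmt.Println("queued build status:", *build.Status)
        }

        latest, _, err := client.Repositories.GetLatestPagesBuild("octocat", "octocat.github.io")
        if err != nil {
            log.Fatal(err)
        }
        if latest.Status != nil {
            fmt.Println("latest build status:", *latest.Status)
        }
    }
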
diff --git a/vendor/github.com/google/go-github/github/repos_projects.go b/vendor/github.com/google/go-github/github/repos_projects.go
index e37e220..137f89d 100644
--- a/vendor/github.com/google/go-github/github/repos_projects.go
+++ b/vendor/github.com/google/go-github/github/repos_projects.go
@@ -5,34 +5,11 @@
package github
-import (
- "fmt"
-)
-
-// Project represents a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/
-type Project struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- OwnerURL *string `json:"owner_url,omitempty"`
- Name *string `json:"name,omitempty"`
- Body *string `json:"body,omitempty"`
- Number *int `json:"number,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
-
- // The User object that generated the project.
- Creator *User `json:"creator,omitempty"`
-}
-
-func (p Project) String() string {
- return Stringify(p)
-}
+import "fmt"
// ListProjects lists the projects for a repo.
//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#list-projects
+// GitHub API docs: https://developer.github.com/v3/projects/#list-repository-projects
func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions) ([]*Project, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
u, err := addOptions(u, opt)
@@ -57,44 +34,12 @@ func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions)
return projects, resp, err
}
-// GetProject gets a GitHub Project for a repo.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#get-a-project
-func (s *RepositoriesService) GetProject(owner, repo string, number int) (*Project, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/%v", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, err
-}
-
-// ProjectOptions specifies the parameters to the
-// RepositoriesService.CreateProject and
-// RepositoriesService.UpdateProject methods.
-type ProjectOptions struct {
- // The name of the project. (Required for creation; optional for update.)
- Name string `json:"name,omitempty"`
- // The body of the project. (Optional.)
- Body string `json:"body,omitempty"`
-}
-
// CreateProject creates a GitHub Project for the specified repository.
//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#create-a-project
-func (s *RepositoriesService) CreateProject(owner, repo string, projectOptions *ProjectOptions) (*Project, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/projects/#create-a-repository-project
+func (s *RepositoriesService) CreateProject(owner, repo string, opt *ProjectOptions) (*Project, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
- req, err := s.client.NewRequest("POST", u, projectOptions)
+ req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
return nil, nil, err
}
@@ -110,355 +55,3 @@ func (s *RepositoriesService) CreateProject(owner, repo string, projectOptions *
return project, resp, err
}
-
-// UpdateProject updates a repository project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#update-a-project
-func (s *RepositoriesService) UpdateProject(owner, repo string, number int, projectOptions *ProjectOptions) (*Project, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/%v", owner, repo, number)
- req, err := s.client.NewRequest("PATCH", u, projectOptions)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- project := &Project{}
- resp, err := s.client.Do(req, project)
- if err != nil {
- return nil, resp, err
- }
-
- return project, resp, err
-}
-
-// DeleteProject deletes a GitHub Project from a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#delete-a-project
-func (s *RepositoriesService) DeleteProject(owner, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/%v", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// ProjectColumn represents a column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/
-type ProjectColumn struct {
- ID *int `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- ProjectURL *string `json:"project_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
-}
-
-// ListProjectColumns lists the columns of a GitHub Project for a repo.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#list-columns
-func (s *RepositoriesService) ListProjectColumns(owner, repo string, number int, opt *ListOptions) ([]*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/%v/columns", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- columns := []*ProjectColumn{}
- resp, err := s.client.Do(req, &columns)
- if err != nil {
- return nil, resp, err
- }
-
- return columns, resp, err
-}
-
-// GetProjectColumn gets a column of a GitHub Project for a repo.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#get-a-column
-func (s *RepositoriesService) GetProjectColumn(owner, repo string, columnID int) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v", owner, repo, columnID)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, err
-}
-
-// ProjectColumnOptions specifies the parameters to the
-// RepositoriesService.CreateProjectColumn and
-// RepositoriesService.UpdateProjectColumn methods.
-type ProjectColumnOptions struct {
- // The name of the project column. (Required for creation and update.)
- Name string `json:"name"`
-}
-
-// CreateProjectColumn creates a column for the specified (by number) project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#create-a-column
-func (s *RepositoriesService) CreateProjectColumn(owner, repo string, number int, columnOptions *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/%v/columns", owner, repo, number)
- req, err := s.client.NewRequest("POST", u, columnOptions)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, err
-}
-
-// UpdateProjectColumn updates a column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#update-a-column
-func (s *RepositoriesService) UpdateProjectColumn(owner, repo string, columnID int, columnOptions *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v", owner, repo, columnID)
- req, err := s.client.NewRequest("PATCH", u, columnOptions)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
- if err != nil {
- return nil, resp, err
- }
-
- return column, resp, err
-}
-
-// DeleteProjectColumn deletes a column from a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#delete-a-column
-func (s *RepositoriesService) DeleteProjectColumn(owner, repo string, columnID int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v", owner, repo, columnID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// ProjectColumnMoveOptions specifies the parameters to the
-// RepositoriesService.MoveProjectColumn method.
-type ProjectColumnMoveOptions struct {
- // Position can be one of "first", "last", or "after:<column-id>", where
- // <column-id> is the ID of a column in the same project. (Required.)
- Position string `json:"position"`
-}
-
-// MoveProjectColumn moves a column within a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#move-a-column
-func (s *RepositoriesService) MoveProjectColumn(owner, repo string, columnID int, moveOptions *ProjectColumnMoveOptions) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v/moves", owner, repo, columnID)
- req, err := s.client.NewRequest("POST", u, moveOptions)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// ProjectCard represents a card in a column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/
-type ProjectCard struct {
- ColumnURL *string `json:"column_url,omitempty"`
- ContentURL *string `json:"content_url,omitempty"`
- ID *int `json:"id,omitempty"`
- Note *string `json:"note,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
-}
-
-// ListProjectCards lists the cards in a column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#list-projects-cards
-func (s *RepositoriesService) ListProjectCards(owner, repo string, columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v/cards", owner, repo, columnID)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- cards := []*ProjectCard{}
- resp, err := s.client.Do(req, &cards)
- if err != nil {
- return nil, resp, err
- }
-
- return cards, resp, err
-}
-
-// GetProjectCard gets a card in a column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#get-a-project-card
-func (s *RepositoriesService) GetProjectCard(owner, repo string, columnID int) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/cards/%v", owner, repo, columnID)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, err
-}
-
-// ProjectCardOptions specifies the parameters to the
-// RepositoriesService.CreateProjectCard and
-// RepositoriesService.UpdateProjectCard methods.
-type ProjectCardOptions struct {
- // The note of the card. Note and ContentID are mutually exclusive.
- Note string `json:"note,omitempty"`
- // The ID (not Number) of the Issue or Pull Request to associate with this card.
- // Note and ContentID are mutually exclusive.
- ContentID int `json:"content_id,omitempty"`
- // The type of content to associate with this card. Possible values are: "Issue", "PullRequest".
- ContentType string `json:"content_type,omitempty"`
-}
-
-// CreateProjectCard creates a card in the specified column of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#create-a-project-card
-func (s *RepositoriesService) CreateProjectCard(owner, repo string, columnID int, cardOptions *ProjectCardOptions) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/%v/cards", owner, repo, columnID)
- req, err := s.client.NewRequest("POST", u, cardOptions)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, err
-}
-
-// UpdateProjectCard updates a card of a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#update-a-project-card
-func (s *RepositoriesService) UpdateProjectCard(owner, repo string, cardID int, cardOptions *ProjectCardOptions) (*ProjectCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/cards/%v", owner, repo, cardID)
- req, err := s.client.NewRequest("PATCH", u, cardOptions)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
- if err != nil {
- return nil, resp, err
- }
-
- return card, resp, err
-}
-
-// DeleteProjectCard deletes a card from a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#delete-a-project-card
-func (s *RepositoriesService) DeleteProjectCard(owner, repo string, cardID int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/cards/%v", owner, repo, cardID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// ProjectCardMoveOptions specifies the parameters to the
-// RepositoriesService.MoveProjectCard method.
-type ProjectCardMoveOptions struct {
- // Position can be one of "top", "bottom", or "after:<card-id>", where
- // <card-id> is the ID of a card in the same project.
- Position string `json:"position"`
- // ColumnID is the ID of a column in the same project. Note that ColumnID
- // is required when using Position "after:<card-id>" when that card is in
- // another column; otherwise it is optional.
- ColumnID int `json:"column_id,omitempty"`
-}
-
-// MoveProjectCard moves a card within a GitHub Project.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/#move-a-project-card
-func (s *RepositoriesService) MoveProjectCard(owner, repo string, cardID int, moveOptions *ProjectCardMoveOptions) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/projects/columns/cards/%v/moves", owner, repo, cardID)
- req, err := s.client.NewRequest("POST", u, moveOptions)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeProjectsPreview)
-
- return s.client.Do(req, nil)
-}
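
With the Project and ProjectOptions types now defined elsewhere in the package, the surviving CreateProject call is unchanged for callers. A minimal sketch, assuming an OAuth token in a GITHUB_TOKEN environment variable and placeholder owner/repo values:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/google/go-github/github"
        "golang.org/x/oauth2"
    )

    func main() {
        // Creating a project needs an authenticated client.
        ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
        client := github.NewClient(oauth2.NewClient(oauth2.NoContext, ts))

        opt := &github.ProjectOptions{Name: "Roadmap", Body: "Planning board"}
        project, _, err := client.Repositories.CreateProject("octocat", "hello-world", opt)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(github.Stringify(project))
    }
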
diff --git a/vendor/github.com/google/go-github/github/repos_stats.go b/vendor/github.com/google/go-github/github/repos_stats.go
index e4f75a5..8657bd7 100644
--- a/vendor/github.com/google/go-github/github/repos_stats.go
+++ b/vendor/github.com/google/go-github/github/repos_stats.go
@@ -39,8 +39,8 @@ func (w WeeklyStats) String() string {
// deletions and commit counts.
//
// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code of
-// 202. This is because this is the status that github returns to signify that
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
@@ -78,8 +78,8 @@ func (w WeeklyCommitActivity) String() string {
// starting on Sunday.
//
// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code of
-// 202. This is because this is the status that github returns to signify that
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
@@ -104,6 +104,12 @@ func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyC
// deletions pushed to a repository. Returned WeeklyStats will contain
// additions and deletions, but not total commits.
//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#code-frequency
func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]*WeeklyStats, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo)
@@ -152,11 +158,10 @@ func (r RepositoryParticipation) String() string {
// The array order is oldest week (index 0) to most recent week.
//
// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code
-// of 202. This is because this is the status that github returns to
-// signify that it is now computing the requested statistics. A follow
-// up request, after a delay of a second or so, should result in a
-// successful request.
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
//
// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#participation
func (s *RepositoriesService) ListParticipation(owner, repo string) (*RepositoryParticipation, *Response, error) {
@@ -185,6 +190,12 @@ type PunchCard struct {
// ListPunchCard returns the number of commits per hour in each day.
//
+// If this is the first time these statistics are requested for the given
+// repository, this method will return an *AcceptedError and a status code of
+// 202. This is because this is the status that GitHub returns to signify that
+// it is now computing the requested statistics. A follow up request, after a
+// delay of a second or so, should result in a successful request.
+//
// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#punch-card
func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]*PunchCard, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo)
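
The updated comments describe the 202/*AcceptedError behaviour for all of the statistics endpoints. A minimal retry sketch built on that description, with placeholder owner/repo values:

    package main

    import (
        "fmt"
        "log"
        "time"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)

        for i := 0; i < 5; i++ {
            stats, _, err := client.Repositories.ListCommitActivity("octocat", "hello-world")
            if _, ok := err.(*github.AcceptedError); ok {
                // GitHub returned 202: statistics are still being computed.
                time.Sleep(2 * time.Second)
                continue
            }
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("got %d weeks of commit activity\n", len(stats))
            return
        }
        log.Fatal("statistics were not ready after several attempts")
    }
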
diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md
index 38ebdcf..3cedd5b 100644
--- a/vendor/github.com/googleapis/gax-go/README.md
+++ b/vendor/github.com/googleapis/gax-go/README.md
@@ -8,4 +8,17 @@ Google API Extensions for Go (gax-go) is a set of modules which aids the
development of APIs for clients and servers based on `gRPC` and Google API
conventions.
-This project is currently experimental and not supported.
+Application code will rarely need to use this library directly,
+but code generated automatically from API definition files can use it
+to simplify code generation and to provide a more convenient and idiomatic API surface.
+
+**This project is currently experimental and not supported.**
+
+Go Versions
+===========
+This library requires Go 1.6 or above.
+
+License
+=======
+BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE)
+for more information.
diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go
index c7e4ce9..5ebedff 100644
--- a/vendor/github.com/googleapis/gax-go/gax.go
+++ b/vendor/github.com/googleapis/gax-go/gax.go
@@ -27,6 +27,14 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Package gax contains a set of modules which aid the development of APIs
+// for clients and servers based on gRPC and Google API conventions.
+//
+// Application code will rarely need to use this library directly.
+// However, code generated automatically from API definition files can use it
+// to simplify code generation and to provide more convenient and idiomatic API surfaces.
+//
+// This project is currently experimental and not supported.
package gax
const Version = "0.1.0"
diff --git a/vendor/github.com/gorilla/csrf/README.md b/vendor/github.com/gorilla/csrf/README.md
index daa3c87..8cad716 100644
--- a/vendor/github.com/gorilla/csrf/README.md
+++ b/vendor/github.com/gorilla/csrf/README.md
@@ -130,7 +130,7 @@ func main() {
r := mux.NewRouter()
api := r.PathPrefix("/api").Subrouter()
- api.HandleFunc("/user/:id", GetUser).Methods("GET")
+ api.HandleFunc("/user/{id}", GetUser).Methods("GET")
http.ListenAndServe(":8000",
csrf.Protect([]byte("32-byte-long-auth-key"))(r))
diff --git a/vendor/github.com/gorilla/csrf/csrf.go b/vendor/github.com/gorilla/csrf/csrf.go
index 926be23..cc7878f 100644
--- a/vendor/github.com/gorilla/csrf/csrf.go
+++ b/vendor/github.com/gorilla/csrf/csrf.go
@@ -89,21 +89,25 @@ type options struct {
// package main
//
// import (
-// "github.com/elithrar/protect"
+// "html/template"
+//
+// "github.com/gorilla/csrf"
// "github.com/gorilla/mux"
// )
//
+// var t = template.Must(template.New("signup_form.tmpl").Parse(form))
+//
// func main() {
-// r := mux.NewRouter()
+// r := mux.NewRouter()
//
-// mux.HandlerFunc("/signup", GetSignupForm)
-// // POST requests without a valid token will return a HTTP 403 Forbidden.
-// mux.HandlerFunc("/signup/post", PostSignupForm)
+// r.HandleFunc("/signup", GetSignupForm)
+// // POST requests without a valid token will return a HTTP 403 Forbidden.
+// r.HandleFunc("/signup/post", PostSignupForm)
//
-// // Add the middleware to your router.
-// http.ListenAndServe(":8000",
-// // Note that the authentication key provided should be 32 bytes
-// // long and persist across application restarts.
+// // Add the middleware to your router.
+// http.ListenAndServe(":8000",
+// // Note that the authentication key provided should be 32 bytes
+// // long and persist across application restarts.
// csrf.Protect([]byte("32-byte-long-auth-key"))(r))
// }
//
diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go
index 65b7de5..b1be9dc 100644
--- a/vendor/github.com/gorilla/handlers/recovery.go
+++ b/vendor/github.com/gorilla/handlers/recovery.go
@@ -6,9 +6,14 @@ import (
"runtime/debug"
)
+// RecoveryHandlerLogger is an interface used by the recovering handler to print logs.
+type RecoveryHandlerLogger interface {
+ Println(...interface{})
+}
+
type recoveryHandler struct {
handler http.Handler
- logger *log.Logger
+ logger RecoveryHandlerLogger
printStack bool
}
@@ -46,7 +51,7 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler {
// RecoveryLogger is a functional option to override
// the default logger
-func RecoveryLogger(logger *log.Logger) RecoveryOption {
+func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption {
return func(h http.Handler) {
r := h.(*recoveryHandler)
r.logger = logger
@@ -73,11 +78,11 @@ func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
h.handler.ServeHTTP(w, req)
}
-func (h recoveryHandler) log(message interface{}) {
+func (h recoveryHandler) log(v ...interface{}) {
if h.logger != nil {
- h.logger.Println(message)
+ h.logger.Println(v...)
} else {
- log.Println(message)
+ log.Println(v...)
}
if h.printStack {
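
Because RecoveryLogger now accepts any value with a Println method, callers are no longer tied to *log.Logger. A minimal sketch with a hypothetical stderrLogger adapter:

    package main

    import (
        "fmt"
        "log"
        "net/http"
        "os"

        "github.com/gorilla/handlers"
    )

    // stderrLogger is any type with a Println method; it satisfies
    // handlers.RecoveryHandlerLogger without being a *log.Logger.
    type stderrLogger struct{}

    func (stderrLogger) Println(v ...interface{}) {
        fmt.Fprintln(os.Stderr, v...)
    }

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/boom", func(w http.ResponseWriter, r *http.Request) {
            panic("something broke")
        })

        recovered := handlers.RecoveryHandler(handlers.RecoveryLogger(stderrLogger{}))(mux)
        log.Fatal(http.ListenAndServe(":8000", recovered))
    }
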
diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md
index 65e5e1b..5bb3107 100644
--- a/vendor/github.com/gorilla/sessions/README.md
+++ b/vendor/github.com/gorilla/sessions/README.md
@@ -67,12 +67,14 @@ Other implementations of the `sessions.Store` interface:
* [github.com/dsoprea/go-appengine-sessioncascade](https://github.com/dsoprea/go-appengine-sessioncascade) - Memcache/Datastore/Context in AppEngine
* [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB
* [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL
+* [github.com/EnumApps/clustersqlstore](https://github.com/EnumApps/clustersqlstore) - MySQL Cluster
* [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL
* [github.com/boj/redistore](https://github.com/boj/redistore) - Redis
* [github.com/boj/rethinkstore](https://github.com/boj/rethinkstore) - RethinkDB
* [github.com/boj/riakstore](https://github.com/boj/riakstore) - Riak
* [github.com/michaeljs1990/sqlitestore](https://github.com/michaeljs1990/sqlitestore) - SQLite
* [github.com/wader/gormstore](https://github.com/wader/gormstore) - GORM (MySQL, PostgreSQL, SQLite)
+* [github.com/gernest/qlstore](https://github.com/gernest/qlstore) - ql
## License
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
index 00afa9b..775b6e7 100644
--- a/vendor/github.com/hashicorp/go-multierror/append.go
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error {
for _, e := range errs {
switch e := e.(type) {
case *Error:
- err.Errors = append(err.Errors, e.Errors...)
+ if e != nil {
+ err.Errors = append(err.Errors, e.Errors...)
+ }
default:
- err.Errors = append(err.Errors, e)
+ if e != nil {
+ err.Errors = append(err.Errors, e)
+ }
}
}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
index bb65a12..6c7a3cc 100644
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
// ListFormatFunc is a basic formatter that outputs the number of errors
// that occurred along with a bullet point list of the errors.
func ListFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+ }
+
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
- "%d error(s) occurred:\n\n%s",
+ "%d errors occurred:\n\n%s",
len(es), strings.Join(points, "\n"))
}
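
A small sketch of the two behaviour changes above: nil errors are skipped by Append, and a single accumulated error is reported as "1 error occurred:" rather than "1 error(s) occurred:".

    package main

    import (
        "errors"
        "fmt"

        multierror "github.com/hashicorp/go-multierror"
    )

    func main() {
        var result *multierror.Error

        // nil errors are now skipped rather than appended as nil entries.
        result = multierror.Append(result, nil)
        result = multierror.Append(result, errors.New("disk is full"))

        // With exactly one accumulated error the message reads
        // "1 error occurred:" instead of "1 error(s) occurred:".
        fmt.Println(result.Error())
    }
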
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index c8a077d..0b39c1b 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -91,7 +91,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
return d.decodeBool(name, node, result)
case reflect.Float64:
return d.decodeFloat(name, node, result)
- case reflect.Int:
+ case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
case reflect.Interface:
// When we see an interface, we make our own thing
@@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
- result.Set(reflect.ValueOf(int(v)))
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
return nil
case token.STRING:
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
@@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er
return err
}
- result.Set(reflect.ValueOf(int(v)))
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
return nil
}
}
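
With Int32 and Int64 kinds now routed through decodeInt (and SetInt used for concrete integer kinds), struct fields of those types can be decoded directly. A minimal sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hcl"
    )

    // Limits uses int32/int64 fields, which the decoder now fills via SetInt.
    type Limits struct {
        MaxBytes int64 `hcl:"max_bytes"`
        Retries  int32 `hcl:"retries"`
    }

    func main() {
        const config = "max_bytes = 1073741824\nretries = 3\n"

        var l Limits
        if err := hcl.Decode(&l, config); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%+v\n", l)
    }
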
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 54a6493..476ed04 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -256,7 +256,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
- fmt.Println("illegal")
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
default:
return keys, &PosError{
Pos: p.tok.Pos,
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
index 0735d95..6966236 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -95,6 +95,12 @@ func (s *Scanner) next() rune {
s.srcPos.Column = 0
}
+ // If we see a null character with data left, then that is an error
+ if ch == '\x00' && s.buf.Len() > 0 {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
@@ -474,7 +480,7 @@ func (s *Scanner) scanString() {
// read character after quote
ch := s.next()
- if ch < 0 || ch == eof {
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
index 956c899..5f981ea 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -27,6 +27,9 @@ func Unquote(s string) (t string, err error) {
if quote != '"' {
return "", ErrSyntax
}
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
@@ -46,7 +49,7 @@ func Unquote(s string) (t string, err error) {
for len(s) > 0 {
// If we're starting a '${}' then let it through un-unquoted.
// Specifically: we don't unquote any characters within the `${}`
- // section, except for escaped backslashes, which we handle specifically.
+ // section.
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
buf = append(buf, '$', '{')
s = s[2:]
@@ -61,16 +64,6 @@ func Unquote(s string) (t string, err error) {
s = s[size:]
- // We special case escaped backslashes in interpolations, converting
- // them to their unescaped equivalents.
- if r == '\\' {
- q, _ := utf8.DecodeRuneInString(s)
- switch q {
- case '\\':
- continue
- }
- }
-
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
@@ -94,6 +87,10 @@ func Unquote(s string) (t string, err error) {
}
}
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index 4aee40c..88a8ea4 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -48,7 +48,7 @@ type Config struct {
redirectSetup sync.Once
// MaxRetries controls the maximum number of times to retry when a 5xx error
- // occurs. Set to 0 or less to disable retrying.
+ // occurs. Set to 0 or less to disable retrying. Defaults to 0.
MaxRetries int
}
@@ -99,8 +99,6 @@ func DefaultConfig() *Config {
config.Address = v
}
- config.MaxRetries = pester.DefaultClient.MaxRetries
-
return config
}
@@ -289,6 +287,11 @@ func (c *Client) SetAddress(addr string) error {
return nil
}
+// Address returns the Vault URL the client is configured to connect to
+func (c *Client) Address() string {
+ return c.addr.String()
+}
+
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index 9753e96..0d5e7d4 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -119,9 +119,13 @@ func (c *Logical) Delete(path string) (*Secret, error) {
func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
var data map[string]interface{}
- if wrappingToken != "" && wrappingToken != c.c.Token() {
- data = map[string]interface{}{
- "token": wrappingToken,
+ if wrappingToken != "" {
+ if c.c.Token() == "" {
+ c.c.SetToken(wrappingToken)
+ } else if wrappingToken != c.c.Token() {
+ data = map[string]interface{}{
+ "token": wrappingToken,
+ }
}
}
@@ -134,8 +138,13 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
if resp != nil {
defer resp.Body.Close()
}
- if err != nil && resp.StatusCode != 404 {
- return nil, err
+ if err != nil {
+ if resp != nil && resp.StatusCode != 404 {
+ return nil, err
+ }
+ }
+ if resp == nil {
+ return nil, nil
}
switch resp.StatusCode {
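
With this change, a client that has no token of its own adopts the wrapping token for the unwrap call, and a nil response no longer causes a crash. A minimal sketch with a placeholder wrapping token:

    package main

    import (
        "fmt"
        "log"

        vaultapi "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // No client token is set here: the wrapping token (a placeholder below)
        // is adopted as the client token for the unwrap call.
        secret, err := client.Logical().Unwrap("placeholder-wrapping-token")
        if err != nil {
            log.Fatal(err)
        }
        if secret == nil {
            fmt.Println("nothing was unwrapped")
            return
        }
        fmt.Printf("unwrapped data: %#v\n", secret.Data)
    }
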
diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE
deleted file mode 100644
index 7448756..0000000
--- a/vendor/github.com/kr/fs/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme
deleted file mode 100644
index c95e13f..0000000
--- a/vendor/github.com/kr/fs/Readme
+++ /dev/null
@@ -1,3 +0,0 @@
-Filesystem Package
-
-http://godoc.org/github.com/kr/fs
diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go
deleted file mode 100644
index f1c4805..0000000
--- a/vendor/github.com/kr/fs/filesystem.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package fs
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
-)
-
-// FileSystem defines the methods of an abstract filesystem.
-type FileSystem interface {
-
- // ReadDir reads the directory named by dirname and returns a
- // list of directory entries.
- ReadDir(dirname string) ([]os.FileInfo, error)
-
- // Lstat returns a FileInfo describing the named file. If the file is a
- // symbolic link, the returned FileInfo describes the symbolic link. Lstat
- // makes no attempt to follow the link.
- Lstat(name string) (os.FileInfo, error)
-
- // Join joins any number of path elements into a single path, adding a
- // separator if necessary. The result is Cleaned; in particular, all
- // empty strings are ignored.
- //
- // The separator is FileSystem specific.
- Join(elem ...string) string
-}
-
-// fs represents a FileSystem provided by the os package.
-type fs struct{}
-
-func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) }
-
-func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
-
-func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) }
diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go
deleted file mode 100644
index 6ffa1e0..0000000
--- a/vendor/github.com/kr/fs/walk.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Package fs provides filesystem-related functions.
-package fs
-
-import (
- "os"
-)
-
-// Walker provides a convenient interface for iterating over the
-// descendants of a filesystem path.
-// Successive calls to the Step method will step through each
-// file or directory in the tree, including the root. The files
-// are walked in lexical order, which makes the output deterministic
-// but means that for very large directories Walker can be inefficient.
-// Walker does not follow symbolic links.
-type Walker struct {
- fs FileSystem
- cur item
- stack []item
- descend bool
-}
-
-type item struct {
- path string
- info os.FileInfo
- err error
-}
-
-// Walk returns a new Walker rooted at root.
-func Walk(root string) *Walker {
- return WalkFS(root, new(fs))
-}
-
-// WalkFS returns a new Walker rooted at root on the FileSystem fs.
-func WalkFS(root string, fs FileSystem) *Walker {
- info, err := fs.Lstat(root)
- return &Walker{
- fs: fs,
- stack: []item{{root, info, err}},
- }
-}
-
-// Step advances the Walker to the next file or directory,
-// which will then be available through the Path, Stat,
-// and Err methods.
-// It returns false when the walk stops at the end of the tree.
-func (w *Walker) Step() bool {
- if w.descend && w.cur.err == nil && w.cur.info.IsDir() {
- list, err := w.fs.ReadDir(w.cur.path)
- if err != nil {
- w.cur.err = err
- w.stack = append(w.stack, w.cur)
- } else {
- for i := len(list) - 1; i >= 0; i-- {
- path := w.fs.Join(w.cur.path, list[i].Name())
- w.stack = append(w.stack, item{path, list[i], nil})
- }
- }
- }
-
- if len(w.stack) == 0 {
- return false
- }
- i := len(w.stack) - 1
- w.cur = w.stack[i]
- w.stack = w.stack[:i]
- w.descend = true
- return true
-}
-
-// Path returns the path to the most recent file or directory
-// visited by a call to Step. It contains the argument to Walk
-// as a prefix; that is, if Walk is called with "dir", which is
-// a directory containing the file "a", Path will return "dir/a".
-func (w *Walker) Path() string {
- return w.cur.path
-}
-
-// Stat returns info for the most recent file or directory
-// visited by a call to Step.
-func (w *Walker) Stat() os.FileInfo {
- return w.cur.info
-}
-
-// Err returns the error, if any, for the most recent attempt
-// by Step to visit a file or directory. If a directory has
-// an error, w will not descend into that directory.
-func (w *Walker) Err() error {
- return w.cur.err
-}
-
-// SkipDir causes the currently visited directory to be skipped.
-// If w is not on a directory, SkipDir has no effect.
-func (w *Walker) SkipDir() {
- w.descend = false
-}
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index bf49a13..415b9f8 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -1,5 +1,9 @@
## Changelog
+### Unreleased
+
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+
### [1.7.0](https://github.com/magiconair/properties/tags/v1.7.0) - 20 Mar 2016
* [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#Properties.LoadURL) method to load properties from a URL.
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index e7e0104..50209d8 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -632,14 +632,14 @@ func (p *Properties) Delete(key string) {
// Merge merges properties, comments and keys from other *Properties into p
func (p *Properties) Merge(other *Properties) {
- for k,v := range other.m {
+ for k, v := range other.m {
p.m[k] = v
}
- for k,v := range other.c {
+ for k, v := range other.c {
p.c[k] = v
}
- outer:
+outer:
for _, otherKey := range other.k {
for _, key := range p.k {
if otherKey == key {
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
index c2e0d5a..a875e31 100644
--- a/vendor/github.com/mattn/go-sqlite3/README.md
+++ b/vendor/github.com/mattn/go-sqlite3/README.md
@@ -64,6 +64,10 @@ FAQ
Use `loc=auto` in SQLite3 filename schema like `file:foo.db?loc=auto`.
+* Can I use this in multiple goroutines concurrently?
+
+ Yes for read-only access, but not for writes. See #50, #51, #209.
+
License
-------
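
The FAQ entry above only states the caveat; one common way to work within it (an assumption of this sketch, not something the driver mandates) is to cap the pool at a single connection so writes are serialised:

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        db, err := sql.Open("sqlite3", "app.db")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // Serialise writers by capping the pool at one connection, so multiple
        // goroutines sharing db never write concurrently.
        db.SetMaxOpenConns(1)

        if _, err := db.Exec("CREATE TABLE IF NOT EXISTS notes (id INTEGER PRIMARY KEY, body TEXT)"); err != nil {
            log.Fatal(err)
        }
    }
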
diff --git a/vendor/github.com/mattn/go-sqlite3/backup.go b/vendor/github.com/mattn/go-sqlite3/backup.go
index 05f8038..5ab3a54 100644
--- a/vendor/github.com/mattn/go-sqlite3/backup.go
+++ b/vendor/github.com/mattn/go-sqlite3/backup.go
@@ -19,10 +19,12 @@ import (
"unsafe"
)
+// SQLiteBackup represents a database backup in progress.
type SQLiteBackup struct {
b *C.sqlite3_backup
}
+// Backup creates a backup from src to dest.
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
destptr := C.CString(dest)
defer C.free(unsafe.Pointer(destptr))
@@ -37,10 +39,10 @@ func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteB
return nil, c.lastError()
}
-// Backs up for one step. Calls the underlying `sqlite3_backup_step` function.
-// This function returns a boolean indicating if the backup is done and
-// an error signalling any other error. Done is returned if the underlying C
-// function returns SQLITE_DONE (Code 101)
+// Step performs a single backup step by calling the underlying
+// `sqlite3_backup_step` function. It returns a boolean indicating whether the
+// backup is done, plus an error signalling any other failure. Done is true
+// when the underlying C function returns SQLITE_DONE (code 101).
func (b *SQLiteBackup) Step(p int) (bool, error) {
ret := C.sqlite3_backup_step(b.b, C.int(p))
if ret == C.SQLITE_DONE {
@@ -51,18 +53,22 @@ func (b *SQLiteBackup) Step(p int) (bool, error) {
return false, nil
}
+// Remaining returns the number of pages still to be backed up.
func (b *SQLiteBackup) Remaining() int {
return int(C.sqlite3_backup_remaining(b.b))
}
+// PageCount returns the total page count of the backup.
func (b *SQLiteBackup) PageCount() int {
return int(C.sqlite3_backup_pagecount(b.b))
}
+// Finish closes the backup.
func (b *SQLiteBackup) Finish() error {
return b.Close()
}
+// Close closes the backup.
func (b *SQLiteBackup) Close() error {
ret := C.sqlite3_backup_finish(b.b)
diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go
index 190b695..48fc63a 100644
--- a/vendor/github.com/mattn/go-sqlite3/callback.go
+++ b/vendor/github.com/mattn/go-sqlite3/callback.go
@@ -40,8 +40,8 @@ func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value
}
//export stepTrampoline
-func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
- args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
+func stepTrampoline(ctx *C.sqlite3_context, argc C.int, argv **C.sqlite3_value) {
+ args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:int(argc):int(argc)]
ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
ai.Step(ctx, args)
}
diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go
index b910108..1f14aba 100644
--- a/vendor/github.com/mattn/go-sqlite3/error.go
+++ b/vendor/github.com/mattn/go-sqlite3/error.go
@@ -7,12 +7,16 @@ package sqlite3
import "C"
+// ErrNo represents an SQLite error number (errno).
type ErrNo int
+// ErrNoMask is the mask for the primary error code.
const ErrNoMask C.int = 0xff
+// ErrNoExtended is an extended SQLite error number.
type ErrNoExtended int
+// Error holds an SQLite error code and message.
type Error struct {
Code ErrNo /* The error code returned by SQLite */
ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
@@ -52,18 +56,22 @@ var (
ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
)
+// Error returns the error message for the errno.
func (err ErrNo) Error() string {
return Error{Code: err}.Error()
}
+// Extend returns an extended errno.
func (err ErrNo) Extend(by int) ErrNoExtended {
return ErrNoExtended(int(err) | (by << 8))
}
+// Error returns the error message for the extended code.
func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
+// Error returns the error message.
func (err Error) Error() string {
if err.err != "" {
return err.err
@@ -121,7 +129,7 @@ var (
ErrConstraintTrigger = ErrConstraint.Extend(7)
ErrConstraintUnique = ErrConstraint.Extend(8)
ErrConstraintVTab = ErrConstraint.Extend(9)
- ErrConstraintRowId = ErrConstraint.Extend(10)
+ ErrConstraintRowID = ErrConstraint.Extend(10)
ErrNoticeRecoverWAL = ErrNotice.Extend(1)
ErrNoticeRecoverRollback = ErrNotice.Extend(2)
ErrWarningAutoIndex = ErrWarning.Extend(1)
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
index a8790de..7a852e6 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
@@ -1,6 +1,7 @@
+#ifndef USE_LIBSQLITE3
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.14.0. By combining all the individual C code files into this
+** version 3.15.1. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -9,7 +10,7 @@
**
** This file is all you need to compile SQLite. To use SQLite in other
** programs, you need this file and the "sqlite3.h" header file that defines
-** the programming interface to the SQLite library. (If you do not have
+** the programming interface to the SQLite library. (If you do not have
** the "sqlite3.h" header file at hand, you will find a copy embedded within
** the text of this file. Search for "Begin file sqlite3.h" to find the start
** of the embedded sqlite3.h header file.) Additional code files may be needed
@@ -17,7 +18,6 @@
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
*/
-#ifndef USE_LIBSQLITE3
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1
#ifndef SQLITE_PRIVATE
@@ -369,7 +369,8 @@ extern "C" {
** be held constant and Z will be incremented or else Y will be incremented
** and Z will be reset to zero.
**
-** Since version 3.6.18, SQLite source code has been stored in the
+** Since [version 3.6.18] ([dateof:3.6.18]),
+** SQLite source code has been stored in the
** <a href="http://www.fossil-scm.org/">Fossil configuration management
** system</a>. ^The SQLITE_SOURCE_ID macro evaluates to
** a string which identifies a particular check-in of SQLite
@@ -381,9 +382,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.14.0"
-#define SQLITE_VERSION_NUMBER 3014000
-#define SQLITE_SOURCE_ID "2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de"
+#define SQLITE_VERSION "3.15.1"
+#define SQLITE_VERSION_NUMBER 3015001
+#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -416,9 +417,9 @@ extern "C" {
** See also: [sqlite_version()] and [sqlite_source_id()].
*/
SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
/*
** CAPI3REF: Run-Time Library Compilation Options Diagnostics
@@ -443,8 +444,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
** [sqlite_compileoption_get()] and the [compile_options pragma].
*/
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
#endif
/*
@@ -483,7 +484,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
**
** See the [threading mode] documentation for additional information.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void);
+SQLITE_API int sqlite3_threadsafe(void);
/*
** CAPI3REF: Database Connection Handle
@@ -580,8 +581,8 @@ typedef sqlite_uint64 sqlite3_uint64;
** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
** argument is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*);
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*);
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
/*
** The type for a callback function.
@@ -652,7 +653,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** </ul>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3*, /* An open database */
const char *sql, /* SQL to be evaluated */
int (*callback)(void*,int,char**,char**), /* Callback function */
@@ -713,7 +714,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
** [result codes]. However, experience has shown that many of
** these result codes are too coarse-grained. They do not provide as
** much information about problems as programmers might like. In an effort to
-** address this, newer versions of SQLite (version 3.3.8 and later) include
+** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8]
+** and later) include
** support for additional result codes that provide more detailed information
** about errors. These [extended result codes] are enabled or disabled
** on a per database connection basis using the
@@ -1237,6 +1239,12 @@ struct sqlite3_io_methods {
** on whether or not the file has been renamed, moved, or deleted since it
** was first opened.
**
+** <li>[[SQLITE_FCNTL_WIN32_GET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the
+** underlying native file handle associated with a file handle. This file
+** control interprets its argument as a pointer to a native file handle and
+** writes the resulting value there.
+**
** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
** opcode causes the xFileControl method to swap the file handle with the one
@@ -1287,6 +1295,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_RBU 26
#define SQLITE_FCNTL_VFS_POINTER 27
#define SQLITE_FCNTL_JOURNAL_POINTER 28
+#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -1651,10 +1660,10 @@ struct sqlite3_vfs {
** must return [SQLITE_OK] on success and some other [error code] upon
** failure.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
/*
** CAPI3REF: Configuring The SQLite Library
@@ -1687,7 +1696,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** ^If the option is unknown or SQLite is unable to set the option
** then this routine returns a non-zero [error code].
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
+SQLITE_API int sqlite3_config(int, ...);
/*
** CAPI3REF: Configure database connections
@@ -1706,7 +1715,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
** the call is considered successful.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Memory Allocation Routines
@@ -2230,8 +2239,18 @@ struct sqlite3_mem_methods {
** be a NULL pointer, in which case the new setting is not reported back.
** </dd>
**
+** <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
+** <dd> ^This option is used to change the name of the "main" database
+** schema. ^The sole argument is a pointer to a constant UTF8 string
+** which will become the new schema name in place of "main". ^SQLite
+** does not make a copy of the new main schema name string, so the application
+** must ensure that the argument passed into this DBCONFIG option is unchanged
+** until after the database connection closes.
+** </dd>
+**
** </dl>
*/
+#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
@@ -2247,7 +2266,7 @@ struct sqlite3_mem_methods {
** [extended result codes] feature of SQLite. ^The extended result
** codes are disabled by default for historical compatibility.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff);
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
/*
** CAPI3REF: Last Insert Rowid
@@ -2299,7 +2318,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff)
** unpredictable and might not equal either the old or the new
** last insert [rowid].
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
/*
** CAPI3REF: Count The Number Of Rows Modified
@@ -2352,7 +2371,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
+SQLITE_API int sqlite3_changes(sqlite3*);
/*
** CAPI3REF: Total Number Of Rows Modified
@@ -2376,7 +2395,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
+SQLITE_API int sqlite3_total_changes(sqlite3*);
/*
** CAPI3REF: Interrupt A Long-Running Query
@@ -2416,7 +2435,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
** If the database connection closes while [sqlite3_interrupt()]
** is running then bad things will likely happen.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
+SQLITE_API void sqlite3_interrupt(sqlite3*);
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -2451,8 +2470,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
** The input to [sqlite3_complete16()] must be a zero-terminated
** UTF-16 string in native byte order.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql);
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
/*
** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
@@ -2513,7 +2532,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
** A busy handler must not close the database connection
** or [prepared statement] that invoked the busy handler.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
+SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
/*
** CAPI3REF: Set A Busy Timeout
@@ -2536,7 +2555,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),vo
**
** See also: [PRAGMA busy_timeout]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
/*
** CAPI3REF: Convenience Routines For Running Queries
@@ -2611,7 +2630,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
** reflected in subsequent calls to [sqlite3_errcode()] or
** [sqlite3_errmsg()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* An open database */
const char *zSql, /* SQL to be evaluated */
char ***pazResult, /* Results of the query */
@@ -2619,7 +2638,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
int *pnColumn, /* Number of result columns written here */
char **pzErrmsg /* Error msg written here */
);
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
+SQLITE_API void sqlite3_free_table(char **result);
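
A sketch of the sqlite3_get_table()/sqlite3_free_table() pairing; the users table, its columns, and the helper name are assumptions, and db is taken to be an already open connection:

#include <stdio.h>
#include "sqlite3.h"

static int dump_users(sqlite3 *db) {
  char **azResult = 0;
  char *zErrMsg = 0;
  int nRow = 0, nCol = 0;
  int rc = sqlite3_get_table(db, "SELECT id, name FROM users",
                             &azResult, &nRow, &nCol, &zErrMsg);
  if (rc != SQLITE_OK) {
    fprintf(stderr, "get_table failed: %s\n", zErrMsg ? zErrMsg : "?");
    sqlite3_free(zErrMsg);
    return rc;
  }
  /* azResult[0..nCol-1] hold the column names; data rows follow. */
  for (int i = 1; i <= nRow; i++) {
    printf("%s -> %s\n", azResult[i*nCol], azResult[i*nCol + 1]);
  }
  sqlite3_free_table(azResult);
  return SQLITE_OK;
}
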
/*
** CAPI3REF: Formatted String Printing Functions
@@ -2725,10 +2744,10 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** addition that after the string has been read and copied into
** the result, [sqlite3_free()] is called on the input string.)^
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list);
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list);
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
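
A short sketch of the %q conversion, which doubles embedded single quotes so the result can sit inside an SQL string literal; the table name is an assumption, and the returned buffer must eventually go back to sqlite3_free():

#include "sqlite3.h"

static char *quoted_insert(const char *name) {
  /* "O'Brien" becomes: INSERT INTO t(name) VALUES('O''Brien') */
  return sqlite3_mprintf("INSERT INTO t(name) VALUES('%q')", name);
}
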
/*
** CAPI3REF: Memory Allocation Subsystem
@@ -2818,12 +2837,12 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** a block of memory after it has been released using
** [sqlite3_free()] or [sqlite3_realloc()].
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64);
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void*);
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64);
+SQLITE_API void sqlite3_free(void*);
+SQLITE_API sqlite3_uint64 sqlite3_msize(void*);
/*
** CAPI3REF: Memory Allocator Statistics
@@ -2848,8 +2867,8 @@ SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
** by [sqlite3_memory_highwater(1)] is the high-water mark
** prior to the reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
/*
** CAPI3REF: Pseudo-Random Number Generator
@@ -2872,7 +2891,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
** internally and without recourse to the [sqlite3_vfs] xRandomness
** method.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
+SQLITE_API void sqlite3_randomness(int N, void *P);
/*
** CAPI3REF: Compile-Time Authorization Callbacks
@@ -2955,7 +2974,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
** as stated in the previous paragraph, sqlite3_step() invokes
** sqlite3_prepare_v2() to reprepare a statement after a schema change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
@@ -3063,9 +3082,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** sqlite3_profile() function is considered experimental and is
** subject to change in future versions of SQLite.
*/
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_trace(sqlite3*,
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
void(*xTrace)(void*,const char*), void*);
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
/*
@@ -3154,7 +3173,7 @@ SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which
** are deprecated.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
+SQLITE_API int sqlite3_trace_v2(
sqlite3*,
unsigned uMask,
int(*xCallback)(unsigned,void*,void*,void*),
@@ -3193,7 +3212,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
** database connections for the meaning of "modify" in this paragraph.
**
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
/*
** CAPI3REF: Opening A New Database Connection
@@ -3422,15 +3441,15 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
**
** See also: [sqlite3_temp_directory]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *filename, /* Database filename (UTF-16) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -3476,9 +3495,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
** VFS method, then the behavior of this routine is undefined and probably
** undesirable.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
/*
@@ -3522,11 +3541,11 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const cha
** was invoked incorrectly by the application. In that case, the
** error code and message may or may not be set.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db);
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int);
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
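
A small helper built on these routines; db is assumed to be an open connection on which an API call has just failed:

#include <stdio.h>
#include "sqlite3.h"

static void report_error(sqlite3 *db) {
  fprintf(stderr, "sqlite error %d/%d: %s\n",
          sqlite3_errcode(db),           /* primary result code */
          sqlite3_extended_errcode(db),  /* extended result code */
          sqlite3_errmsg(db));           /* UTF-8 message text */
}
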
/*
** CAPI3REF: Prepared Statement Object
@@ -3594,7 +3613,7 @@ typedef struct sqlite3_stmt sqlite3_stmt;
**
** New run-time limit categories may be added in future releases.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
/*
** CAPI3REF: Run-Time Limit Categories
@@ -3746,28 +3765,28 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** </li>
** </ol>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const void **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
@@ -3806,8 +3825,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
** is obtained from [sqlite3_malloc()] and must be freed by the application
** by passing it to [sqlite3_free()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
-SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If An SQL Statement Writes The Database
@@ -3839,7 +3858,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt);
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
@@ -3860,7 +3879,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
** for example, in diagnostic routines to search for prepared
** statements that are holding a transaction open.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
/*
** CAPI3REF: Dynamically Typed Value Object
@@ -4024,20 +4043,20 @@ typedef struct sqlite3_context sqlite3_context;
** See also: [sqlite3_bind_parameter_count()],
** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
void(*)(void*), unsigned char encoding);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
/*
** CAPI3REF: Number Of SQL Parameters
@@ -4058,7 +4077,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite
** [sqlite3_bind_parameter_name()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
/*
** CAPI3REF: Name Of A Host Parameter
@@ -4086,7 +4105,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
/*
** CAPI3REF: Index Of A Parameter With A Given Name
@@ -4103,7 +4122,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*,
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_name()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
/*
** CAPI3REF: Reset All Bindings On A Prepared Statement
@@ -4113,7 +4132,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const
** the [sqlite3_bind_blob | bindings] on a [prepared statement].
** ^Use this routine to reset all host parameters to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
/*
** CAPI3REF: Number Of Columns In A Result Set
@@ -4125,7 +4144,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
**
** See also: [sqlite3_data_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Column Names In A Result Set
@@ -4154,8 +4173,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
** then the name of the column is unspecified and may change from
** one release of SQLite to the next.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N);
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
/*
** CAPI3REF: Source Of Data In A Query Result
@@ -4203,12 +4222,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N
** for the same [prepared statement] and result column
** at the same time then the results are undefined.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
/*
** CAPI3REF: Declared Datatype Of A Query Result
@@ -4240,8 +4259,8 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*
** is associated with individual values, not with the containers
** used to hold those values.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
/*
** CAPI3REF: Evaluate An SQL Statement
@@ -4302,7 +4321,8 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** other than [SQLITE_ROW] before any subsequent invocation of
** sqlite3_step(). Failure to reset the prepared statement using
** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
-** sqlite3_step(). But after version 3.6.23.1, sqlite3_step() began
+** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
** calling [sqlite3_reset()] automatically in this circumstance rather
** than returning [SQLITE_MISUSE]. This is not considered a compatibility
** break because any application that ever receives an SQLITE_MISUSE error
@@ -4321,7 +4341,7 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** then the more specific [error codes] are returned directly
** by sqlite3_step(). The use of the "v2" interface is recommended.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
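
A sketch of the usual prepare/bind/step/finalize loop through the "v2" interface; the users table, the helper name, and printing to stdout are assumptions for illustration:

#include <stdio.h>
#include "sqlite3.h"

static int print_user(sqlite3 *db, int user_id) {
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, "SELECT name FROM users WHERE id = ?1",
                              -1, &pStmt, 0);
  if (rc != SQLITE_OK) return rc;
  sqlite3_bind_int(pStmt, 1, user_id);
  while ((rc = sqlite3_step(pStmt)) == SQLITE_ROW) {
    printf("name: %s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  /* SQLITE_DONE means the statement ran to completion without error. */
  rc = (rc == SQLITE_DONE) ? SQLITE_OK : rc;
  sqlite3_finalize(pStmt);
  return rc;
}
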
/*
** CAPI3REF: Number of columns in a result set
@@ -4342,7 +4362,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
**
** See also: [sqlite3_column_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Fundamental Datatypes
@@ -4532,16 +4552,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** pointer. Subsequent calls to [sqlite3_errcode()] will return
** [SQLITE_NOMEM].)^
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
/*
** CAPI3REF: Destroy A Prepared Statement Object
@@ -4569,7 +4589,7 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int
** statement after it has been finalized can result in undefined and
** undesirable behavior such as segfaults and heap corruption.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Reset A Prepared Statement Object
@@ -4596,7 +4616,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Create Or Redefine SQL Functions
@@ -4696,7 +4716,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
** close the database connection nor finalize or reset the prepared
** statement in which the function is running.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4706,7 +4726,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -4716,7 +4736,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
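
A sketch of registering a one-argument scalar SQL function; the SQL-level name strlen_utf8 and the C helper names are invented for illustration, while SQLITE_UTF8 and SQLITE_DETERMINISTIC are standard flags:

#include <string.h>
#include "sqlite3.h"

static void strlen_func(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
  (void)argc;  /* registered with nArg == 1 */
  const unsigned char *z = sqlite3_value_text(argv[0]);
  sqlite3_result_int(ctx, z ? (int)strlen((const char*)z) : 0);
}

static int register_strlen(sqlite3 *db) {
  return sqlite3_create_function(db, "strlen_utf8", 1,
                                 SQLITE_UTF8 | SQLITE_DETERMINISTIC,
                                 0 /* pApp */, strlen_func, 0, 0);
}
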
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4762,12 +4782,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
** these functions, we will not explain what they do.
*/
#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void);
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
void*,sqlite3_int64);
#endif
@@ -4817,18 +4837,18 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
/*
** CAPI3REF: Finding The Subtype Of SQL Values
@@ -4844,7 +4864,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
** from the result of one [application-defined SQL function] into the
** input of another.
*/
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
/*
** CAPI3REF: Copy And Free SQL Values
@@ -4860,8 +4880,8 @@ SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
** then sqlite3_value_free(V) is a harmless no-op.
*/
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*);
+SQLITE_API void sqlite3_value_free(sqlite3_value*);
/*
** CAPI3REF: Obtain Aggregate Function Context
@@ -4906,7 +4926,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
** This routine must be called from the same thread in which
** the aggregate SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes);
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
/*
** CAPI3REF: User Data For Functions
@@ -4921,7 +4941,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int
** This routine must be called from the same thread in which
** the application-defined function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
/*
** CAPI3REF: Database Connection For Functions
@@ -4933,7 +4953,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
** and [sqlite3_create_function16()] routines that originally
** registered the application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
/*
** CAPI3REF: Function Auxiliary Data
@@ -4987,8 +5007,8 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
** These routines must be called from the same thread in which
** the SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
/*
@@ -5124,27 +5144,27 @@ typedef void (*sqlite3_destructor_type)(void*);
** than the one containing the application-defined function that received
** the [sqlite3_context] pointer, the results are undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*,
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*,
sqlite3_uint64,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
void(*)(void*), unsigned char encoding);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
/*
@@ -5159,7 +5179,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
+SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
/*
** CAPI3REF: Define New Collating Sequences
@@ -5241,14 +5261,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned
**
** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void *pArg,
int(*xCompare)(void*,int,const void*,int,const void*)
);
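
A sketch of a custom collating sequence that orders text by byte length and then bytewise; the collation name BYLEN and the helper names are assumptions:

#include <string.h>
#include "sqlite3.h"

static int bylen_cmp(void *pArg, int n1, const void *z1, int n2, const void *z2) {
  (void)pArg;
  if (n1 != n2) return n1 - n2;           /* shorter strings sort first */
  return memcmp(z1, z2, (size_t)n1);      /* same length: bytewise order */
}

static int register_bylen(sqlite3 *db) {
  return sqlite3_create_collation(db, "BYLEN", SQLITE_UTF8, 0, bylen_cmp);
}
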
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3*,
const char *zName,
int eTextRep,
@@ -5256,7 +5276,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
int(*xCompare)(void*,int,const void*,int,const void*),
void(*xDestroy)(void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3*,
const void *zName,
int eTextRep,
@@ -5291,12 +5311,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
** [sqlite3_create_collation_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const char*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const void*)
@@ -5310,11 +5330,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_key(
+SQLITE_API int sqlite3_key(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
+SQLITE_API int sqlite3_key_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The key */
@@ -5328,11 +5348,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey(
+SQLITE_API int sqlite3_rekey(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The new key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
+SQLITE_API int sqlite3_rekey_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The new key */
@@ -5342,7 +5362,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
** Specify the activation key for a SEE database. Unless
** activated, none of the SEE routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
+SQLITE_API void sqlite3_activate_see(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5352,7 +5372,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
** Specify the activation key for a CEROD database. Unless
** activated, none of the CEROD routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
+SQLITE_API void sqlite3_activate_cerod(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5374,7 +5394,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
** all, then the behavior of sqlite3_sleep() may deviate from the description
** in the previous paragraphs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
+SQLITE_API int sqlite3_sleep(int);
/*
** CAPI3REF: Name Of The Folder Holding Temporary Files
@@ -5493,7 +5513,7 @@ SQLITE_API char *sqlite3_data_directory;
** connection while this routine is running, then the return value
** is undefined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
/*
** CAPI3REF: Find The Database Handle Of A Prepared Statement
@@ -5506,7 +5526,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
** create the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
/*
** CAPI3REF: Return The Filename For A Database Connection
@@ -5523,7 +5543,7 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
** will be an absolute pathname, even if the filename used
** to open the database originally was a URI or relative pathname.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Determine if a database is read-only
@@ -5533,7 +5553,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const cha
** of connection D is read-only, 0 if it is read/write, or -1 if N is not
** the name of a database on connection D.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Find the next prepared statement
@@ -5549,7 +5569,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** [sqlite3_next_stmt(D,S)] must refer to an open database
** connection and in particular must not be a NULL pointer.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
/*
** CAPI3REF: Commit And Rollback Notification Callbacks
@@ -5598,8 +5618,8 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
**
** See also the [sqlite3_update_hook()] interface.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
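
A sketch of installing a commit hook; returning non-zero from the callback converts the commit into a rollback, so a pure logging hook returns 0. The logging itself is an assumption:

#include <stdio.h>
#include "sqlite3.h"

static int on_commit(void *pArg) {
  (void)pArg;
  fputs("transaction committed\n", stderr);
  return 0;  /* non-zero would force a rollback */
}

static void install_commit_logger(sqlite3 *db) {
  sqlite3_commit_hook(db, on_commit, 0);
}
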
/*
** CAPI3REF: Data Change Notification Callbacks
@@ -5650,7 +5670,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *),
** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
** and [sqlite3_preupdate_hook()] interfaces.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3*,
void(*)(void *,int ,char const *,char const *,sqlite3_int64),
void*
@@ -5665,7 +5685,8 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** and disabled if the argument is false.)^
**
** ^Cache sharing is enabled and disabled for an entire process.
-** This is a change as of SQLite version 3.5.0. In prior versions of SQLite,
+** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
+** In prior versions of SQLite,
** sharing was enabled or disabled for each thread separately.
**
** ^(The cache sharing mode set by this interface affects all subsequent
@@ -5690,7 +5711,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
**
** See Also: [SQLite Shared-Cache Mode]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
+SQLITE_API int sqlite3_enable_shared_cache(int);
/*
** CAPI3REF: Attempt To Free Heap Memory
@@ -5706,7 +5727,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
**
** See also: [sqlite3_db_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
+SQLITE_API int sqlite3_release_memory(int);
/*
** CAPI3REF: Free Memory Used By A Database Connection
@@ -5720,7 +5741,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
**
** See also: [sqlite3_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
/*
** CAPI3REF: Impose A Limit On Heap Size
@@ -5759,7 +5780,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** from the heap.
** </ul>)^
**
-** Beginning with SQLite version 3.7.3, the soft heap limit is enforced
+** Beginning with SQLite [version 3.7.3] ([dateof:3.7.3]),
+** the soft heap limit is enforced
** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT]
** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT],
** the soft heap limit is enforced on every memory allocation. Without
@@ -5772,7 +5794,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** The circumstances under which SQLite will enforce the soft heap limit may
** change in future releases of SQLite.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
/*
** CAPI3REF: Deprecated Soft Heap Limit Interface
@@ -5783,7 +5805,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64
** only. All new applications should use the
** [sqlite3_soft_heap_limit64()] interface rather than this one.
*/
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
/*
@@ -5853,7 +5875,7 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** parsed, if that has not already been done, and returns an error if
** any errors are encountered while loading the schema.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
@@ -5909,7 +5931,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
**
** See also the [load_extension() SQL function].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Derived from zFile if 0 */
@@ -5941,7 +5963,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
** remains disabled. This will prevent SQL injections from giving attackers
** access to extension loading capabilities.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
/*
** CAPI3REF: Automatically Load Statically Linked Extensions
@@ -5979,7 +6001,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
** See also: [sqlite3_reset_auto_extension()]
** and [sqlite3_cancel_auto_extension()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
+SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void));
/*
** CAPI3REF: Cancel Automatic Extension Loading
@@ -5991,7 +6013,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
** unregistered and it returns 0 if X was not on the list of initialization
** routines.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
+SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
/*
** CAPI3REF: Reset Automatic Extension Loading
@@ -5999,7 +6021,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(v
** ^This interface disables all automatic extensions previously
** registered using [sqlite3_auto_extension()].
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void);
+SQLITE_API void sqlite3_reset_auto_extension(void);
/*
** The interface to the virtual-table mechanism is currently considered
@@ -6153,13 +6175,15 @@ struct sqlite3_module {
** the xUpdate method are automatically rolled back by SQLite.
**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
-** structure for SQLite version 3.8.2. If a virtual table extension is
+** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
+** If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting
** to read or write the estimatedRows field are undefined (but are likely
** to include crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
** value greater than or equal to 3008002. Similarly, the idxFlags field
-** was added for version 3.9.0. It may therefore only be used if
+** was added for [version 3.9.0] ([dateof:3.9.0]).
+** It may therefore only be used if
** sqlite3_libversion_number() returns a value greater than or equal to
** 3009000.
*/
@@ -6244,13 +6268,13 @@ struct sqlite3_index_info {
** interface is equivalent to sqlite3_create_module_v2() with a NULL
** destructor.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
void *pClientData /* Client data for xCreate/xConnect */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
@@ -6313,7 +6337,7 @@ struct sqlite3_vtab_cursor {
** to declare the format (the names and datatypes of the columns) of
** the virtual tables they implement.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
/*
** CAPI3REF: Overload A Function For A Virtual Table
@@ -6332,7 +6356,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
** purpose is to be a placeholder function that can be overloaded
** by a [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
** The interface to the virtual-table mechanism defined above (back up
@@ -6431,7 +6455,7 @@ typedef struct sqlite3_blob sqlite3_blob;
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3*,
const char *zDb,
const char *zTable,
@@ -6464,7 +6488,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
**
** ^This function sets the database handle error code and message.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
/*
** CAPI3REF: Close A BLOB Handle
@@ -6487,7 +6511,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64)
** is passed a valid open blob handle, the values returned by the
** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
/*
** CAPI3REF: Return The Size Of An Open BLOB
@@ -6503,7 +6527,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
** to this routine results in undefined and probably undesirable behavior.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
/*
** CAPI3REF: Read Data From A BLOB Incrementally
@@ -6532,7 +6556,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
**
** See also: [sqlite3_blob_write()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
/*
** CAPI3REF: Write Data Into A BLOB Incrementally
@@ -6574,7 +6598,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
**
** See also: [sqlite3_blob_read()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
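
A sketch of an incremental BLOB read; the main.t(x) database/table/column names and the 16-byte prefix are assumptions:

#include "sqlite3.h"

static int read_blob_prefix(sqlite3 *db, sqlite3_int64 rowid,
                            unsigned char out[16]) {
  sqlite3_blob *pBlob = 0;
  int rc = sqlite3_blob_open(db, "main", "t", "x", rowid,
                             0 /* read-only */, &pBlob);
  if (rc != SQLITE_OK) return rc;
  rc = sqlite3_blob_read(pBlob, out, 16, 0 /* byte offset */);
  sqlite3_blob_close(pBlob);
  return rc;
}
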
/*
** CAPI3REF: Virtual File System Objects
@@ -6605,9 +6629,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z,
** ^(If the default VFS is unregistered, another VFS is chosen as
** the default. The choice for the new VFS is arbitrary.)^
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
/*
** CAPI3REF: Mutexes
@@ -6723,11 +6747,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
/*
** CAPI3REF: Mutex Methods Object
@@ -6837,8 +6861,8 @@ struct sqlite3_mutex_methods {
** interface should also return 1 when given a NULL pointer.
*/
#ifndef NDEBUG
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
#endif
/*
@@ -6857,7 +6881,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */
#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */
#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */
-#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */
+#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_randomness() */
#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
@@ -6878,7 +6902,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
** ^If the [threading mode] is Single-thread or Multi-thread then this
** routine returns a NULL pointer.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/*
** CAPI3REF: Low-Level Control Of Database Files
@@ -6913,7 +6937,7 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
**
** See also: [SQLITE_FCNTL_LOCKSTATE]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
/*
** CAPI3REF: Testing Interface
@@ -6932,7 +6956,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName
** Unlike most of the SQLite API, this function is not guaranteed to
** operate consistently from one release to the next.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
+SQLITE_API int sqlite3_test_control(int op, ...);
/*
** CAPI3REF: Testing Interface Operation Codes
@@ -6961,6 +6985,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
#define SQLITE_TESTCTRL_BYTEORDER 22
@@ -6995,8 +7020,8 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
**
** See also: [sqlite3_db_status()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
+SQLITE_API int sqlite3_status64(
int op,
sqlite3_int64 *pCurrent,
sqlite3_int64 *pHighwater,
@@ -7121,7 +7146,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
**
** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
/*
** CAPI3REF: Status Parameters for database connections
@@ -7264,7 +7289,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
**
** See also: [sqlite3_status()] and [sqlite3_db_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
/*
** CAPI3REF: Status Parameters for prepared statements
@@ -7733,16 +7758,16 @@ typedef struct sqlite3_backup sqlite3_backup;
** same time as another thread is invoking sqlite3_backup_step() it is
** possible that they return invalid values.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3 *pDest, /* Destination database handle */
const char *zDestName, /* Destination database name */
sqlite3 *pSource, /* Source database handle */
const char *zSourceName /* Source database name */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
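
A sketch of the common single-shot backup pattern (copy everything, then finish); both handles are assumed to be open connections:

#include "sqlite3.h"

static int backup_whole_db(sqlite3 *pDest, sqlite3 *pSource) {
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSource, "main");
  if (p == 0) return sqlite3_errcode(pDest);
  sqlite3_backup_step(p, -1);       /* -1 copies all remaining pages */
  return sqlite3_backup_finish(p);  /* final status of the whole copy */
}
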
/*
** CAPI3REF: Unlock Notification
@@ -7859,7 +7884,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
** the special "DROP TABLE/INDEX" case, the extended error code is just
** SQLITE_LOCKED.)^
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *pBlocked, /* Waiting connection */
void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
void *pNotifyArg /* Argument to pass to xNotify */
@@ -7874,8 +7899,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
** strings in a case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
/*
** CAPI3REF: String Globbing
@@ -7892,7 +7917,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
**
** See also: [sqlite3_strlike()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
/*
** CAPI3REF: String LIKE Matching
@@ -7915,7 +7940,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zSt
**
** See also: [sqlite3_strglob()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
+SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
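
A sketch of the return convention: zero means the string matches the LIKE pattern. The sample strings are arbitrary:

#include <stdio.h>
#include "sqlite3.h"

int main(void) {
  /* Prints 0: "%.md" matches "README.md" under SQL LIKE rules (no escape char). */
  printf("%d\n", sqlite3_strlike("%.md", "README.md", 0));
  return 0;
}
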
/*
** CAPI3REF: Error Logging Interface
@@ -7938,7 +7963,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zSt
** a few hundred characters, it will be truncated to the length of the
** buffer.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...);
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
/*
** CAPI3REF: Write-Ahead Log Commit Hook
@@ -7974,7 +7999,7 @@ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...)
** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
** overwrite any prior [sqlite3_wal_hook()] settings.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3*,
int(*)(void *,sqlite3*,const char*,int),
void*
@@ -8009,7 +8034,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
** is only necessary if the default setting is found to be suboptimal
** for a particular application.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
/*
** CAPI3REF: Checkpoint a database
@@ -8031,7 +8056,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
** start a callback but which do not need the full power (and corresponding
** complication) of [sqlite3_wal_checkpoint_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
/*
** CAPI3REF: Checkpoint a database
@@ -8125,7 +8150,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zD
** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
** from SQL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
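A short sketch of an explicit checkpoint on the "main" database, using the TRUNCATE mode and reading back the two progress counters; the SQLITE_CHECKPOINT_* constants are assumed from the public API.

#include <sqlite3.h>
#include <stdio.h>

static int checkpoint_main(sqlite3 *db){
  int nLog = 0, nCkpt = 0;
  int rc = sqlite3_wal_checkpoint_v2(db, "main", SQLITE_CHECKPOINT_TRUNCATE,
                                     &nLog, &nCkpt);
  printf("rc=%d wal-frames=%d checkpointed=%d\n", rc, nLog, nCkpt);
  return rc;
}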
@@ -8161,7 +8186,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
** may be added in the future.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Virtual Table Configuration Options
@@ -8214,7 +8239,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
** of the SQL statement that triggered the call to the [xUpdate] method of the
** [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
/*
** CAPI3REF: Conflict resolution modes
@@ -8319,7 +8344,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
**
** See also: [sqlite3_stmt_scanstatus_reset()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
+SQLITE_API int sqlite3_stmt_scanstatus(
sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
int idx, /* Index of loop to report on */
int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
@@ -8335,7 +8360,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
** This API is only available if the library is built with pre-processor
** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
/*
** CAPI3REF: Flush caches to disk mid-transaction
@@ -8367,7 +8392,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
** ^This function does not set the database handle error code or message
** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
+SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
/*
** CAPI3REF: The pre-update hook.
@@ -8447,7 +8472,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
**
** See also: [sqlite3_update_hook()]
*/
-SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook(
+SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
sqlite3 *db,
void(*xPreUpdate)(
void *pCtx, /* Copy of third arg to preupdate_hook() */
@@ -8460,10 +8485,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook(
),
void*
);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
/*
** CAPI3REF: Low-level system error code
@@ -8475,7 +8500,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3
** called to get back the underlying "errno" that caused the problem, such
** as ENOSPC, EAUTH, EISDIR, and so forth.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_system_errno(sqlite3*);
+SQLITE_API int sqlite3_system_errno(sqlite3*);
/*
** CAPI3REF: Database Snapshot
@@ -8525,7 +8550,7 @@ typedef struct sqlite3_snapshot sqlite3_snapshot;
** The [sqlite3_snapshot_get()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
sqlite3 *db,
const char *zSchema,
sqlite3_snapshot **ppSnapshot
@@ -8563,7 +8588,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
** The [sqlite3_snapshot_open()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
sqlite3 *db,
const char *zSchema,
sqlite3_snapshot *pSnapshot
@@ -8580,7 +8605,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
** The [sqlite3_snapshot_free()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*);
+SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
/*
** CAPI3REF: Compare the ages of two snapshot handles.
@@ -8604,7 +8629,7 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3
** snapshot than P2, zero if the two handles refer to the same database
** snapshot, and a positive value if P1 is a newer snapshot than P2.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_cmp(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
sqlite3_snapshot *p1,
sqlite3_snapshot *p2
);
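A simplified sketch of the snapshot round trip under SQLITE_ENABLE_SNAPSHOT: record a snapshot from inside one read transaction on a WAL-mode database, then reopen it at the start of a later read transaction. Error handling and the exact transaction-state preconditions are glossed over.

#include <sqlite3.h>

static int reread_snapshot(sqlite3 *db){
  sqlite3_snapshot *pSnap = 0;
  int rc;

  sqlite3_exec(db, "BEGIN", 0, 0, 0);
  sqlite3_exec(db, "SELECT count(*) FROM sqlite_master", 0, 0, 0); /* start the read txn */
  rc = sqlite3_snapshot_get(db, "main", &pSnap);
  sqlite3_exec(db, "COMMIT", 0, 0, 0);
  if( rc!=SQLITE_OK ) return rc;

  /* ... later, possibly after other writers have committed ... */
  sqlite3_exec(db, "BEGIN", 0, 0, 0);
  rc = sqlite3_snapshot_open(db, "main", pSnap);  /* read the old snapshot */
  /* ... queries here see the historical database state ... */
  sqlite3_exec(db, "COMMIT", 0, 0, 0);

  sqlite3_snapshot_free(pSnap);
  return rc;
}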
@@ -8662,7 +8687,7 @@ typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
+SQLITE_API int sqlite3_rtree_geometry_callback(
sqlite3 *db,
const char *zGeom,
int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
@@ -8688,7 +8713,7 @@ struct sqlite3_rtree_geometry {
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
+SQLITE_API int sqlite3_rtree_query_callback(
sqlite3 *db,
const char *zQueryFunc,
int (*xQueryFunc)(sqlite3_rtree_query_info*),
@@ -8900,7 +8925,7 @@ int sqlite3session_attach(
** CAPI3REF: Set a table filter on a Session Object.
**
** The second argument (xFilter) is the "filter callback". For changes to rows
-** in tables that are not attached to the Session oject, the filter is called
+** in tables that are not attached to the Session object, the filter is called
** to determine whether changes to the table's rows should be tracked or not.
** If xFilter returns 0, changes are not tracked. Note that once a table is
** attached, xFilter will not be called again.
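A sketch of such a filter callback: it receives an application context pointer and a table name, and returns non-zero for tables whose changes should be tracked. The registration call, sqlite3session_table_filter(), is declared elsewhere in this header; its exact signature is assumed here, so the registration is left as a comment.

static int track_app_tables(void *pCtx, const char *zTab){
  (void)pCtx;
  return sqlite3_strnicmp(zTab, "app_", 4)==0;   /* non-zero => track changes */
}

/* After sqlite3session_create()/sqlite3session_attach():
**   sqlite3session_table_filter(pSession, track_app_tables, 0);
*/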
@@ -9166,7 +9191,7 @@ int sqlite3session_isempty(sqlite3_session *pSession);
** [sqlite3changeset_invert()] functions, all changes within the changeset
** that apply to a single table are grouped together. This means that when
** an application iterates through a changeset using an iterator created by
-** this function, all changes that relate to a single table are visted
+** this function, all changes that relate to a single table are visited
** consecutively. There is no chance that the iterator will visit a change
** that applies to table X, then one for table Y, and then later on visit
** another change for table X.
@@ -9253,7 +9278,7 @@ int sqlite3changeset_op(
** 0x01 if the corresponding column is part of the table's primary key, or
** 0x00 if it is not.
**
-** If argumet pnCol is not NULL, then *pnCol is set to the number of columns
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
** in the table.
**
** If this function is called when the iterator does not point to a valid
@@ -9470,12 +9495,12 @@ int sqlite3changeset_concat(
/*
-** Changegroup handle.
+** CAPI3REF: Changegroup Handle
*/
typedef struct sqlite3_changegroup sqlite3_changegroup;
/*
-** CAPI3REF: Combine two or more changesets into a single changeset.
+** CAPI3REF: Create A New Changegroup Object
**
** An sqlite3_changegroup object is used to combine two or more changesets
** (or patchsets) into a single changeset (or patchset). A single changegroup
@@ -9512,6 +9537,8 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add A Changeset To A Changegroup
+**
** Add all changes within the changeset (or patchset) in buffer pData (size
** nData bytes) to the changegroup.
**
@@ -9526,7 +9553,7 @@ int sqlite3changegroup_new(sqlite3_changegroup **pp);
** apply to the same row as a change already present in the changegroup if
** the two rows have the same primary key.
**
-** Changes to rows that that do not already appear in the changegroup are
+** Changes to rows that do not already appear in the changegroup are
** simply copied into it. Or, if both the new changeset and the changegroup
** contain changes that apply to a single row, the final contents of the
** changegroup depends on the type of each change, as follows:
@@ -9587,6 +9614,8 @@ int sqlite3changegroup_new(sqlite3_changegroup **pp);
int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
/*
+** CAPI3REF: Obtain A Composite Changeset From A Changegroup
+**
** Obtain a buffer containing a changeset (or patchset) representing the
** current contents of the changegroup. If the inputs to the changegroup
** were themselves changesets, the output is a changeset. Or, if the
@@ -9615,7 +9644,7 @@ int sqlite3changegroup_output(
);
/*
-** Delete a changegroup object.
+** CAPI3REF: Delete A Changegroup Object
*/
void sqlite3changegroup_delete(sqlite3_changegroup*);
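Putting the four calls above together, a sketch of merging two changesets into one buffer; the output-parameter order of sqlite3changegroup_output() (size, then buffer) is assumed from the session API, and the caller is expected to release the result with sqlite3_free().

static int combine_changesets(int n1, void *p1, int n2, void *p2,
                              int *pnOut, void **ppOut){
  sqlite3_changegroup *pGrp = 0;
  int rc = sqlite3changegroup_new(&pGrp);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n1, p1);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n2, p2);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  sqlite3changegroup_delete(pGrp);
  return rc;
}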
@@ -11392,9 +11421,9 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#define TK_LIMIT 129
#define TK_WHERE 130
#define TK_INTO 131
-#define TK_INTEGER 132
-#define TK_FLOAT 133
-#define TK_BLOB 134
+#define TK_FLOAT 132
+#define TK_BLOB 133
+#define TK_INTEGER 134
#define TK_VARIABLE 135
#define TK_CASE 136
#define TK_WHEN 137
@@ -11418,10 +11447,12 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#define TK_UMINUS 155
#define TK_UPLUS 156
#define TK_REGISTER 157
-#define TK_ASTERISK 158
-#define TK_SPAN 159
-#define TK_SPACE 160
-#define TK_ILLEGAL 161
+#define TK_VECTOR 158
+#define TK_SELECT_COLUMN 159
+#define TK_ASTERISK 160
+#define TK_SPAN 161
+#define TK_SPACE 162
+#define TK_ILLEGAL 163
/* The token codes above must all fit in 8 bits */
#define TKFLG_MASK 0xff
@@ -11903,8 +11934,8 @@ struct BusyHandler {
#define SQLITE_WSD const
#define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v)))
#define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config)
-SQLITE_API int SQLITE_STDCALL sqlite3_wsd_init(int N, int J);
-SQLITE_API void *SQLITE_STDCALL sqlite3_wsd_find(void *K, int L);
+SQLITE_API int sqlite3_wsd_init(int N, int J);
+SQLITE_API void *sqlite3_wsd_find(void *K, int L);
#else
#define SQLITE_WSD
#define GLOBAL(t,v) v
@@ -12080,7 +12111,9 @@ SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*);
SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*);
SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *));
SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree);
+#ifndef SQLITE_OMIT_SHARED_CACHE
SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock);
+#endif
SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int);
SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *);
@@ -12283,8 +12316,10 @@ SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
+#ifndef SQLITE_OMIT_INCRBLOB
SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
+#endif
SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *);
SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion);
SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
@@ -12445,7 +12480,6 @@ struct SubProgram {
int nOp; /* Elements in aOp[] */
int nMem; /* Number of memory cells required */
int nCsr; /* Number of cursors required */
- int nOnce; /* Number of OP_Once instructions */
void *token; /* id that may be used to recursive triggers */
SubProgram *pNext; /* Next sub-program already visited */
};
@@ -12562,13 +12596,13 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_NotExists 33 /* synopsis: intkey=r[P3] */
#define OP_IsNull 34 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
#define OP_NotNull 35 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
-#define OP_Ne 36 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */
-#define OP_Eq 37 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */
-#define OP_Gt 38 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */
-#define OP_Le 39 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */
-#define OP_Lt 40 /* same as TK_LT, synopsis: if r[P1]<r[P3] goto P2 */
-#define OP_Ge 41 /* same as TK_GE, synopsis: if r[P1]>=r[P3] goto P2 */
-#define OP_Last 42
+#define OP_Ne 36 /* same as TK_NE, synopsis: IF r[P3]!=r[P1] */
+#define OP_Eq 37 /* same as TK_EQ, synopsis: IF r[P3]==r[P1] */
+#define OP_Gt 38 /* same as TK_GT, synopsis: IF r[P3]>r[P1] */
+#define OP_Le 39 /* same as TK_LE, synopsis: IF r[P3]<=r[P1] */
+#define OP_Lt 40 /* same as TK_LT, synopsis: IF r[P3]<r[P1] */
+#define OP_Ge 41 /* same as TK_GE, synopsis: IF r[P3]>=r[P1] */
+#define OP_ElseNotEq 42 /* same as TK_ESCAPE */
#define OP_BitAnd 43 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
#define OP_BitOr 44 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
#define OP_ShiftLeft 45 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
@@ -12579,115 +12613,116 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Divide 50 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
#define OP_Remainder 51 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
#define OP_Concat 52 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
-#define OP_SorterSort 53
+#define OP_Last 53
#define OP_BitNot 54 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
-#define OP_Sort 55
-#define OP_Rewind 56
-#define OP_IdxLE 57 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGT 58 /* synopsis: key=r[P3@P4] */
-#define OP_IdxLT 59 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGE 60 /* synopsis: key=r[P3@P4] */
-#define OP_RowSetRead 61 /* synopsis: r[P3]=rowset(P1) */
-#define OP_RowSetTest 62 /* synopsis: if r[P3] in rowset(P1) goto P2 */
-#define OP_Program 63
-#define OP_FkIfZero 64 /* synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_IfPos 65 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
-#define OP_IfNotZero 66 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */
-#define OP_DecrJumpZero 67 /* synopsis: if (--r[P1])==0 goto P2 */
-#define OP_IncrVacuum 68
-#define OP_VNext 69
-#define OP_Init 70 /* synopsis: Start at P2 */
-#define OP_Return 71
-#define OP_EndCoroutine 72
-#define OP_HaltIfNull 73 /* synopsis: if r[P3]=null halt */
-#define OP_Halt 74
-#define OP_Integer 75 /* synopsis: r[P2]=P1 */
-#define OP_Int64 76 /* synopsis: r[P2]=P4 */
-#define OP_String 77 /* synopsis: r[P2]='P4' (len=P1) */
-#define OP_Null 78 /* synopsis: r[P2..P3]=NULL */
-#define OP_SoftNull 79 /* synopsis: r[P1]=NULL */
-#define OP_Blob 80 /* synopsis: r[P2]=P4 (len=P1) */
-#define OP_Variable 81 /* synopsis: r[P2]=parameter(P1,P4) */
-#define OP_Move 82 /* synopsis: r[P2@P3]=r[P1@P3] */
-#define OP_Copy 83 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
-#define OP_SCopy 84 /* synopsis: r[P2]=r[P1] */
-#define OP_IntCopy 85 /* synopsis: r[P2]=r[P1] */
-#define OP_ResultRow 86 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq 87
-#define OP_Function0 88 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_Function 89 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_AddImm 90 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_RealAffinity 91
-#define OP_Cast 92 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation 93
-#define OP_Compare 94 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_Column 95 /* synopsis: r[P3]=PX */
-#define OP_Affinity 96 /* synopsis: affinity(r[P1@P2]) */
+#define OP_SorterSort 55
+#define OP_Sort 56
+#define OP_Rewind 57
+#define OP_IdxLE 58 /* synopsis: key=r[P3@P4] */
+#define OP_IdxGT 59 /* synopsis: key=r[P3@P4] */
+#define OP_IdxLT 60 /* synopsis: key=r[P3@P4] */
+#define OP_IdxGE 61 /* synopsis: key=r[P3@P4] */
+#define OP_RowSetRead 62 /* synopsis: r[P3]=rowset(P1) */
+#define OP_RowSetTest 63 /* synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program 64
+#define OP_FkIfZero 65 /* synopsis: if fkctr[P1]==0 goto P2 */
+#define OP_IfPos 66 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 67 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */
+#define OP_DecrJumpZero 68 /* synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum 69
+#define OP_VNext 70
+#define OP_Init 71 /* synopsis: Start at P2 */
+#define OP_Return 72
+#define OP_EndCoroutine 73
+#define OP_HaltIfNull 74 /* synopsis: if r[P3]=null halt */
+#define OP_Halt 75
+#define OP_Integer 76 /* synopsis: r[P2]=P1 */
+#define OP_Int64 77 /* synopsis: r[P2]=P4 */
+#define OP_String 78 /* synopsis: r[P2]='P4' (len=P1) */
+#define OP_Null 79 /* synopsis: r[P2..P3]=NULL */
+#define OP_SoftNull 80 /* synopsis: r[P1]=NULL */
+#define OP_Blob 81 /* synopsis: r[P2]=P4 (len=P1) */
+#define OP_Variable 82 /* synopsis: r[P2]=parameter(P1,P4) */
+#define OP_Move 83 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_Copy 84 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
+#define OP_SCopy 85 /* synopsis: r[P2]=r[P1] */
+#define OP_IntCopy 86 /* synopsis: r[P2]=r[P1] */
+#define OP_ResultRow 87 /* synopsis: output=r[P1@P2] */
+#define OP_CollSeq 88
+#define OP_Function0 89 /* synopsis: r[P3]=func(r[P2@P5]) */
+#define OP_Function 90 /* synopsis: r[P3]=func(r[P2@P5]) */
+#define OP_AddImm 91 /* synopsis: r[P1]=r[P1]+P2 */
+#define OP_RealAffinity 92
+#define OP_Cast 93 /* synopsis: affinity(r[P1]) */
+#define OP_Permutation 94
+#define OP_Compare 95 /* synopsis: r[P1@P3] <-> r[P2@P3] */
+#define OP_Column 96 /* synopsis: r[P3]=PX */
#define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */
-#define OP_MakeRecord 98 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_Count 99 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie 100
-#define OP_SetCookie 101
-#define OP_ReopenIdx 102 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead 103 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenWrite 104 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenAutoindex 105 /* synopsis: nColumn=P2 */
-#define OP_OpenEphemeral 106 /* synopsis: nColumn=P2 */
-#define OP_SorterOpen 107
-#define OP_SequenceTest 108 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
-#define OP_OpenPseudo 109 /* synopsis: P3 columns in r[P2] */
-#define OP_Close 110
-#define OP_ColumnsUsed 111
-#define OP_Sequence 112 /* synopsis: r[P2]=cursor[P1].ctr++ */
-#define OP_NewRowid 113 /* synopsis: r[P2]=rowid */
-#define OP_Insert 114 /* synopsis: intkey=r[P3] data=r[P2] */
-#define OP_InsertInt 115 /* synopsis: intkey=P3 data=r[P2] */
-#define OP_Delete 116
-#define OP_ResetCount 117
-#define OP_SorterCompare 118 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
-#define OP_SorterData 119 /* synopsis: r[P2]=data */
-#define OP_RowKey 120 /* synopsis: r[P2]=key */
-#define OP_RowData 121 /* synopsis: r[P2]=data */
-#define OP_Rowid 122 /* synopsis: r[P2]=rowid */
-#define OP_NullRow 123
-#define OP_SorterInsert 124
-#define OP_IdxInsert 125 /* synopsis: key=r[P2] */
-#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */
-#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */
-#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */
-#define OP_Destroy 129
-#define OP_Clear 130
-#define OP_ResetSorter 131
-#define OP_CreateIndex 132 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_ParseSchema 135
-#define OP_LoadAnalysis 136
-#define OP_DropTable 137
-#define OP_DropIndex 138
-#define OP_DropTrigger 139
-#define OP_IntegrityCk 140
-#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */
-#define OP_Param 142
-#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */
-#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */
-#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
-#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggFinal 148 /* synopsis: accum=r[P1] N=P2 */
-#define OP_Expire 149
-#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 151
-#define OP_VCreate 152
-#define OP_VDestroy 153
-#define OP_VOpen 154
-#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 156
-#define OP_Pagecount 157
-#define OP_MaxPgcnt 158
-#define OP_CursorHint 159
-#define OP_Noop 160
-#define OP_Explain 161
+#define OP_Affinity 98 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord 99 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count 100 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie 101
+#define OP_SetCookie 102
+#define OP_ReopenIdx 103 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenRead 104 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenWrite 105 /* synopsis: root=P2 iDb=P3 */
+#define OP_OpenAutoindex 106 /* synopsis: nColumn=P2 */
+#define OP_OpenEphemeral 107 /* synopsis: nColumn=P2 */
+#define OP_SorterOpen 108
+#define OP_SequenceTest 109 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
+#define OP_OpenPseudo 110 /* synopsis: P3 columns in r[P2] */
+#define OP_Close 111
+#define OP_ColumnsUsed 112
+#define OP_Sequence 113 /* synopsis: r[P2]=cursor[P1].ctr++ */
+#define OP_NewRowid 114 /* synopsis: r[P2]=rowid */
+#define OP_Insert 115 /* synopsis: intkey=r[P3] data=r[P2] */
+#define OP_InsertInt 116 /* synopsis: intkey=P3 data=r[P2] */
+#define OP_Delete 117
+#define OP_ResetCount 118
+#define OP_SorterCompare 119 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
+#define OP_SorterData 120 /* synopsis: r[P2]=data */
+#define OP_RowKey 121 /* synopsis: r[P2]=key */
+#define OP_RowData 122 /* synopsis: r[P2]=data */
+#define OP_Rowid 123 /* synopsis: r[P2]=rowid */
+#define OP_NullRow 124
+#define OP_SorterInsert 125
+#define OP_IdxInsert 126 /* synopsis: key=r[P2] */
+#define OP_IdxDelete 127 /* synopsis: key=r[P2@P3] */
+#define OP_Seek 128 /* synopsis: Move P3 to P1.rowid */
+#define OP_IdxRowid 129 /* synopsis: r[P2]=rowid */
+#define OP_Destroy 130
+#define OP_Clear 131
+#define OP_Real 132 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
+#define OP_ResetSorter 133
+#define OP_CreateIndex 134 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_CreateTable 135 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_ParseSchema 136
+#define OP_LoadAnalysis 137
+#define OP_DropTable 138
+#define OP_DropIndex 139
+#define OP_DropTrigger 140
+#define OP_IntegrityCk 141
+#define OP_RowSetAdd 142 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Param 143
+#define OP_FkCounter 144 /* synopsis: fkctr[P1]+=P2 */
+#define OP_MemMax 145 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit 146 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggStep0 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggStep 148 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggFinal 149 /* synopsis: accum=r[P1] N=P2 */
+#define OP_Expire 150
+#define OP_TableLock 151 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 152
+#define OP_VCreate 153
+#define OP_VDestroy 154
+#define OP_VOpen 155
+#define OP_VColumn 156 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 157
+#define OP_Pagecount 158
+#define OP_MaxPgcnt 159
+#define OP_CursorHint 160
+#define OP_Noop 161
+#define OP_Explain 162
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -12707,20 +12742,20 @@ typedef struct VdbeOpList VdbeOpList;
/* 32 */ 0x09, 0x09, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
/* 40 */ 0x0b, 0x0b, 0x01, 0x26, 0x26, 0x26, 0x26, 0x26,\
/* 48 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x01, 0x12, 0x01,\
-/* 56 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x23, 0x0b, 0x01,\
-/* 64 */ 0x01, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x02,\
-/* 72 */ 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00,\
-/* 80 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
-/* 88 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00,\
-/* 96 */ 0x00, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,\
+/* 56 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x23, 0x0b,\
+/* 64 */ 0x01, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01,\
+/* 72 */ 0x02, 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10,\
+/* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00,\
+/* 88 */ 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00,\
+/* 96 */ 0x00, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 112 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\
-/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\
-/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\
-/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\
-/* 160 */ 0x00, 0x00,}
+/* 112 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 120 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00,\
+/* 128 */ 0x00, 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10,\
+/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10,\
+/* 144 */ 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\
+/* 160 */ 0x00, 0x00, 0x00,}
/* The sqlite3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -12728,7 +12763,7 @@ typedef struct VdbeOpList VdbeOpList;
** generated this include file strives to group all JUMP opcodes
** together near the beginning of the list.
*/
-#define SQLITE_MX_JUMP_OPCODE 70 /* Maximum JUMP opcode */
+#define SQLITE_MX_JUMP_OPCODE 71 /* Maximum JUMP opcode */
/************** End of opcodes.h *********************************************/
/************** Continuing where we left off in vdbe.h ***********************/
@@ -13061,10 +13096,13 @@ SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager);
SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager);
SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen);
SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager);
# ifdef SQLITE_ENABLE_SNAPSHOT
SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot);
SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot);
# endif
+#else
+# define sqlite3PagerUseWal(x) 0
#endif
#ifdef SQLITE_ENABLE_ZIPVFS
@@ -13697,7 +13735,7 @@ SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *);
** databases may be attached.
*/
struct Db {
- char *zName; /* Name of this database */
+ char *zDbSName; /* Name of this database. (schema name, not filename) */
Btree *pBt; /* The B*Tree structure for this database file */
u8 safety_level; /* How aggressive at syncing data to disk */
u8 bSyncSet; /* True if "PRAGMA synchronous=N" has been run */
@@ -14333,6 +14371,7 @@ struct CollSeq {
** operator is NULL. It is added to certain comparison operators to
** prove that the operands are always NOT NULL.
*/
+#define SQLITE_KEEPNULL 0x08 /* Used by vector == or <> */
#define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */
#define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */
#define SQLITE_NULLEQ 0x80 /* NULL=NULL */
@@ -14897,9 +14936,11 @@ struct Expr {
int iTable; /* TK_COLUMN: cursor number of table holding column
** TK_REGISTER: register number
** TK_TRIGGER: 1 -> new, 0 -> old
- ** EP_Unlikely: 134217728 times likelihood */
+ ** EP_Unlikely: 134217728 times likelihood
+ ** TK_SELECT: 1st register of result vector */
ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
- ** TK_VARIABLE: variable number (always >= 1). */
+ ** TK_VARIABLE: variable number (always >= 1).
+ ** TK_SELECT_COLUMN: column of the result vector */
i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */
i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */
u8 op2; /* TK_REGISTER: original value of Expr.op
@@ -14935,6 +14976,7 @@ struct Expr {
#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */
#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */
#define EP_Alias 0x400000 /* Is an alias for a result set column */
+#define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */
/*
** Combinations of two or more EP_* flags
@@ -15380,7 +15422,7 @@ struct Select {
*/
struct SelectDest {
  u8 eDest; /* How to dispose of the results. One of SRT_* above. */
- char affSdst; /* Affinity used when eDest==SRT_Set */
+ char *zAffSdst; /* Affinity used when eDest==SRT_Set */
int iSDParm; /* A parameter used by the eDest disposal method */
int iSdst; /* Base register where results are written */
int nSdst; /* Number of registers allocated */
@@ -15486,36 +15528,23 @@ struct Parse {
u8 okConstFactor; /* OK to factor out constants */
u8 disableLookaside; /* Number of times lookaside has been disabled */
u8 nColCache; /* Number of entries in aColCache[] */
- int aTempReg[8]; /* Holding area for temporary registers */
int nRangeReg; /* Size of the temporary register block */
int iRangeReg; /* First register in temporary register block */
int nErr; /* Number of errors seen */
int nTab; /* Number of previously allocated VDBE cursors */
int nMem; /* Number of memory cells used so far */
- int nSet; /* Number of sets used so far */
- int nOnce; /* Number of OP_Once instructions so far */
int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */
int szOpAlloc; /* Bytes of memory space allocated for Vdbe.aOp[] */
- int iFixedOp; /* Never back out opcodes iFixedOp-1 or earlier */
int ckBase; /* Base register of data during check constraints */
int iSelfTab; /* Table of an index whose exprs are being coded */
int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */
int iCacheCnt; /* Counter used to generate aColCache[].lru values */
int nLabel; /* Number of labels used */
int *aLabel; /* Space to hold the labels */
- struct yColCache {
- int iTable; /* Table cursor number */
- i16 iColumn; /* Table column number */
- u8 tempReg; /* iReg is a temp register that needs to be freed */
- int iLevel; /* Nesting level */
- int iReg; /* Reg with value of this column. 0 means none. */
- int lru; /* Least recently used entry has the smallest value */
- } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
ExprList *pConstExpr;/* Constant expressions */
Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
- int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */
int regRowid; /* Register holding rowid of CREATE TABLE entry */
int regRoot; /* Register holding root page number for new objects */
int nMaxArg; /* Max args passed to user function by sub-program */
@@ -15528,8 +15557,6 @@ struct Parse {
TableLock *aTableLock; /* Required table locks for shared-cache mode */
#endif
AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */
-
- /* Information used while coding trigger programs. */
Parse *pToplevel; /* Parse structure for main program (or NULL) */
Table *pTriggerTab; /* Table triggers are being coded for */
int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */
@@ -15540,6 +15567,25 @@ struct Parse {
u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
u8 disableTriggers; /* True to disable triggers */
+ /**************************************************************************
+ ** Fields above must be initialized to zero. The fields that follow,
+ ** down to the beginning of the recursive section, do not need to be
+ ** initialized as they will be set before being used. The boundary is
+ ** determined by offsetof(Parse,aColCache).
+ **************************************************************************/
+
+ struct yColCache {
+ int iTable; /* Table cursor number */
+ i16 iColumn; /* Table column number */
+ u8 tempReg; /* iReg is a temp register that needs to be freed */
+ int iLevel; /* Nesting level */
+ int iReg; /* Reg with value of this column. 0 means none. */
+ int lru; /* Least recently used entry has the smallest value */
+ } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
+ int aTempReg[8]; /* Holding area for temporary registers */
+ Token sNameToken; /* Token with unqualified schema object name */
+ Token sLastToken; /* The last token parsed */
+
/************************************************************************
** Above is constant between recursions. Below is reset before and after
** each recursion. The boundary between these two regions is determined
@@ -15555,7 +15601,6 @@ struct Parse {
u8 declareVtab; /* True if inside sqlite3_declare_vtab() */
int nVtabLock; /* Number of virtual tables to lock */
#endif
- int nAlias; /* Number of aliased result set columns */
int nHeight; /* Expression tree height of current sub-select */
#ifndef SQLITE_OMIT_EXPLAIN
int iSelectId; /* ID of current select for EXPLAIN output */
@@ -15567,8 +15612,6 @@ struct Parse {
Table *pNewTable; /* A table being constructed by CREATE TABLE */
Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */
const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */
- Token sNameToken; /* Token with unqualified schema object name */
- Token sLastToken; /* The last token parsed */
#ifndef SQLITE_OMIT_VIRTUALTABLE
Token sArg; /* Complete text of a module argument */
Table **apVtabLock; /* Pointer to virtual tables needing locking */
@@ -15580,6 +15623,14 @@ struct Parse {
};
/*
+** Sizes and pointers of various parts of the Parse object.
+*/
+#define PARSE_HDR_SZ offsetof(Parse,aColCache) /* Recursive part w/o aColCache*/
+#define PARSE_RECURSE_SZ offsetof(Parse,nVar) /* Recursive part */
+#define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */
+#define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */
+
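+
An illustrative sketch (not a copy of the internal routines) of how these sizes are meant to be used: zero only the header portion when setting up a fresh Parse, and save/restore the tail around a nested parse. It assumes memset()/memcpy() from <string.h> are in scope, as they are throughout the amalgamation.

static void parse_init_sketch(Parse *pParse){
  memset(pParse, 0, PARSE_HDR_SZ);   /* only these fields must start at zero */
}

static void nested_parse_sketch(Parse *pParse){
  char saveBuf[PARSE_TAIL_SZ];
  memcpy(saveBuf, PARSE_TAIL(pParse), PARSE_TAIL_SZ);
  memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ);
  /* ... run the nested parse against pParse here ... */
  memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ);
}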
+/*
** Return true if currently inside an sqlite3_declare_vtab() call.
*/
#ifdef SQLITE_OMIT_VIRTUALTABLE
@@ -15827,6 +15878,7 @@ struct Sqlite3Config {
int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
#endif
int bLocaltimeFault; /* True to fail localtime() calls */
+ int iOnceResetThreshold; /* When to reset OP_Once counters */
};
/*
@@ -16112,6 +16164,7 @@ SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*);
#if defined(SQLITE_DEBUG)
SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8);
+SQLITE_PRIVATE void sqlite3TreeViewBareExprList(TreeView*, const ExprList*, const char*);
SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8);
@@ -16140,9 +16193,10 @@ SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*);
SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*);
-SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*);
+SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32);
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(Parse*,ExprList*,IdList*,Expr*);
SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int);
SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int);
SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*);
@@ -16178,7 +16232,6 @@ SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*);
SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*,
sqlite3_vfs**,char**,char **);
SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*);
-SQLITE_PRIVATE int sqlite3CodeOnce(Parse *);
#ifdef SQLITE_OMIT_BUILTIN_TEST
# define sqlite3FaultSim(X) SQLITE_OK
@@ -16301,8 +16354,8 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_ite
SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*);
SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*);
SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*);
-SQLITE_PRIVATE void sqlite3Vacuum(Parse*);
-SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*);
+SQLITE_PRIVATE void sqlite3Vacuum(Parse*,Token*);
+SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*, int);
SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*);
SQLITE_PRIVATE int sqlite3ExprCompare(Expr*, Expr*, int);
SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int);
@@ -16478,6 +16531,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2);
SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity);
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table*,int);
SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr);
SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
@@ -16543,7 +16597,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*);
SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *);
SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...);
SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*);
-SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *, Expr *, int, int);
+SQLITE_PRIVATE int sqlite3CodeSubselect(Parse*, Expr *, int, int);
SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*);
SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p);
SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*);
@@ -16598,12 +16652,20 @@ SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int);
SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *);
SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *);
+#ifndef SQLITE_OMIT_SUBQUERY
+SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse*, Expr*);
+#else
+# define sqlite3ExprCheckIN(x,y) SQLITE_OK
+#endif
+
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void);
-SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(Parse*,Index*,UnpackedRecord**,Expr*,u8,int,int*);
+SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
+ Parse*,Index*,UnpackedRecord**,Expr*,int,int,int*);
SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**);
SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*);
SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**);
+SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3*, Index*, int);
#endif
/*
@@ -16756,7 +16818,7 @@ SQLITE_PRIVATE void sqlite3EndBenignMalloc(void);
#define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */
#define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */
#define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */
-SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*);
+SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*, int*);
SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int);
SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *);
@@ -16861,6 +16923,11 @@ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**);
SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*);
#endif
+SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr);
+SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr);
+SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int);
+SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int);
+
#endif /* SQLITEINT_H */
/************** End of sqliteInt.h *******************************************/
@@ -16946,16 +17013,13 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = {
**
** (x & ~(map[x]&0x20))
**
-** Standard function tolower() is implemented using the sqlite3UpperToLower[]
+** The equivalent of tolower() is implemented using the sqlite3UpperToLower[]
** array. tolower() is used more often than toupper() by SQLite.
**
-** Bit 0x40 is set if the character non-alphanumeric and can be used in an
+** Bit 0x40 is set if the character is non-alphanumeric and can be used in an
** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any
** non-ASCII UTF character. Hence the test for whether or not a character is
** part of an identifier is 0x46.
-**
-** SQLite's versions are identical to the standard versions assuming a
-** locale of "C". They are implemented as macros in sqliteInt.h.
*/
#ifdef SQLITE_ASCII
SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
@@ -17028,7 +17092,7 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
#endif
/* Statement journals spill to disk when their size exceeds the following
-** threashold (in bytes). 0 means that statement journals are created and
+** threshold (in bytes). 0 means that statement journals are created and
** written to disk immediately (the default behavior for SQLite versions
** before 3.12.0). -1 means always keep the entire statement journal in
** memory. (The statement journal is also always held entirely in memory
@@ -17092,7 +17156,8 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
#ifndef SQLITE_OMIT_BUILTIN_TEST
0, /* xTestCallback */
#endif
- 0 /* bLocaltimeFault */
+ 0, /* bLocaltimeFault */
+ 0x7ffffffe /* iOnceResetThreshold */
};
/*
@@ -17115,7 +17180,7 @@ SQLITE_PRIVATE const Token sqlite3IntTokens[] = {
** The value of the "pending" byte must be 0x40000000 (1 byte past the
** 1-gibibyte boundary) in a compatible database. SQLite never uses
** the database page that contains the pending byte. It never attempts
-** to read or write that page. The pending byte page is set assign
+** to read or write that page. The pending byte page is set aside
** for use by the VFS layers as space for managing file locks.
**
** During testing, it is often desirable to move the pending byte to
@@ -17564,7 +17629,7 @@ static const char * const azCompileOpt[] = {
** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix
** is not required for a match.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName){
int i, n;
#if SQLITE_ENABLE_API_ARMOR
@@ -17592,7 +17657,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
** Return the N-th compile-time option string. If N is out of range,
** return a NULL pointer.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N){
+SQLITE_API const char *sqlite3_compileoption_get(int N){
if( N>=0 && N<ArraySize(azCompileOpt) ){
return azCompileOpt[N];
}
@@ -17675,9 +17740,6 @@ typedef unsigned Bool;
/* Opaque type used by code in vdbesort.c */
typedef struct VdbeSorter VdbeSorter;
-/* Opaque type used by the explainer */
-typedef struct Explain Explain;
-
/* Elements of the linked list at Vdbe.pAuxData */
typedef struct AuxData AuxData;
@@ -17752,6 +17814,12 @@ struct VdbeCursor {
** aType[] and nField+1 array slots for aOffset[] */
};
+
+/*
+** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
+*/
+#define CACHE_STALE 0
+
/*
** When a sub-program is executed (OP_Program), a structure of this type
** is allocated to store the current value of the program counter, as
@@ -17780,7 +17848,6 @@ struct VdbeFrame {
Op *aOp; /* Program instructions for parent frame */
i64 *anExec; /* Event counters from parent frame */
Mem *aMem; /* Array of memory cells for parent frame */
- u8 *aOnceFlag; /* Array of OP_Once flags for parent frame */
VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */
void *token; /* Copy of SubProgram.token */
i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */
@@ -17789,7 +17856,6 @@ struct VdbeFrame {
int pc; /* Program Counter in parent (calling) frame */
int nOp; /* Size of aOp array */
int nMem; /* Number of entries in aMem */
- int nOnceFlag; /* Number of entries in aOnceFlag */
int nChildMem; /* Number of memory cells for child frame */
int nChildCsr; /* Number of cursors for child frame */
int nChange; /* Statement changes (Vdbe.nChange) */
@@ -17799,11 +17865,6 @@ struct VdbeFrame {
#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))])
/*
-** A value for VdbeCursor.cacheValid that means the cache is always invalid.
-*/
-#define CACHE_STALE 0
-
-/*
** Internally, the vdbe manipulates nearly all SQL values as Mem
** structures. Each Mem struct may cache multiple representations (string,
** integer etc.) of the same value.
@@ -17943,18 +18004,6 @@ struct sqlite3_context {
sqlite3_value *argv[1]; /* Argument set */
};
-/*
-** An Explain object accumulates indented output which is helpful
-** in describing recursive data structures.
-*/
-struct Explain {
- Vdbe *pVdbe; /* Attach the explanation to this Vdbe */
- StrAccum str; /* The string being accumulated */
- int nIndent; /* Number of elements in aIndent */
- u16 aIndent[100]; /* Levels of indentation */
- char zBase[100]; /* Initial space */
-};
-
/* A bitfield type for use inside of structures. Always follow with :N where
** N is the number of bits.
*/
@@ -17979,34 +18028,47 @@ struct ScanStatus {
*/
struct Vdbe {
sqlite3 *db; /* The database connection that owns this statement */
+ Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
+ Parse *pParse; /* Parsing context used to create this Vdbe */
+ ynVar nVar; /* Number of entries in aVar[] */
+ ynVar nzVar; /* Number of entries in azVar[] */
+ u32 magic; /* Magic number for sanity checking */
+ int nMem; /* Number of memory locations currently allocated */
+ int nCursor; /* Number of slots in apCsr[] */
+ u32 cacheCtr; /* VdbeCursor row cache generation counter */
+ int pc; /* The program counter */
+ int rc; /* Value to return */
+ int nChange; /* Number of db changes made since last reset */
+ int iStatement; /* Statement number (or 0 if has not opened stmt) */
+ i64 iCurrentTime; /* Value of julianday('now') for this statement */
+ i64 nFkConstraint; /* Number of imm. FK constraints this VM */
+ i64 nStmtDefCons; /* Number of def. constraints when stmt started */
+ i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */
+
+ /* When allocating a new Vdbe object, all of the fields below should be
+ ** initialized to zero or NULL */
+
Op *aOp; /* Space to hold the virtual machine's program */
Mem *aMem; /* The memory locations */
Mem **apArg; /* Arguments to currently executing user function */
Mem *aColName; /* Column names to return */
Mem *pResultSet; /* Pointer to an array of results */
- Parse *pParse; /* Parsing context used to create this Vdbe */
- int nMem; /* Number of memory locations currently allocated */
- int nOp; /* Number of instructions in the program */
- int nCursor; /* Number of slots in apCsr[] */
- u32 magic; /* Magic number for sanity checking */
char *zErrMsg; /* Error message written here */
- Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
VdbeCursor **apCsr; /* One element of this array for each open cursor */
Mem *aVar; /* Values for the OP_Variable opcode. */
char **azVar; /* Name of variables */
- ynVar nVar; /* Number of entries in aVar[] */
- ynVar nzVar; /* Number of entries in azVar[] */
- u32 cacheCtr; /* VdbeCursor row cache generation counter */
- int pc; /* The program counter */
- int rc; /* Value to return */
+#ifndef SQLITE_OMIT_TRACE
+ i64 startTime; /* Time when query started - used for profiling */
+#endif
+ int nOp; /* Number of instructions in the program */
#ifdef SQLITE_DEBUG
int rcApp; /* errcode set by sqlite3_result_error_code() */
#endif
u16 nResColumn; /* Number of columns in one row of the result set */
u8 errorAction; /* Recovery action to do in case of an error */
+ u8 minWriteFileFormat; /* Minimum file format for writable database files */
bft expired:1; /* True if the VM needs to be recompiled */
bft doingRerun:1; /* True if rerunning after an auto-reprepare */
- u8 minWriteFileFormat; /* Minimum file format for writable database files */
bft explain:2; /* True if EXPLAIN present on SQL command */
bft changeCntOn:1; /* True to update the change-counter */
bft runOnlyOnce:1; /* Automatically expire on reset */
@@ -18014,18 +18076,9 @@ struct Vdbe {
bft readOnly:1; /* True for statements that do not write */
bft bIsReader:1; /* True for statements that read */
bft isPrepareV2:1; /* True if prepared with prepare_v2() */
- int nChange; /* Number of db changes made since last reset */
yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
yDbMask lockMask; /* Subset of btreeMask that requires a lock */
- int iStatement; /* Statement number (or 0 if has not opened stmt) */
u32 aCounter[5]; /* Counters used by sqlite3_stmt_status() */
-#ifndef SQLITE_OMIT_TRACE
- i64 startTime; /* Time when query started - used for profiling */
-#endif
- i64 iCurrentTime; /* Value of julianday('now') for this statement */
- i64 nFkConstraint; /* Number of imm. FK constraints this VM */
- i64 nStmtDefCons; /* Number of def. constraints when stmt started */
- i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */
char *zSql; /* Text of the SQL statement that generated this */
void *pFree; /* Free this when deleting the vdbe */
VdbeFrame *pFrame; /* Parent frame */
@@ -18033,8 +18086,6 @@ struct Vdbe {
int nFrame; /* Number of frames in pFrame list */
u32 expmask; /* Binding to these vars invalidates VM */
SubProgram *pProgram; /* Linked list of all sub-programs used by VM */
- int nOnceFlag; /* Size of array aOnceFlag[] */
- u8 *aOnceFlag; /* Flags for OP_Once */
AuxData *pAuxData; /* Linked list of auxdata allocations */
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
i64 *anExec; /* Number of times each op has been executed */
@@ -18046,10 +18097,11 @@ struct Vdbe {
/*
** The following are allowed values for Vdbe.magic
*/
-#define VDBE_MAGIC_INIT 0x26bceaa5 /* Building a VDBE program */
-#define VDBE_MAGIC_RUN 0xbdf20da3 /* VDBE is ready to execute */
-#define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */
-#define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */
+#define VDBE_MAGIC_INIT 0x16bceaa5 /* Building a VDBE program */
+#define VDBE_MAGIC_RUN 0x2df20da3 /* VDBE is ready to execute */
+#define VDBE_MAGIC_HALT 0x319c2973 /* VDBE has completed execution */
+#define VDBE_MAGIC_RESET 0x48fa9f76 /* Reset and ready to run again */
+#define VDBE_MAGIC_DEAD 0x5606c3c8 /* The VDBE has been deallocated */
/*
** Structure used to store the context required by the
@@ -18066,8 +18118,8 @@ struct PreUpdate {
int iNewReg; /* Register for new.* values */
i64 iKey1; /* First key value passed to hook */
i64 iKey2; /* Second key value passed to hook */
- int iPKey; /* If not negative index of IPK column */
Mem *aNew; /* Array of new.* values */
+ Table *pTab; /* Schema object being updated */
};
/*
@@ -18302,7 +18354,7 @@ SQLITE_PRIVATE void sqlite3StatusHighwater(int op, int X){
/*
** Query status information.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
+SQLITE_API int sqlite3_status64(
int op,
sqlite3_int64 *pCurrent,
sqlite3_int64 *pHighwater,
@@ -18327,7 +18379,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
(void)pMutex; /* Prevent warning when SQLITE_THREADSAFE=0 */
return SQLITE_OK;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
sqlite3_int64 iCur = 0, iHwtr = 0;
int rc;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -18344,7 +18396,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwa
/*
** Query status information for a single database connection
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
+SQLITE_API int sqlite3_db_status(
sqlite3 *db, /* The database connection whose status is desired */
int op, /* Status verb */
int *pCurrent, /* Write current value here */
@@ -20021,7 +20073,7 @@ static sqlite3_vfs * SQLITE_WSD vfsList = 0;
** Locate a VFS by name. If no name is given, simply return the
** first VFS on the list.
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfs){
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfs){
sqlite3_vfs *pVfs = 0;
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex;
@@ -20067,7 +20119,7 @@ static void vfsUnlink(sqlite3_vfs *pVfs){
** VFS multiple times. The new VFS becomes the default if makeDflt is
** true.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
MUTEX_LOGIC(sqlite3_mutex *mutex;)
#ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize();
@@ -20095,7 +20147,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDf
/*
** Unregister a VFS so that it is no longer accessible.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
@@ -22446,7 +22498,7 @@ SQLITE_PRIVATE int sqlite3MutexEnd(void){
/*
** Retrieve a pointer to a static mutex or allocate a new dynamic one.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int id){
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int id){
#ifndef SQLITE_OMIT_AUTOINIT
if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0;
if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0;
@@ -22467,7 +22519,7 @@ SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){
/*
** Free a dynamic mutex.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex *p){
if( p ){
assert( sqlite3GlobalConfig.mutex.xMutexFree );
sqlite3GlobalConfig.mutex.xMutexFree(p);
@@ -22478,7 +22530,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){
** Obtain the mutex p. If some other thread already has the mutex, block
** until it can be obtained.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex *p){
if( p ){
assert( sqlite3GlobalConfig.mutex.xMutexEnter );
sqlite3GlobalConfig.mutex.xMutexEnter(p);
@@ -22489,7 +22541,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){
** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another
** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex *p){
int rc = SQLITE_OK;
if( p ){
assert( sqlite3GlobalConfig.mutex.xMutexTry );
@@ -22504,7 +22556,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){
** is not currently entered. If a NULL pointer is passed as an argument
** this function is a no-op.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex *p){
if( p ){
assert( sqlite3GlobalConfig.mutex.xMutexLeave );
sqlite3GlobalConfig.mutex.xMutexLeave(p);
@@ -22516,11 +22568,11 @@ SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){
** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines are
** intended for use inside assert() statements.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex *p){
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex *p){
assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld );
return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex *p){
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex *p){
assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld );
return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p);
}
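/*
** Illustrative usage sketch for the dynamic-mutex wrappers above; it is
** not part of the patch. Assumes a threadsafe build; error handling is
** kept minimal.
*/
#include <sqlite3.h>

static int protected_counter = 0;

static void bump_counter(sqlite3_mutex *mtx){
  sqlite3_mutex_enter(mtx);            /* blocks until the mutex is held */
  protected_counter++;
  sqlite3_mutex_leave(mtx);
}

int mutex_demo(void){
  sqlite3_mutex *mtx = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST);
  if( mtx==0 ) return 1;               /* allocation may fail, or be a no-op build */
  bump_counter(mtx);
  if( sqlite3_mutex_try(mtx)==SQLITE_OK ){   /* non-blocking acquire */
    sqlite3_mutex_leave(mtx);
  }
  sqlite3_mutex_free(mtx);             /* only dynamic mutexes may be freed */
  return 0;
}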
@@ -23552,8 +23604,8 @@ static int winMutex_isNt = -1; /* <0 means "need to query" */
*/
static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0;
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void); /* os_win.c */
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
+SQLITE_API int sqlite3_win32_is_nt(void); /* os_win.c */
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
static int winMutexInit(void){
/* The first to increment to 1 does actual initialization */
@@ -23853,7 +23905,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
** held by SQLite. An example of non-essential memory is memory used to
** cache database pages that are not currently in use.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int n){
+SQLITE_API int sqlite3_release_memory(int n){
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
return sqlite3PcacheReleaseMemory(n);
#else
@@ -23912,7 +23964,7 @@ SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){
** that was invoked when memory usage grew too large. Now it is a
** no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_memory_alarm(
+SQLITE_API int sqlite3_memory_alarm(
void(*xCallback)(void *pArg, sqlite3_int64 used,int N),
void *pArg,
sqlite3_int64 iThreshold
@@ -23928,7 +23980,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_memory_alarm(
** Set the soft heap-size limit for the library. Passing a zero or
** negative value indicates no limit.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 n){
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 n){
sqlite3_int64 priorLimit;
sqlite3_int64 excess;
sqlite3_int64 nUsed;
@@ -23950,7 +24002,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64
if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff));
return priorLimit;
}
-SQLITE_API void SQLITE_STDCALL sqlite3_soft_heap_limit(int n){
+SQLITE_API void sqlite3_soft_heap_limit(int n){
if( n<0 ) n = 0;
sqlite3_soft_heap_limit64(n);
}
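/*
** Usage sketch for the soft heap-limit interface above; not part of the
** patch. The 8 MiB figure is illustrative. sqlite3_soft_heap_limit64()
** returns the prior limit, a negative argument only queries the current
** value, and zero removes the limit.
*/
#include <sqlite3.h>
#include <stdio.h>

void show_heap_limit(void){
  sqlite3_int64 prior = sqlite3_soft_heap_limit64(8*1024*1024); /* 8 MiB cap */
  printf("previous soft limit: %lld bytes\n", (long long)prior);
  sqlite3_soft_heap_limit64(0);        /* zero means no limit */
}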
@@ -24019,7 +24071,7 @@ SQLITE_PRIVATE void sqlite3MallocEnd(void){
/*
** Return the amount of memory currently checked out.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void){
sqlite3_int64 res, mx;
sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0);
return res;
@@ -24030,7 +24082,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){
** checked out since either the beginning of this process
** or since the most recent reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag){
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag){
sqlite3_int64 res, mx;
sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag);
return mx;
@@ -24110,13 +24162,13 @@ SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
** First make sure the memory subsystem is initialized, then do the
** allocation.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int n){
+SQLITE_API void *sqlite3_malloc(int n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
return n<=0 ? 0 : sqlite3Malloc(n);
}
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64 n){
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
@@ -24259,7 +24311,7 @@ SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){
return db->lookaside.sz;
}
}
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void *p){
+SQLITE_API sqlite3_uint64 sqlite3_msize(void *p){
assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
return p ? sqlite3GlobalConfig.m.xSize(p) : 0;
@@ -24268,7 +24320,7 @@ SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void *p){
/*
** Free memory previously obtained from sqlite3Malloc().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void *p){
+SQLITE_API void sqlite3_free(void *p){
if( p==0 ) return; /* IMP: R-49053-54554 */
assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
@@ -24377,14 +24429,14 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
** The public interface to sqlite3Realloc. Make sure that the memory
** subsystem is initialized prior to invoking sqliteRealloc.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void *pOld, int n){
+SQLITE_API void *sqlite3_realloc(void *pOld, int n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
if( n<0 ) n = 0; /* IMP: R-26507-47431 */
return sqlite3Realloc(pOld, n);
}
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
+SQLITE_API void *sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize() ) return 0;
#endif
@@ -25611,7 +25663,7 @@ SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3 *db, const char *zFormat, ...){
** Print into memory obtained from sqlite3_malloc(). Omit the internal
** %-conversion extensions.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char *zFormat, va_list ap){
+SQLITE_API char *sqlite3_vmprintf(const char *zFormat, va_list ap){
char *z;
char zBase[SQLITE_PRINT_BUF_SIZE];
StrAccum acc;
@@ -25635,7 +25687,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char *zFormat, va_list ap
** Print into memory obtained from sqlite3_malloc(). Omit the internal
** %-conversion extensions.
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char *zFormat, ...){
+SQLITE_API char *sqlite3_mprintf(const char *zFormat, ...){
va_list ap;
char *z;
#ifndef SQLITE_OMIT_AUTOINIT
@@ -25660,7 +25712,7 @@ SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char *zFormat, ...){
**
** sqlite3_vsnprintf() is the varargs version.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){
+SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_list ap){
StrAccum acc;
if( n<=0 ) return zBuf;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -25674,7 +25726,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int n, char *zBuf, const char
sqlite3VXPrintf(&acc, zFormat, ap);
return sqlite3StrAccumFinish(&acc);
}
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
+SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
char *z;
va_list ap;
va_start(ap,zFormat);
@@ -25710,7 +25762,7 @@ static void renderLogMsg(int iErrCode, const char *zFormat, va_list ap){
/*
** Format and write a message to the log if logging is enabled.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...){
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...){
va_list ap; /* Vararg list */
if( sqlite3GlobalConfig.xLog ){
va_start(ap, zFormat);
@@ -25875,7 +25927,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m
/*
-** Generate a human-readable description of a the Select object.
+** Generate a human-readable description of a Select object.
*/
SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 moreToFollow){
int n = 0;
@@ -26206,6 +26258,15 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewExpr(pView, pExpr->pRight, 0);
break;
}
+ case TK_VECTOR: {
+ sqlite3TreeViewBareExprList(pView, pExpr->x.pList, "VECTOR");
+ break;
+ }
+ case TK_SELECT_COLUMN: {
+ sqlite3TreeViewLine(pView, "SELECT-COLUMN %d", pExpr->iColumn);
+ sqlite3TreeViewSelect(pView, pExpr->pLeft->x.pSelect, 0);
+ break;
+ }
default: {
sqlite3TreeViewLine(pView, "op=%d", pExpr->op);
break;
@@ -26222,21 +26283,20 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewPop(pView);
}
+
/*
** Generate a human-readable explanation of an expression list.
*/
-SQLITE_PRIVATE void sqlite3TreeViewExprList(
+SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
TreeView *pView,
const ExprList *pList,
- u8 moreToFollow,
const char *zLabel
){
- int i;
- pView = sqlite3TreeViewPush(pView, moreToFollow);
if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST";
if( pList==0 ){
sqlite3TreeViewLine(pView, "%s (empty)", zLabel);
}else{
+ int i;
sqlite3TreeViewLine(pView, "%s", zLabel);
for(i=0; i<pList->nExpr; i++){
int j = pList->a[i].u.x.iOrderByCol;
@@ -26248,6 +26308,15 @@ SQLITE_PRIVATE void sqlite3TreeViewExprList(
if( j ) sqlite3TreeViewPop(pView);
}
}
+}
+SQLITE_PRIVATE void sqlite3TreeViewExprList(
+ TreeView *pView,
+ const ExprList *pList,
+ u8 moreToFollow,
+ const char *zLabel
+){
+ pView = sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewBareExprList(pView, pList, zLabel);
sqlite3TreeViewPop(pView);
}
@@ -26287,7 +26356,7 @@ static SQLITE_WSD struct sqlite3PrngType {
/*
** Return N random bytes.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *pBuf){
+SQLITE_API void sqlite3_randomness(int N, void *pBuf){
unsigned char t;
unsigned char *zBuf = pBuf;
@@ -27490,7 +27559,7 @@ SQLITE_PRIVATE void sqlite3TokenInit(Token *p, char *z){
** case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *zLeft, const char *zRight){
+SQLITE_API int sqlite3_stricmp(const char *zLeft, const char *zRight){
if( zLeft==0 ){
return zRight ? -1 : 0;
}else if( zRight==0 ){
@@ -27511,7 +27580,7 @@ SQLITE_PRIVATE int sqlite3StrICmp(const char *zLeft, const char *zRight){
}
return c;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
+SQLITE_API int sqlite3_strnicmp(const char *zLeft, const char *zRight, int N){
register unsigned char *a, *b;
if( zLeft==0 ){
return zRight ? -1 : 0;
@@ -28509,36 +28578,21 @@ SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
return sqlite3AddInt64(pA, -iB);
}
}
-#define TWOPOWER32 (((i64)1)<<32)
-#define TWOPOWER31 (((i64)1)<<31)
SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
i64 iA = *pA;
- i64 iA1, iA0, iB1, iB0, r;
-
- iA1 = iA/TWOPOWER32;
- iA0 = iA % TWOPOWER32;
- iB1 = iB/TWOPOWER32;
- iB0 = iB % TWOPOWER32;
- if( iA1==0 ){
- if( iB1==0 ){
- *pA *= iB;
- return 0;
- }
- r = iA0*iB1;
- }else if( iB1==0 ){
- r = iA1*iB0;
- }else{
- /* If both iA1 and iB1 are non-zero, overflow will result */
- return 1;
- }
- testcase( r==(-TWOPOWER31)-1 );
- testcase( r==(-TWOPOWER31) );
- testcase( r==TWOPOWER31 );
- testcase( r==TWOPOWER31-1 );
- if( r<(-TWOPOWER31) || r>=TWOPOWER31 ) return 1;
- r *= TWOPOWER32;
- if( sqlite3AddInt64(&r, iA0*iB0) ) return 1;
- *pA = r;
+ if( iB>0 ){
+ if( iA>LARGEST_INT64/iB ) return 1;
+ if( iA<SMALLEST_INT64/iB ) return 1;
+ }else if( iB<0 ){
+ if( iA>0 ){
+ if( iB<SMALLEST_INT64/iA ) return 1;
+ }else if( iA<0 ){
+ if( iB==SMALLEST_INT64 ) return 1;
+ if( iA==SMALLEST_INT64 ) return 1;
+ if( -iA>LARGEST_INT64/-iB ) return 1;
+ }
+ }
+ *pA = iA*iB;
return 0;
}
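/*
** Standalone sketch of the division-based overflow test that replaces the
** split-halves multiply above; not part of the patch. INT64_MAX/INT64_MIN
** stand in for LARGEST_INT64/SMALLEST_INT64, but the branch structure
** mirrors the new sqlite3MulInt64().
*/
#include <stdint.h>

/* Return 1 if a*b would overflow a signed 64-bit integer, else 0. */
static int mul64_overflows(int64_t a, int64_t b){
  if( b>0 ){
    if( a>INT64_MAX/b ) return 1;
    if( a<INT64_MIN/b ) return 1;
  }else if( b<0 ){
    if( a>0 ){
      if( b<INT64_MIN/a ) return 1;
    }else if( a<0 ){
      if( b==INT64_MIN ) return 1;     /* -b would itself overflow */
      if( a==INT64_MIN ) return 1;
      if( -a>INT64_MAX/-b ) return 1;
    }
  }
  return 0;   /* multiplying by zero, or no overflow detected */
}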
@@ -28733,7 +28787,11 @@ static unsigned int strHash(const char *z){
unsigned int h = 0;
unsigned char c;
while( (c = (unsigned char)*z++)!=0 ){ /*OPTIMIZATION-IF-TRUE*/
- h = (h<<3) ^ h ^ sqlite3UpperToLower[c];
+ /* Knuth multiplicative hashing. (Sorting & Searching, p. 510).
+ ** 0x9e3779b1 is 2654435761 which is the closest prime number to
+ ** (2**32)*golden_ratio, where golden_ratio = (sqrt(5) - 1)/2. */
+ h += sqlite3UpperToLower[c];
+ h *= 0x9e3779b1;
}
return h;
}
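/*
** Minimal sketch of the Knuth multiplicative string hash introduced above;
** not part of the patch. Plain tolower() replaces SQLite's internal
** sqlite3UpperToLower[] table, so the exact hash values differ.
*/
#include <ctype.h>

static unsigned int str_hash(const char *z){
  unsigned int h = 0;
  unsigned char c;
  while( (c = (unsigned char)*z++)!=0 ){
    h += (unsigned char)tolower(c);
    h *= 0x9e3779b1;     /* prime close to (2^32)*golden_ratio */
  }
  return h;
}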
@@ -28993,13 +29051,13 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 33 */ "NotExists" OpHelp("intkey=r[P3]"),
/* 34 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
/* 35 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
- /* 36 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"),
- /* 37 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"),
- /* 38 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"),
- /* 39 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"),
- /* 40 */ "Lt" OpHelp("if r[P1]<r[P3] goto P2"),
- /* 41 */ "Ge" OpHelp("if r[P1]>=r[P3] goto P2"),
- /* 42 */ "Last" OpHelp(""),
+ /* 36 */ "Ne" OpHelp("IF r[P3]!=r[P1]"),
+ /* 37 */ "Eq" OpHelp("IF r[P3]==r[P1]"),
+ /* 38 */ "Gt" OpHelp("IF r[P3]>r[P1]"),
+ /* 39 */ "Le" OpHelp("IF r[P3]<=r[P1]"),
+ /* 40 */ "Lt" OpHelp("IF r[P3]<r[P1]"),
+ /* 41 */ "Ge" OpHelp("IF r[P3]>=r[P1]"),
+ /* 42 */ "ElseNotEq" OpHelp(""),
/* 43 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
/* 44 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
/* 45 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<<r[P1]"),
@@ -29010,115 +29068,116 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 50 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"),
/* 51 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"),
/* 52 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"),
- /* 53 */ "SorterSort" OpHelp(""),
+ /* 53 */ "Last" OpHelp(""),
/* 54 */ "BitNot" OpHelp("r[P1]= ~r[P1]"),
- /* 55 */ "Sort" OpHelp(""),
- /* 56 */ "Rewind" OpHelp(""),
- /* 57 */ "IdxLE" OpHelp("key=r[P3@P4]"),
- /* 58 */ "IdxGT" OpHelp("key=r[P3@P4]"),
- /* 59 */ "IdxLT" OpHelp("key=r[P3@P4]"),
- /* 60 */ "IdxGE" OpHelp("key=r[P3@P4]"),
- /* 61 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
- /* 62 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
- /* 63 */ "Program" OpHelp(""),
- /* 64 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
- /* 65 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
- /* 66 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"),
- /* 67 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
- /* 68 */ "IncrVacuum" OpHelp(""),
- /* 69 */ "VNext" OpHelp(""),
- /* 70 */ "Init" OpHelp("Start at P2"),
- /* 71 */ "Return" OpHelp(""),
- /* 72 */ "EndCoroutine" OpHelp(""),
- /* 73 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
- /* 74 */ "Halt" OpHelp(""),
- /* 75 */ "Integer" OpHelp("r[P2]=P1"),
- /* 76 */ "Int64" OpHelp("r[P2]=P4"),
- /* 77 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
- /* 78 */ "Null" OpHelp("r[P2..P3]=NULL"),
- /* 79 */ "SoftNull" OpHelp("r[P1]=NULL"),
- /* 80 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
- /* 81 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
- /* 82 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
- /* 83 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
- /* 84 */ "SCopy" OpHelp("r[P2]=r[P1]"),
- /* 85 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
- /* 86 */ "ResultRow" OpHelp("output=r[P1@P2]"),
- /* 87 */ "CollSeq" OpHelp(""),
- /* 88 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 89 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
- /* 90 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
- /* 91 */ "RealAffinity" OpHelp(""),
- /* 92 */ "Cast" OpHelp("affinity(r[P1])"),
- /* 93 */ "Permutation" OpHelp(""),
- /* 94 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
- /* 95 */ "Column" OpHelp("r[P3]=PX"),
- /* 96 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 55 */ "SorterSort" OpHelp(""),
+ /* 56 */ "Sort" OpHelp(""),
+ /* 57 */ "Rewind" OpHelp(""),
+ /* 58 */ "IdxLE" OpHelp("key=r[P3@P4]"),
+ /* 59 */ "IdxGT" OpHelp("key=r[P3@P4]"),
+ /* 60 */ "IdxLT" OpHelp("key=r[P3@P4]"),
+ /* 61 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 62 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
+ /* 63 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 64 */ "Program" OpHelp(""),
+ /* 65 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
+ /* 66 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+ /* 67 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"),
+ /* 68 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
+ /* 69 */ "IncrVacuum" OpHelp(""),
+ /* 70 */ "VNext" OpHelp(""),
+ /* 71 */ "Init" OpHelp("Start at P2"),
+ /* 72 */ "Return" OpHelp(""),
+ /* 73 */ "EndCoroutine" OpHelp(""),
+ /* 74 */ "HaltIfNull" OpHelp("if r[P3]=null halt"),
+ /* 75 */ "Halt" OpHelp(""),
+ /* 76 */ "Integer" OpHelp("r[P2]=P1"),
+ /* 77 */ "Int64" OpHelp("r[P2]=P4"),
+ /* 78 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
+ /* 79 */ "Null" OpHelp("r[P2..P3]=NULL"),
+ /* 80 */ "SoftNull" OpHelp("r[P1]=NULL"),
+ /* 81 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
+ /* 82 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
+ /* 83 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 84 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
+ /* 85 */ "SCopy" OpHelp("r[P2]=r[P1]"),
+ /* 86 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
+ /* 87 */ "ResultRow" OpHelp("output=r[P1@P2]"),
+ /* 88 */ "CollSeq" OpHelp(""),
+ /* 89 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"),
+ /* 90 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"),
+ /* 91 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
+ /* 92 */ "RealAffinity" OpHelp(""),
+ /* 93 */ "Cast" OpHelp("affinity(r[P1])"),
+ /* 94 */ "Permutation" OpHelp(""),
+ /* 95 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
+ /* 96 */ "Column" OpHelp("r[P3]=PX"),
/* 97 */ "String8" OpHelp("r[P2]='P4'"),
- /* 98 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
- /* 99 */ "Count" OpHelp("r[P2]=count()"),
- /* 100 */ "ReadCookie" OpHelp(""),
- /* 101 */ "SetCookie" OpHelp(""),
- /* 102 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
- /* 103 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
- /* 104 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
- /* 105 */ "OpenAutoindex" OpHelp("nColumn=P2"),
- /* 106 */ "OpenEphemeral" OpHelp("nColumn=P2"),
- /* 107 */ "SorterOpen" OpHelp(""),
- /* 108 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
- /* 109 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
- /* 110 */ "Close" OpHelp(""),
- /* 111 */ "ColumnsUsed" OpHelp(""),
- /* 112 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
- /* 113 */ "NewRowid" OpHelp("r[P2]=rowid"),
- /* 114 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
- /* 115 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
- /* 116 */ "Delete" OpHelp(""),
- /* 117 */ "ResetCount" OpHelp(""),
- /* 118 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
- /* 119 */ "SorterData" OpHelp("r[P2]=data"),
- /* 120 */ "RowKey" OpHelp("r[P2]=key"),
- /* 121 */ "RowData" OpHelp("r[P2]=data"),
- /* 122 */ "Rowid" OpHelp("r[P2]=rowid"),
- /* 123 */ "NullRow" OpHelp(""),
- /* 124 */ "SorterInsert" OpHelp(""),
- /* 125 */ "IdxInsert" OpHelp("key=r[P2]"),
- /* 126 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
- /* 127 */ "Seek" OpHelp("Move P3 to P1.rowid"),
- /* 128 */ "IdxRowid" OpHelp("r[P2]=rowid"),
- /* 129 */ "Destroy" OpHelp(""),
- /* 130 */ "Clear" OpHelp(""),
- /* 131 */ "ResetSorter" OpHelp(""),
- /* 132 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
- /* 133 */ "Real" OpHelp("r[P2]=P4"),
- /* 134 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
- /* 135 */ "ParseSchema" OpHelp(""),
- /* 136 */ "LoadAnalysis" OpHelp(""),
- /* 137 */ "DropTable" OpHelp(""),
- /* 138 */ "DropIndex" OpHelp(""),
- /* 139 */ "DropTrigger" OpHelp(""),
- /* 140 */ "IntegrityCk" OpHelp(""),
- /* 141 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
- /* 142 */ "Param" OpHelp(""),
- /* 143 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
- /* 144 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
- /* 145 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
- /* 146 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 147 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 148 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
- /* 149 */ "Expire" OpHelp(""),
- /* 150 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
- /* 151 */ "VBegin" OpHelp(""),
- /* 152 */ "VCreate" OpHelp(""),
- /* 153 */ "VDestroy" OpHelp(""),
- /* 154 */ "VOpen" OpHelp(""),
- /* 155 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 156 */ "VRename" OpHelp(""),
- /* 157 */ "Pagecount" OpHelp(""),
- /* 158 */ "MaxPgcnt" OpHelp(""),
- /* 159 */ "CursorHint" OpHelp(""),
- /* 160 */ "Noop" OpHelp(""),
- /* 161 */ "Explain" OpHelp(""),
+ /* 98 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 99 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
+ /* 100 */ "Count" OpHelp("r[P2]=count()"),
+ /* 101 */ "ReadCookie" OpHelp(""),
+ /* 102 */ "SetCookie" OpHelp(""),
+ /* 103 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
+ /* 104 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
+ /* 105 */ "OpenWrite" OpHelp("root=P2 iDb=P3"),
+ /* 106 */ "OpenAutoindex" OpHelp("nColumn=P2"),
+ /* 107 */ "OpenEphemeral" OpHelp("nColumn=P2"),
+ /* 108 */ "SorterOpen" OpHelp(""),
+ /* 109 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"),
+ /* 110 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"),
+ /* 111 */ "Close" OpHelp(""),
+ /* 112 */ "ColumnsUsed" OpHelp(""),
+ /* 113 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"),
+ /* 114 */ "NewRowid" OpHelp("r[P2]=rowid"),
+ /* 115 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"),
+ /* 116 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"),
+ /* 117 */ "Delete" OpHelp(""),
+ /* 118 */ "ResetCount" OpHelp(""),
+ /* 119 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
+ /* 120 */ "SorterData" OpHelp("r[P2]=data"),
+ /* 121 */ "RowKey" OpHelp("r[P2]=key"),
+ /* 122 */ "RowData" OpHelp("r[P2]=data"),
+ /* 123 */ "Rowid" OpHelp("r[P2]=rowid"),
+ /* 124 */ "NullRow" OpHelp(""),
+ /* 125 */ "SorterInsert" OpHelp(""),
+ /* 126 */ "IdxInsert" OpHelp("key=r[P2]"),
+ /* 127 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
+ /* 128 */ "Seek" OpHelp("Move P3 to P1.rowid"),
+ /* 129 */ "IdxRowid" OpHelp("r[P2]=rowid"),
+ /* 130 */ "Destroy" OpHelp(""),
+ /* 131 */ "Clear" OpHelp(""),
+ /* 132 */ "Real" OpHelp("r[P2]=P4"),
+ /* 133 */ "ResetSorter" OpHelp(""),
+ /* 134 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
+ /* 135 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
+ /* 136 */ "ParseSchema" OpHelp(""),
+ /* 137 */ "LoadAnalysis" OpHelp(""),
+ /* 138 */ "DropTable" OpHelp(""),
+ /* 139 */ "DropIndex" OpHelp(""),
+ /* 140 */ "DropTrigger" OpHelp(""),
+ /* 141 */ "IntegrityCk" OpHelp(""),
+ /* 142 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 143 */ "Param" OpHelp(""),
+ /* 144 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 145 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 146 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
+ /* 147 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 148 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 149 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 150 */ "Expire" OpHelp(""),
+ /* 151 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 152 */ "VBegin" OpHelp(""),
+ /* 153 */ "VCreate" OpHelp(""),
+ /* 154 */ "VDestroy" OpHelp(""),
+ /* 155 */ "VOpen" OpHelp(""),
+ /* 156 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 157 */ "VRename" OpHelp(""),
+ /* 158 */ "Pagecount" OpHelp(""),
+ /* 159 */ "MaxPgcnt" OpHelp(""),
+ /* 160 */ "CursorHint" OpHelp(""),
+ /* 161 */ "Noop" OpHelp(""),
+ /* 162 */ "Explain" OpHelp(""),
};
return azName[i];
}
@@ -34856,6 +34915,27 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
}
/*
+** Find the mode, uid and gid of file zFile.
+*/
+static int getFileMode(
+ const char *zFile, /* File name */
+ mode_t *pMode, /* OUT: Permissions of zFile */
+ uid_t *pUid, /* OUT: uid of zFile. */
+ gid_t *pGid /* OUT: gid of zFile. */
+){
+ struct stat sStat; /* Output of stat() on database file */
+ int rc = SQLITE_OK;
+ if( 0==osStat(zFile, &sStat) ){
+ *pMode = sStat.st_mode & 0777;
+ *pUid = sStat.st_uid;
+ *pGid = sStat.st_gid;
+ }else{
+ rc = SQLITE_IOERR_FSTAT;
+ }
+ return rc;
+}
+
+/*
** This function is called by unixOpen() to determine the unix permissions
** to create new files with. If no error occurs, then SQLITE_OK is returned
** and a value suitable for passing as the third argument to open(2) is
@@ -34890,7 +34970,6 @@ static int findCreateFileMode(
if( flags & (SQLITE_OPEN_WAL|SQLITE_OPEN_MAIN_JOURNAL) ){
char zDb[MAX_PATHNAME+1]; /* Database file path */
int nDb; /* Number of valid bytes in zDb */
- struct stat sStat; /* Output of stat() on database file */
/* zPath is a path to a WAL or journal file. The following block derives
** the path to the associated database file from zPath. This block handles
@@ -34921,15 +35000,18 @@ static int findCreateFileMode(
memcpy(zDb, zPath, nDb);
zDb[nDb] = '\0';
- if( 0==osStat(zDb, &sStat) ){
- *pMode = sStat.st_mode & 0777;
- *pUid = sStat.st_uid;
- *pGid = sStat.st_gid;
- }else{
- rc = SQLITE_IOERR_FSTAT;
- }
+ rc = getFileMode(zDb, pMode, pUid, pGid);
}else if( flags & SQLITE_OPEN_DELETEONCLOSE ){
*pMode = 0600;
+ }else if( flags & SQLITE_OPEN_URI ){
+ /* If this is a main database file and the file was opened using a URI
+ ** filename, check for the "modeof" parameter. If present, interpret
+ ** its value as a filename and try to copy the mode, uid and gid from
+ ** that file. */
+ const char *z = sqlite3_uri_parameter(zPath, "modeof");
+ if( z ){
+ rc = getFileMode(z, pMode, pUid, pGid);
+ }
}
return rc;
}
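/*
** Usage sketch for the "modeof" URI parameter handled above; not part of
** the patch. A newly created database copies its permissions, uid and gid
** from an existing file. Requires URI filenames (SQLITE_OPEN_URI or
** SQLITE_USE_URI=1); the file names are placeholders.
*/
#include <sqlite3.h>

int open_with_copied_mode(sqlite3 **pDb){
  return sqlite3_open_v2(
      "file:/var/lib/app/new.db?modeof=/var/lib/app/existing.db",
      pDb,
      SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI,
      0);
}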
@@ -36810,7 +36892,7 @@ static int proxyClose(sqlite3_file *id) {
** necessarily been initialized when this routine is called, and so they
** should not be used.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
+SQLITE_API int sqlite3_os_init(void){
/*
** The following macro defines an initializer for an sqlite3_vfs object.
** The name of the VFS is NAME. The pAppData is a pointer to a pointer
@@ -36909,7 +36991,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
** to release dynamically allocated objects. But not on unix.
** This routine is a no-op for unix.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
+SQLITE_API int sqlite3_os_end(void){
return SQLITE_OK;
}
@@ -38344,7 +38426,7 @@ static const char *winNextSystemCall(sqlite3_vfs *p, const char *zName){
** "pnLargest" argument, if non-zero, will be used to return the size of the
** largest committed free block in the heap, in bytes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_compact_heap(LPUINT pnLargest){
+SQLITE_API int sqlite3_win32_compact_heap(LPUINT pnLargest){
int rc = SQLITE_OK;
UINT nLargest = 0;
HANDLE hHeap;
@@ -38384,7 +38466,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_win32_compact_heap(LPUINT pnLargest){
** the sqlite3_memory_used() function does not return zero, SQLITE_BUSY will
** be returned and no changes will be made to the Win32 native heap.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_reset_heap(){
+SQLITE_API int sqlite3_win32_reset_heap(){
int rc;
MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */
MUTEX_LOGIC( sqlite3_mutex *pMem; ) /* The memsys static mutex */
@@ -38429,7 +38511,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_win32_reset_heap(){
** (if available).
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_write_debug(const char *zBuf, int nBuf){
+SQLITE_API void sqlite3_win32_write_debug(const char *zBuf, int nBuf){
char zDbgBuf[SQLITE_WIN32_DBG_BUF_SIZE];
int nMin = MIN(nBuf, (SQLITE_WIN32_DBG_BUF_SIZE - 1)); /* may be negative. */
if( nMin<-1 ) nMin = -1; /* all negative values become -1. */
@@ -38475,7 +38557,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_win32_write_debug(const char *zBuf, int n
static HANDLE sleepObj = NULL;
#endif
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds){
+SQLITE_API void sqlite3_win32_sleep(DWORD milliseconds){
#if SQLITE_OS_WINRT
if ( sleepObj==NULL ){
sleepObj = osCreateEventExW(NULL, NULL, CREATE_EVENT_MANUAL_RESET,
@@ -38524,7 +38606,7 @@ SQLITE_PRIVATE DWORD sqlite3Win32Wait(HANDLE hObject){
** This function determines if the machine is running a version of Windows
** based on the NT kernel.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void){
+SQLITE_API int sqlite3_win32_is_nt(void){
#if SQLITE_OS_WINRT
/*
** NOTE: The WinRT sub-platform is always assumed to be based on the NT
@@ -38912,7 +38994,7 @@ static char *winUtf8ToMbcs(const char *zText, int useAnsi){
/*
** This is a public wrapper for the winUtf8ToUnicode() function.
*/
-SQLITE_API LPWSTR SQLITE_STDCALL sqlite3_win32_utf8_to_unicode(const char *zText){
+SQLITE_API LPWSTR sqlite3_win32_utf8_to_unicode(const char *zText){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zText ){
(void)SQLITE_MISUSE_BKPT;
@@ -38928,7 +39010,7 @@ SQLITE_API LPWSTR SQLITE_STDCALL sqlite3_win32_utf8_to_unicode(const char *zText
/*
** This is a public wrapper for the winUnicodeToUtf8() function.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){
+SQLITE_API char *sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zWideText ){
(void)SQLITE_MISUSE_BKPT;
@@ -38944,7 +39026,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText)
/*
** This is a public wrapper for the winMbcsToUtf8() function.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zText){
+SQLITE_API char *sqlite3_win32_mbcs_to_utf8(const char *zText){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zText ){
(void)SQLITE_MISUSE_BKPT;
@@ -38960,7 +39042,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zText){
/*
** This is a public wrapper for the winMbcsToUtf8() function.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8_v2(const char *zText, int useAnsi){
+SQLITE_API char *sqlite3_win32_mbcs_to_utf8_v2(const char *zText, int useAnsi){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zText ){
(void)SQLITE_MISUSE_BKPT;
@@ -38976,7 +39058,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8_v2(const char *zText,
/*
** This is a public wrapper for the winUtf8ToMbcs() function.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zText){
+SQLITE_API char *sqlite3_win32_utf8_to_mbcs(const char *zText){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zText ){
(void)SQLITE_MISUSE_BKPT;
@@ -38992,7 +39074,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zText){
/*
** This is a public wrapper for the winUtf8ToMbcs() function.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs_v2(const char *zText, int useAnsi){
+SQLITE_API char *sqlite3_win32_utf8_to_mbcs_v2(const char *zText, int useAnsi){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !zText ){
(void)SQLITE_MISUSE_BKPT;
@@ -39012,7 +39094,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs_v2(const char *zText,
** argument is the name of the directory to use. The return value will be
** SQLITE_OK if successful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){
+SQLITE_API int sqlite3_win32_set_directory(DWORD type, LPCWSTR zValue){
char **ppDirectory = 0;
#ifndef SQLITE_OMIT_AUTOINIT
int rc = sqlite3_initialize();
@@ -40605,6 +40687,12 @@ static int winFileControl(sqlite3_file *id, int op, void *pArg){
OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
return SQLITE_OK;
}
+ case SQLITE_FCNTL_WIN32_GET_HANDLE: {
+ LPHANDLE phFile = (LPHANDLE)pArg;
+ *phFile = pFile->h;
+ OSTRACE(("FCNTL file=%p, rc=SQLITE_OK\n", pFile->h));
+ return SQLITE_OK;
+ }
#ifdef SQLITE_TEST
case SQLITE_FCNTL_WIN32_SET_HANDLE: {
LPHANDLE phFile = (LPHANDLE)pArg;
@@ -42930,7 +43018,7 @@ static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){
/*
** Initialize and deinitialize the operating system interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
+SQLITE_API int sqlite3_os_init(void){
static sqlite3_vfs winVfs = {
3, /* iVersion */
sizeof(winFile), /* szOsFile */
@@ -43061,7 +43149,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){
return SQLITE_OK;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){
+SQLITE_API int sqlite3_os_end(void){
#if SQLITE_OS_WINRT
if( sleepObj!=NULL ){
osCloseHandle(sleepObj);
@@ -43952,7 +44040,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
assert( pPage!=0 );
pPgHdr = (PgHdr*)pPage->pExtra;
assert( pPgHdr->pPage==0 );
- memset(pPgHdr, 0, sizeof(PgHdr));
+ memset(&pPgHdr->pDirty, 0, sizeof(PgHdr) - offsetof(PgHdr,pDirty));
pPgHdr->pPage = pPage;
pPgHdr->pData = pPage->pBuf;
pPgHdr->pExtra = (void *)&pPgHdr[1];
@@ -44646,7 +44734,7 @@ static int pcache1InitBulk(PCache1 *pCache){
szBulk = -1024 * (i64)pcache1.nInitPage;
}
if( szBulk > pCache->szAlloc*(i64)pCache->nMax ){
- szBulk = pCache->szAlloc*pCache->nMax;
+ szBulk = pCache->szAlloc*(i64)pCache->nMax;
}
zBulk = pCache->pBulk = sqlite3Malloc( szBulk );
sqlite3EndBenignMalloc();
@@ -44999,12 +45087,30 @@ static void pcache1TruncateUnsafe(
PCache1 *pCache, /* The cache to truncate */
unsigned int iLimit /* Drop pages with this pgno or larger */
){
- TESTONLY( unsigned int nPage = 0; ) /* To assert pCache->nPage is correct */
- unsigned int h;
+ TESTONLY( int nPage = 0; ) /* To assert pCache->nPage is correct */
+ unsigned int h, iStop;
assert( sqlite3_mutex_held(pCache->pGroup->mutex) );
- for(h=0; h<pCache->nHash; h++){
- PgHdr1 **pp = &pCache->apHash[h];
+ assert( pCache->iMaxKey >= iLimit );
+ assert( pCache->nHash > 0 );
+ if( pCache->iMaxKey - iLimit < pCache->nHash ){
+ /* If we are just shaving the last few pages off the end of the
+ ** cache, then there is no point in scanning the entire hash table.
+ ** Only scan those hash slots that might contain pages that need to
+ ** be removed. */
+ h = iLimit % pCache->nHash;
+ iStop = pCache->iMaxKey % pCache->nHash;
+ TESTONLY( nPage = -10; ) /* Disable the pCache->nPage validity check */
+ }else{
+ /* This is the general case where many pages are being removed.
+ ** It is necessary to scan the entire hash table */
+ h = pCache->nHash/2;
+ iStop = h - 1;
+ }
+ for(;;){
+ PgHdr1 **pp;
PgHdr1 *pPage;
+ assert( h<pCache->nHash );
+ pp = &pCache->apHash[h];
while( (pPage = *pp)!=0 ){
if( pPage->iKey>=iLimit ){
pCache->nPage--;
@@ -45013,11 +45119,13 @@ static void pcache1TruncateUnsafe(
pcache1FreePage(pPage);
}else{
pp = &pPage->pNext;
- TESTONLY( nPage++; )
+ TESTONLY( if( nPage>=0 ) nPage++; )
}
}
+ if( h==iStop ) break;
+ h = (h+1) % pCache->nHash;
}
- assert( pCache->nPage==nPage );
+ assert( nPage<0 || pCache->nPage==(unsigned)nPage );
}
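/*
** Sketch of the slot-window scan used by pcache1TruncateUnsafe() above;
** not part of the patch. When only keys in [iLimit, iMaxKey] can be
** present, visit just the hash slots those keys map to, wrapping modulo
** nHash; otherwise walk every slot once. visit_slot() is a placeholder
** for the per-slot work. Requires iMaxKey >= iLimit and nHash > 0.
*/
typedef void (*slot_visitor)(unsigned int slot, void *ctx);

static void scan_slot_window(
  unsigned int iLimit,      /* smallest key being removed */
  unsigned int iMaxKey,     /* largest key that may be present */
  unsigned int nHash,       /* number of hash slots */
  slot_visitor visit_slot,
  void *ctx
){
  unsigned int h, iStop;
  if( iMaxKey - iLimit < nHash ){
    h = iLimit % nHash;                /* narrow window: only these slots matter */
    iStop = iMaxKey % nHash;
  }else{
    h = nHash/2;                       /* general case: walk every slot once */
    iStop = (h + nHash - 1) % nHash;
  }
  for(;;){
    visit_slot(h, ctx);
    if( h==iStop ) break;
    h = (h+1) % nHash;
  }
}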
/******************************************************************************/
@@ -45494,7 +45602,7 @@ static void pcache1Destroy(sqlite3_pcache *p){
PGroup *pGroup = pCache->pGroup;
assert( pCache->bPurgeable || (pCache->nMax==0 && pCache->nMin==0) );
pcache1EnterMutex(pGroup);
- pcache1TruncateUnsafe(pCache, 0);
+ if( pCache->nPage ) pcache1TruncateUnsafe(pCache, 0);
assert( pGroup->nMaxPage >= pCache->nMax );
pGroup->nMaxPage -= pCache->nMax;
assert( pGroup->nMinPage >= pCache->nMin );
@@ -47075,9 +47183,10 @@ static const unsigned char aJournalMagic[] = {
** rollback journal. Otherwise false.
*/
#ifndef SQLITE_OMIT_WAL
-static int pagerUseWal(Pager *pPager){
+SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager){
return (pPager->pWal!=0);
}
+# define pagerUseWal(x) sqlite3PagerUseWal(x)
#else
# define pagerUseWal(x) 0
# define pagerRollbackWal(x) 0
@@ -52914,7 +53023,11 @@ SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int nSavepoint){
** savepoint. If no errors occur, SQLITE_OK is returned.
*/
SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
- int rc = pPager->errCode; /* Return code */
+ int rc = pPager->errCode;
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+ if( op==SAVEPOINT_RELEASE ) rc = SQLITE_OK;
+#endif
assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
assert( iSavepoint>=0 || op==SAVEPOINT_ROLLBACK );
@@ -52955,6 +53068,20 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
rc = pagerPlaybackSavepoint(pPager, pSavepoint);
assert(rc!=SQLITE_DONE);
}
+
+#ifdef SQLITE_ENABLE_ZIPVFS
+ /* If the cache has been modified but the savepoint cannot be rolled
+ ** back because journal_mode=off, put the pager in the error state. This way,
+ ** if the VFS used by this pager includes ZipVFS, the entire transaction
+ ** can be rolled back at the ZipVFS level. */
+ else if(
+ pPager->journalMode==PAGER_JOURNALMODE_OFF
+ && pPager->eState>=PAGER_WRITER_CACHEMOD
+ ){
+ pPager->errCode = SQLITE_ABORT;
+ pPager->eState = PAGER_ERROR;
+ }
+#endif
}
return rc;
@@ -57066,7 +57193,7 @@ SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapsho
** Return a +ve value if snapshot p1 is newer than p2. A -ve value if
** p1 is older than p2 and zero if p1 and p2 are the same snapshot.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
+SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){
WalIndexHdr *pHdr1 = (WalIndexHdr*)p1;
WalIndexHdr *pHdr2 = (WalIndexHdr*)p2;
@@ -58203,7 +58330,7 @@ static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0;
** The shared cache setting affects only future calls to
** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int enable){
+SQLITE_API int sqlite3_enable_shared_cache(int enable){
sqlite3GlobalConfig.sharedCacheEnabled = enable;
return SQLITE_OK;
}
@@ -58880,7 +59007,7 @@ static int btreeMoveto(
){
int rc; /* Status code */
UnpackedRecord *pIdxKey; /* Unpacked index key */
- char aSpace[200]; /* Temp space for pIdxKey - to avoid a malloc */
+ char aSpace[384]; /* Temp space for pIdxKey - to avoid a malloc */
char *pFree = 0;
if( pKey ){
@@ -59722,8 +59849,11 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
if( data[iPtr+1]==0 && data[iPtr]==0 ){
iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */
}else{
- while( (iFreeBlk = get2byte(&data[iPtr]))>0 && iFreeBlk<iStart ){
- if( iFreeBlk<iPtr+4 ) return SQLITE_CORRUPT_BKPT;
+ while( (iFreeBlk = get2byte(&data[iPtr]))<iStart ){
+ if( iFreeBlk<iPtr+4 ){
+ if( iFreeBlk==0 ) break;
+ return SQLITE_CORRUPT_BKPT;
+ }
iPtr = iFreeBlk;
}
if( iFreeBlk>iLast ) return SQLITE_CORRUPT_BKPT;
@@ -62715,7 +62845,7 @@ static int accessPayload(
&& (bEnd || a==ovflSize) /* (6) */
&& pBt->inTransaction==TRANS_READ /* (4) */
&& (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */
- && pBt->pPage1->aData[19]==0x01 /* (5) */
+ && 0==sqlite3PagerUseWal(pBt->pPager) /* (5) */
&& &pBuf[-4]>=pBufStart /* (7) */
){
u8 aSave[4];
@@ -64214,8 +64344,6 @@ static int fillInCell(
nHeader += putVarint32(&pCell[nHeader], nPayload);
nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey);
}else{
- assert( pX->nData==0 );
- assert( pX->nZero==0 );
assert( pX->nKey<=0x7fffffff && pX->pKey!=0 );
nSrc = nPayload = (int)pX->nKey;
pSrc = pX->pKey;
@@ -67915,22 +68043,16 @@ static Btree *findBtree(sqlite3 *pErrorDb, sqlite3 *pDb, const char *zDb){
int i = sqlite3FindDbName(pDb, zDb);
if( i==1 ){
- Parse *pParse;
+ Parse sParse;
int rc = 0;
- pParse = sqlite3StackAllocZero(pErrorDb, sizeof(*pParse));
- if( pParse==0 ){
- sqlite3ErrorWithMsg(pErrorDb, SQLITE_NOMEM, "out of memory");
- rc = SQLITE_NOMEM_BKPT;
- }else{
- pParse->db = pDb;
- if( sqlite3OpenTempDatabase(pParse) ){
- sqlite3ErrorWithMsg(pErrorDb, pParse->rc, "%s", pParse->zErrMsg);
- rc = SQLITE_ERROR;
- }
- sqlite3DbFree(pErrorDb, pParse->zErrMsg);
- sqlite3ParserReset(pParse);
- sqlite3StackFree(pErrorDb, pParse);
+ memset(&sParse, 0, sizeof(sParse));
+ sParse.db = pDb;
+ if( sqlite3OpenTempDatabase(&sParse) ){
+ sqlite3ErrorWithMsg(pErrorDb, sParse.rc, "%s", sParse.zErrMsg);
+ rc = SQLITE_ERROR;
}
+ sqlite3DbFree(pErrorDb, sParse.zErrMsg);
+ sqlite3ParserReset(&sParse);
if( rc ){
return 0;
}
@@ -67976,7 +68098,7 @@ static int checkReadTransaction(sqlite3 *db, Btree *p){
** If an error occurs, NULL is returned and an error code and error message
** stored in database handle pDestDb.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3* pDestDb, /* Database to write to */
const char *zDestDb, /* Name of database within pDestDb */
sqlite3* pSrcDb, /* Database connection to read from */
@@ -68028,7 +68150,6 @@ SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
p->isAttached = 0;
if( 0==p->pSrc || 0==p->pDest
- || setDestPgsz(p)==SQLITE_NOMEM
|| checkReadTransaction(pDestDb, p->pDest)!=SQLITE_OK
){
/* One (or both) of the named databases did not exist or an OOM
@@ -68184,7 +68305,7 @@ static void attachBackupObject(sqlite3_backup *p){
/*
** Copy nPage pages from the source b-tree to the destination.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){
int rc;
int destMode; /* Destination journal mode */
int pgszSrc = 0; /* Source page size */
@@ -68216,14 +68337,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
rc = SQLITE_OK;
}
- /* Lock the destination database, if it is not locked already. */
- if( SQLITE_OK==rc && p->bDestLocked==0
- && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2))
- ){
- p->bDestLocked = 1;
- sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema);
- }
-
/* If there is no open read-transaction on the source database, open
** one now. If a transaction is opened here, then it will be closed
** before this function exits.
@@ -68233,6 +68346,24 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
bCloseTrans = 1;
}
+ /* If the destination database has not yet been locked (i.e. if this
+ ** is the first call to backup_step() for the current backup operation),
+ ** try to set its page size to the same as the source database. This
+ ** is especially important on ZipVFS systems, as in that case it is
+ ** not possible to create a database file that uses one page size by
+ ** writing to it with another. */
+ if( p->bDestLocked==0 && rc==SQLITE_OK && setDestPgsz(p)==SQLITE_NOMEM ){
+ rc = SQLITE_NOMEM;
+ }
+
+ /* Lock the destination database, if it is not locked already. */
+ if( SQLITE_OK==rc && p->bDestLocked==0
+ && SQLITE_OK==(rc = sqlite3BtreeBeginTrans(p->pDest, 2))
+ ){
+ p->bDestLocked = 1;
+ sqlite3BtreeGetMeta(p->pDest, BTREE_SCHEMA_VERSION, &p->iDestSchema);
+ }
+
/* Do not allow backup if the destination database is in WAL mode
** and the page sizes are different between source and destination */
pgszSrc = sqlite3BtreeGetPageSize(p->pSrc);
@@ -68428,7 +68559,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage){
/*
** Release all resources associated with an sqlite3_backup* handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p){
sqlite3_backup **pp; /* Ptr to head of pagers backup list */
sqlite3 *pSrcDb; /* Source database connection */
int rc; /* Value to return */
@@ -68480,7 +68611,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p){
** Return the number of pages still to be backed up as of the most recent
** call to sqlite3_backup_step().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p){
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p){
#ifdef SQLITE_ENABLE_API_ARMOR
if( p==0 ){
(void)SQLITE_MISUSE_BKPT;
@@ -68494,7 +68625,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p){
** Return the total number of pages in the source database as of the most
** recent call to sqlite3_backup_step().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p){
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p){
#ifdef SQLITE_ENABLE_API_ARMOR
if( p==0 ){
(void)SQLITE_MISUSE_BKPT;
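/*
** Typical driver loop for the backup interface whose signatures change in
** the hunks above; not part of the patch. This follows the standard
** init/step/finish pattern; "main" and the 64-pages-per-step figure are
** illustrative choices.
*/
#include <sqlite3.h>

int backup_db(sqlite3 *pSrc, sqlite3 *pDest){
  int rc;
  sqlite3_backup *p = sqlite3_backup_init(pDest, "main", pSrc, "main");
  if( p==0 ) return sqlite3_errcode(pDest);
  do{
    rc = sqlite3_backup_step(p, 64);              /* copy up to 64 pages */
    /* sqlite3_backup_remaining(p) and sqlite3_backup_pagecount(p) can be
    ** consulted here to report progress. */
    if( rc==SQLITE_BUSY || rc==SQLITE_LOCKED ) sqlite3_sleep(100);
  }while( rc==SQLITE_OK || rc==SQLITE_BUSY || rc==SQLITE_LOCKED );
  (void)sqlite3_backup_finish(p);                 /* releases the handle */
  return sqlite3_errcode(pDest);
}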
@@ -68821,18 +68952,18 @@ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){
** Return SQLITE_OK on success or SQLITE_NOMEM if malloc fails.
*/
SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
- int f;
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( (pMem->flags&MEM_RowSet)==0 );
- ExpandBlob(pMem);
- f = pMem->flags;
- if( (f&(MEM_Str|MEM_Blob)) && (pMem->szMalloc==0 || pMem->z!=pMem->zMalloc) ){
- if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){
- return SQLITE_NOMEM_BKPT;
+ if( (pMem->flags & (MEM_Str|MEM_Blob))!=0 ){
+ if( ExpandBlob(pMem) ) return SQLITE_NOMEM;
+ if( pMem->szMalloc==0 || pMem->z!=pMem->zMalloc ){
+ if( sqlite3VdbeMemGrow(pMem, pMem->n + 2, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+ pMem->z[pMem->n] = 0;
+ pMem->z[pMem->n+1] = 0;
+ pMem->flags |= MEM_Term;
}
- pMem->z[pMem->n] = 0;
- pMem->z[pMem->n+1] = 0;
- pMem->flags |= MEM_Term;
}
pMem->flags &= ~MEM_Ephem;
#ifdef SQLITE_DEBUG
@@ -68848,25 +68979,24 @@ SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem *pMem){
*/
#ifndef SQLITE_OMIT_INCRBLOB
SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *pMem){
- if( pMem->flags & MEM_Zero ){
- int nByte;
- assert( pMem->flags&MEM_Blob );
- assert( (pMem->flags&MEM_RowSet)==0 );
- assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
-
- /* Set nByte to the number of bytes required to store the expanded blob. */
- nByte = pMem->n + pMem->u.nZero;
- if( nByte<=0 ){
- nByte = 1;
- }
- if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){
- return SQLITE_NOMEM_BKPT;
- }
+ int nByte;
+ assert( pMem->flags & MEM_Zero );
+ assert( pMem->flags&MEM_Blob );
+ assert( (pMem->flags&MEM_RowSet)==0 );
+ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
- memset(&pMem->z[pMem->n], 0, pMem->u.nZero);
- pMem->n += pMem->u.nZero;
- pMem->flags &= ~(MEM_Zero|MEM_Term);
+ /* Set nByte to the number of bytes required to store the expanded blob. */
+ nByte = pMem->n + pMem->u.nZero;
+ if( nByte<=0 ){
+ nByte = 1;
}
+ if( sqlite3VdbeMemGrow(pMem, nByte, 1) ){
+ return SQLITE_NOMEM_BKPT;
+ }
+
+ memset(&pMem->z[pMem->n], 0, pMem->u.nZero);
+ pMem->n += pMem->u.nZero;
+ pMem->flags &= ~(MEM_Zero|MEM_Term);
return SQLITE_OK;
}
#endif
@@ -68926,6 +69056,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
if( sqlite3VdbeMemClearAndResize(pMem, nByte) ){
+ pMem->enc = 0;
return SQLITE_NOMEM_BKPT;
}
@@ -69207,7 +69338,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
}
}
assert( (pMem->flags & (MEM_Int|MEM_Real|MEM_Null))!=0 );
- pMem->flags &= ~(MEM_Str|MEM_Blob);
+ pMem->flags &= ~(MEM_Str|MEM_Blob|MEM_Zero);
return SQLITE_OK;
}
@@ -69225,7 +69356,7 @@ SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
if( (pMem->flags & MEM_Blob)==0 ){
sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
- MemSetTypeFlag(pMem, MEM_Blob);
+ if( pMem->flags & MEM_Str ) MemSetTypeFlag(pMem, MEM_Blob);
}else{
pMem->flags &= ~(MEM_TypeMask&~MEM_Blob);
}
@@ -69650,9 +69781,6 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
assert( (pVal->flags & (MEM_Null))==0 );
if( pVal->flags & (MEM_Blob|MEM_Str) ){
pVal->flags |= MEM_Str;
- if( pVal->flags & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pVal);
- }
if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){
sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
}
@@ -69905,10 +70033,7 @@ static int valueFromExpr(
const char *zNeg = "";
int rc = SQLITE_OK;
- if( !pExpr ){
- *ppVal = 0;
- return SQLITE_OK;
- }
+ assert( pExpr!=0 );
while( (op = pExpr->op)==TK_UPLUS || op==TK_SPAN ) pExpr = pExpr->pLeft;
if( NEVER(op==TK_REGISTER) ) op = pExpr->op2;
@@ -70032,7 +70157,7 @@ SQLITE_PRIVATE int sqlite3ValueFromExpr(
u8 affinity, /* Affinity to use */
sqlite3_value **ppVal /* Write the new value here */
){
- return valueFromExpr(db, pExpr, enc, affinity, ppVal, 0);
+ return pExpr ? valueFromExpr(db, pExpr, enc, affinity, ppVal, 0) : 0;
}
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
@@ -70152,9 +70277,9 @@ static int stat4ValueFromExpr(
** structures intended to be compared against sample index keys stored
** in the sqlite_stat4 table.
**
-** A single call to this function attempts to populates field iVal (leftmost
-** is 0 etc.) of the unpacked record with a value extracted from expression
-** pExpr. Extraction of values is possible if:
+** A single call to this function populates zero or more fields of the
+** record starting with field iVal (fields are numbered from left to
+** right starting with 0). A single field is populated if:
**
** * (pExpr==0). In this case the value is assumed to be an SQL NULL,
**
@@ -70163,10 +70288,14 @@ static int stat4ValueFromExpr(
** * The sqlite3ValueFromExpr() function is able to extract a value
** from the expression (i.e. the expression is a literal value).
**
-** If a value can be extracted, the affinity passed as the 5th argument
-** is applied to it before it is copied into the UnpackedRecord. Output
-** parameter *pbOk is set to true if a value is extracted, or false
-** otherwise.
+** Or, if pExpr is a TK_VECTOR, one field is populated for each of the
+** vector components that match either of the two latter criteria listed
+** above.
+**
+** Before any value is appended to the record, the affinity of the
+** corresponding column within index pIdx is applied to it. Before
+** this function returns, output parameter *pnExtract is set to the
+** number of values appended to the record.
**
** When this function is called, *ppRec must either point to an object
** allocated by an earlier call to this function, or must be NULL. If it
@@ -70182,22 +70311,33 @@ SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(
Index *pIdx, /* Index being probed */
UnpackedRecord **ppRec, /* IN/OUT: Probe record */
Expr *pExpr, /* The expression to extract a value from */
- u8 affinity, /* Affinity to use */
+ int nElem, /* Maximum number of values to append */
int iVal, /* Array element to populate */
- int *pbOk /* OUT: True if value was extracted */
+ int *pnExtract /* OUT: Values appended to the record */
){
- int rc;
- sqlite3_value *pVal = 0;
- struct ValueNewStat4Ctx alloc;
+ int rc = SQLITE_OK;
+ int nExtract = 0;
+
+ if( pExpr==0 || pExpr->op!=TK_SELECT ){
+ int i;
+ struct ValueNewStat4Ctx alloc;
- alloc.pParse = pParse;
- alloc.pIdx = pIdx;
- alloc.ppRec = ppRec;
- alloc.iVal = iVal;
+ alloc.pParse = pParse;
+ alloc.pIdx = pIdx;
+ alloc.ppRec = ppRec;
- rc = stat4ValueFromExpr(pParse, pExpr, affinity, &alloc, &pVal);
- assert( pVal==0 || pVal->db==pParse->db );
- *pbOk = (pVal!=0);
+ for(i=0; i<nElem; i++){
+ sqlite3_value *pVal = 0;
+ Expr *pElem = (pExpr ? sqlite3VectorFieldSubexpr(pExpr, i) : 0);
+ u8 aff = sqlite3IndexColumnAffinity(pParse->db, pIdx, iVal+i);
+ alloc.iVal = iVal+i;
+ rc = stat4ValueFromExpr(pParse, pElem, aff, &alloc, &pVal);
+ if( !pVal ) break;
+ nExtract++;
+ }
+ }
+
+ *pnExtract = nExtract;
return rc;
}
@@ -70360,8 +70500,9 @@ SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
sqlite3 *db = pParse->db;
Vdbe *p;
- p = sqlite3DbMallocZero(db, sizeof(Vdbe) );
+ p = sqlite3DbMallocRawNN(db, sizeof(Vdbe) );
if( p==0 ) return 0;
+ memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp));
p->db = db;
if( db->pVdbe ){
db->pVdbe->pPrev = p;
@@ -70523,9 +70664,8 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
if( p->db->flags & SQLITE_VdbeAddopTrace ){
int jj, kk;
Parse *pParse = p->pParse;
- for(jj=kk=0; jj<SQLITE_N_COLCACHE; jj++){
+ for(jj=kk=0; jj<pParse->nColCache; jj++){
struct yColCache *x = pParse->aColCache + jj;
- if( x->iLevel>pParse->iCacheLevel || x->iReg==0 ) continue;
printf(" r[%d]={%d:%d}", x->iReg, x->iTable, x->iColumn);
kk++;
}
@@ -70713,7 +70853,6 @@ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
if( p->aLabel ){
p->aLabel[j] = v->nOp;
}
- p->iFixedOp = v->nOp - 1;
}
/*
@@ -71104,7 +71243,8 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){
sqlite3VdbeGetOp(p,addr)->p3 = val;
}
SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){
- if( !p->db->mallocFailed ) p->aOp[p->nOp-1].p5 = p5;
+ assert( p->nOp>0 || p->db->mallocFailed );
+ if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5;
}
/*
@@ -71112,7 +71252,6 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){
** the address of the next instruction to be coded.
*/
SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){
- p->pParse->iFixedOp = p->nOp - 1;
sqlite3VdbeChangeP2(p, addr, p->nOp);
}
@@ -71235,7 +71374,7 @@ SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe *p, int addr){
** then remove it. Return true if and only if an opcode was removed.
*/
SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe *p, u8 op){
- if( (p->nOp-1)>(p->pParse->iFixedOp) && p->aOp[p->nOp-1].opcode==op ){
+ if( p->nOp>0 && p->aOp[p->nOp-1].opcode==op ){
return sqlite3VdbeChangeToNoop(p, p->nOp-1);
}else{
return 0;
@@ -71433,12 +71572,21 @@ static int displayComment(
const char *zSynopsis;
int nOpName;
int ii, jj;
+ char zAlt[50];
zOpName = sqlite3OpcodeName(pOp->opcode);
nOpName = sqlite3Strlen30(zOpName);
if( zOpName[nOpName+1] ){
int seenCom = 0;
char c;
zSynopsis = zOpName += nOpName + 1;
+ if( strncmp(zSynopsis,"IF ",3)==0 ){
+ if( pOp->p5 & SQLITE_STOREP2 ){
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "r[P2] = (%s)", zSynopsis+3);
+ }else{
+ sqlite3_snprintf(sizeof(zAlt), zAlt, "if %s goto P2", zSynopsis+3);
+ }
+ zSynopsis = zAlt;
+ }
for(ii=jj=0; jj<nTemp-1 && (c = zSynopsis[ii])!=0; ii++){
if( c=='P' ){
c = zSynopsis[++ii];
@@ -71790,6 +71938,21 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, Op *pOp){
#endif
/*
+** Initialize an array of N Mem element.
+*/
+static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){
+ while( (N--)>0 ){
+ p->db = db;
+ p->flags = flags;
+ p->szMalloc = 0;
+#ifdef SQLITE_DEBUG
+ p->pScopyFrom = 0;
+#endif
+ p++;
+ }
+}
+
+/*
** Release an array of N Mem elements
*/
static void releaseMemArray(Mem *p, int N){
@@ -72000,6 +72163,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
pMem->flags = MEM_Str|MEM_Term;
zP4 = displayP4(pOp, pMem->z, pMem->szMalloc);
if( zP4!=pMem->z ){
+ pMem->n = 0;
sqlite3VdbeMemSetStr(pMem, zP4, -1, SQLITE_UTF8, 0);
}else{
assert( pMem->z!=0 );
@@ -72142,7 +72306,7 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
int i;
#endif
assert( p!=0 );
- assert( p->magic==VDBE_MAGIC_INIT );
+ assert( p->magic==VDBE_MAGIC_INIT || p->magic==VDBE_MAGIC_RESET );
/* There should be at least one opcode.
*/
@@ -72199,7 +72363,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
int nMem; /* Number of VM memory registers */
int nCursor; /* Number of cursors required */
int nArg; /* Number of arguments in subprograms */
- int nOnce; /* Number of OP_Once instructions */
int n; /* Loop counter */
struct ReusableSpace x; /* Reusable bulk memory */
@@ -72214,8 +72377,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
nMem = pParse->nMem;
nCursor = pParse->nTab;
nArg = pParse->nMaxArg;
- nOnce = pParse->nOnce;
- if( nOnce==0 ) nOnce = 1; /* Ensure at least one byte in p->aOnceFlag[] */
/* Each cursor uses a memory cell. The first cursor (cursor 0) can
** use aMem[0] which is not otherwise used by the VDBE program. Allocate
@@ -72234,10 +72395,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
assert( EIGHT_BYTE_ALIGNMENT(x.pSpace) );
x.nFree = ROUNDDOWN8(pParse->szOpAlloc - n); /* Bytes of unused memory */
assert( x.nFree>=0 );
- if( x.nFree>0 ){
- memset(x.pSpace, 0, x.nFree);
- assert( EIGHT_BYTE_ALIGNMENT(&x.pSpace[x.nFree]) );
- }
+ assert( EIGHT_BYTE_ALIGNMENT(&x.pSpace[x.nFree]) );
resolveP2Values(p, &nArg);
p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort);
@@ -72262,36 +72420,34 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem));
p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*));
p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*));
- p->aOnceFlag = allocSpace(&x, p->aOnceFlag, nOnce);
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64));
#endif
if( x.nNeeded==0 ) break;
- x.pSpace = p->pFree = sqlite3DbMallocZero(db, x.nNeeded);
+ x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded);
x.nFree = x.nNeeded;
}while( !db->mallocFailed );
- p->nCursor = nCursor;
- p->nOnceFlag = nOnce;
- if( p->aVar ){
- p->nVar = (ynVar)nVar;
- for(n=0; n<nVar; n++){
- p->aVar[n].flags = MEM_Null;
- p->aVar[n].db = db;
- }
- }
p->nzVar = pParse->nzVar;
p->azVar = pParse->azVar;
pParse->nzVar = 0;
pParse->azVar = 0;
- if( p->aMem ){
+ p->explain = pParse->explain;
+ if( db->mallocFailed ){
+ p->nVar = 0;
+ p->nCursor = 0;
+ p->nMem = 0;
+ }else{
+ p->nCursor = nCursor;
+ p->nVar = (ynVar)nVar;
+ initMemArray(p->aVar, nVar, db, MEM_Null);
p->nMem = nMem;
- for(n=0; n<nMem; n++){
- p->aMem[n].flags = MEM_Undefined;
- p->aMem[n].db = db;
- }
+ initMemArray(p->aMem, nMem, db, MEM_Undefined);
+ memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*));
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ memset(p->anExec, 0, p->nOp*sizeof(i64));
+#endif
}
- p->explain = pParse->explain;
sqlite3VdbeRewind(p);
}
@@ -72360,8 +72516,6 @@ SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
v->anExec = pFrame->anExec;
#endif
- v->aOnceFlag = pFrame->aOnceFlag;
- v->nOnceFlag = pFrame->nOnceFlag;
v->aOp = pFrame->aOp;
v->nOp = pFrame->nOp;
v->aMem = pFrame->aMem;
@@ -72445,13 +72599,9 @@ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){
sqlite3DbFree(db, p->aColName);
n = nResColumn*COLNAME_N;
p->nResColumn = (u16)nResColumn;
- p->aColName = pColName = (Mem*)sqlite3DbMallocZero(db, sizeof(Mem)*n );
+ p->aColName = pColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n );
if( p->aColName==0 ) return;
- while( n-- > 0 ){
- pColName->flags = MEM_Null;
- pColName->db = p->db;
- pColName++;
- }
+ initMemArray(p->aColName, n, p->db, MEM_Null);
}
/*
@@ -72902,7 +73052,6 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
if( db->mallocFailed ){
p->rc = SQLITE_NOMEM_BKPT;
}
- if( p->aOnceFlag ) memset(p->aOnceFlag, 0, p->nOnceFlag);
closeAllCursors(p);
if( p->magic!=VDBE_MAGIC_RUN ){
return SQLITE_OK;
@@ -73214,7 +73363,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
}
#endif
p->iCurrentTime = 0;
- p->magic = VDBE_MAGIC_INIT;
+ p->magic = VDBE_MAGIC_RESET;
return p->rc & db->errMask;
}
@@ -73278,19 +73427,21 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
SubProgram *pSub, *pNext;
int i;
assert( p->db==0 || p->db==db );
- releaseMemArray(p->aVar, p->nVar);
releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
for(pSub=p->pProgram; pSub; pSub=pNext){
pNext = pSub->pNext;
vdbeFreeOpArray(db, pSub->aOp, pSub->nOp);
sqlite3DbFree(db, pSub);
}
- for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]);
- sqlite3DbFree(db, p->azVar);
+ if( p->magic!=VDBE_MAGIC_INIT ){
+ releaseMemArray(p->aVar, p->nVar);
+ for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]);
+ sqlite3DbFree(db, p->azVar);
+ sqlite3DbFree(db, p->pFree);
+ }
vdbeFreeOpArray(db, p->aOp, p->nOp);
sqlite3DbFree(db, p->aColName);
sqlite3DbFree(db, p->zSql);
- sqlite3DbFree(db, p->pFree);
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
for(i=0; i<p->nScan; i++){
sqlite3DbFree(db, p->aScan[i].zName);
@@ -74047,14 +74198,48 @@ static int vdbeCompareMemString(
}
/*
+** The input (z,n) is a Blob that is not marked with MEM_Zero.  Return
+** true if its content consists entirely of zero bytes, meaning that it
+** could equivalently be represented as a zero-blob.
+*/
+static int isAllZero(const char *z, int n){
+ int i;
+ for(i=0; i<n; i++){
+ if( z[i] ) return 0;
+ }
+ return 1;
+}
+
+/*
** Compare two blobs. Return negative, zero, or positive if the first
** is less than, equal to, or greater than the second, respectively.
** If one blob is a prefix of the other, then the shorter is the lesser.
*/
static SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem *pB2){
- int c = memcmp(pB1->z, pB2->z, pB1->n>pB2->n ? pB2->n : pB1->n);
+ int c;
+ int n1 = pB1->n;
+ int n2 = pB2->n;
+
+ /* It is possible to have a Blob value that has some non-zero content
+ ** followed by zero content. But that only comes up for Blobs formed
+ ** by the OP_MakeRecord opcode, and such Blobs never get passed into
+ ** sqlite3MemCompare(). */
+ assert( (pB1->flags & MEM_Zero)==0 || n1==0 );
+ assert( (pB2->flags & MEM_Zero)==0 || n2==0 );
+
+ if( (pB1->flags|pB2->flags) & MEM_Zero ){
+ if( pB1->flags & pB2->flags & MEM_Zero ){
+ return pB1->u.nZero - pB2->u.nZero;
+ }else if( pB1->flags & MEM_Zero ){
+ if( !isAllZero(pB2->z, pB2->n) ) return -1;
+ return pB1->u.nZero - n2;
+ }else{
+ if( !isAllZero(pB1->z, pB1->n) ) return +1;
+ return n1 - pB2->u.nZero;
+ }
+ }
+ c = memcmp(pB1->z, pB2->z, n1>n2 ? n2 : n1);
if( c ) return c;
- return pB1->n - pB2->n;
+ return n1 - n2;
}
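/* A minimal, self-contained sketch (not part of the SQLite sources; the
** "Demo" names are invented for illustration) of the zero-blob comparison
** rule implemented above.  A zero-blob of logical size nZero must compare
** equal to nZero literal 0x00 bytes, so the comparator never materializes
** the zeros: it scans the ordinary blob with an isAllZero()-style check and
** then falls back to comparing lengths. */
#include <string.h>

typedef struct DemoBlob {
  const char *z;      /* Content bytes, or NULL for a pure zero-blob */
  int n;              /* Number of content bytes when z!=NULL */
  int nZero;          /* Logical size in bytes when z==NULL */
} DemoBlob;

static int demoIsAllZero(const char *z, int n){
  int i;
  for(i=0; i<n; i++){ if( z[i] ) return 0; }
  return 1;
}

/* Negative, zero, or positive, following the same rules as
** sqlite3BlobCompare() in the hunk above. */
static int demoBlobCompare(const DemoBlob *a, const DemoBlob *b){
  int c;
  if( a->z==0 && b->z==0 ) return a->nZero - b->nZero;
  if( a->z==0 ){
    if( !demoIsAllZero(b->z, b->n) ) return -1;
    return a->nZero - b->n;
  }
  if( b->z==0 ){
    if( !demoIsAllZero(a->z, a->n) ) return +1;
    return a->n - b->nZero;
  }
  c = memcmp(a->z, b->z, a->n<b->n ? a->n : b->n);
  return c ? c : a->n - b->n;
}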
/*
@@ -74360,6 +74545,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
/* RHS is a blob */
else if( pRhs->flags & MEM_Blob ){
+ assert( (pRhs->flags & MEM_Zero)==0 || pRhs->n==0 );
getVarint32(&aKey1[idx1], serial_type);
testcase( serial_type==12 );
if( serial_type<12 || (serial_type & 0x01) ){
@@ -74371,6 +74557,12 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
if( (d1+nStr) > (unsigned)nKey1 ){
pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
return 0; /* Corruption */
+ }else if( pRhs->flags & MEM_Zero ){
+ if( !isAllZero((const char*)&aKey1[d1],nStr) ){
+ rc = 1;
+ }else{
+ rc = nStr - pRhs->u.nZero;
+ }
}else{
int nCmp = MIN(nStr, pRhs->n);
rc = memcmp(&aKey1[d1], pRhs->z, nCmp);
@@ -74441,7 +74633,7 @@ static int vdbeRecordCompareInt(
int res;
u32 y;
u64 x;
- i64 v = pPKey2->aMem[0].u.i;
+ i64 v;
i64 lhs;
vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
@@ -74500,6 +74692,7 @@ static int vdbeRecordCompareInt(
return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
}
+ v = pPKey2->aMem[0].u.i;
if( v>lhs ){
res = pPKey2->r1;
}else if( v<lhs ){
@@ -74903,7 +75096,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
preupdate.keyinfo.aSortOrder = (u8*)&fakeSortOrder;
preupdate.iKey1 = iKey1;
preupdate.iKey2 = iKey2;
- preupdate.iPKey = pTab->iPKey;
+ preupdate.pTab = pTab;
db->pPreUpdate = &preupdate;
db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
@@ -74950,7 +75143,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
** collating sequences are registered or if an authorizer function is
** added or changed.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_expired(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
return p==0 || p->expired;
}
@@ -75019,7 +75212,7 @@ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){
** This routine sets the error code and string returned by
** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){
int rc;
if( pStmt==0 ){
/* IMPLEMENTATION-OF: R-57228-12904 Invoking sqlite3_finalize() on a NULL
@@ -75046,7 +75239,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt){
** This routine sets the error code and string returned by
** sqlite3_errcode(), sqlite3_errmsg() and sqlite3_errmsg16().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt){
int rc;
if( pStmt==0 ){
rc = SQLITE_OK;
@@ -75067,7 +75260,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt){
/*
** Set all the parameters in the compiled SQL statement to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){
int i;
int rc = SQLITE_OK;
Vdbe *p = (Vdbe*)pStmt;
@@ -75091,10 +75284,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt *pStmt){
** The following routines extract information from a Mem or sqlite3_value
** structure.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value *pVal){
Mem *p = (Mem*)pVal;
if( p->flags & (MEM_Blob|MEM_Str) ){
- if( sqlite3VdbeMemExpandBlob(p)!=SQLITE_OK ){
+ if( ExpandBlob(p)!=SQLITE_OK ){
assert( p->flags==MEM_Null && p->z==0 );
return 0;
}
@@ -75104,36 +75297,36 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value *pVal){
return sqlite3_value_text(pVal);
}
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_bytes(sqlite3_value *pVal){
return sqlite3ValueBytes(pVal, SQLITE_UTF8);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value *pVal){
return sqlite3ValueBytes(pVal, SQLITE_UTF16NATIVE);
}
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value *pVal){
+SQLITE_API double sqlite3_value_double(sqlite3_value *pVal){
return sqlite3VdbeRealValue((Mem*)pVal);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_int(sqlite3_value *pVal){
return (int)sqlite3VdbeIntValue((Mem*)pVal);
}
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value *pVal){
+SQLITE_API sqlite_int64 sqlite3_value_int64(sqlite3_value *pVal){
return sqlite3VdbeIntValue((Mem*)pVal);
}
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value *pVal){
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value *pVal){
Mem *pMem = (Mem*)pVal;
return ((pMem->flags & MEM_Subtype) ? pMem->eSubtype : 0);
}
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value *pVal){
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value *pVal){
return (const unsigned char *)sqlite3ValueText(pVal, SQLITE_UTF8);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value* pVal){
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value* pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16NATIVE);
}
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value *pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16BE);
}
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value *pVal){
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value *pVal){
return sqlite3ValueText(pVal, SQLITE_UTF16LE);
}
#endif /* SQLITE_OMIT_UTF16 */
@@ -75141,7 +75334,7 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value *pVal
** fundamental datatypes: 64-bit signed integer, 64-bit IEEE floating
** point number, string, BLOB, or NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value* pVal){
+SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
static const u8 aType[] = {
SQLITE_BLOB, /* 0x00 */
SQLITE_NULL, /* 0x01 */
@@ -75181,7 +75374,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value* pVal){
/* Make a copy of an sqlite3_value object
*/
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value *pOrig){
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value *pOrig){
sqlite3_value *pNew;
if( pOrig==0 ) return 0;
pNew = sqlite3_malloc( sizeof(*pNew) );
@@ -75204,7 +75397,7 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value *
/* Destroy an sqlite3_value object previously obtained from
** sqlite3_value_dup().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value *pOld){
+SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){
sqlite3ValueFree(pOld);
}
@@ -75247,7 +75440,7 @@ static int invokeValueDestructor(
if( pCtx ) sqlite3_result_error_toobig(pCtx);
return SQLITE_TOOBIG;
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(
+SQLITE_API void sqlite3_result_blob(
sqlite3_context *pCtx,
const void *z,
int n,
@@ -75257,7 +75450,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, 0, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(
+SQLITE_API void sqlite3_result_blob64(
sqlite3_context *pCtx,
const void *z,
sqlite3_uint64 n,
@@ -75271,43 +75464,43 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(
setResultStrOrError(pCtx, z, (int)n, 0, xDel);
}
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context *pCtx, double rVal){
+SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetDouble(pCtx->pOut, rVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
+SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
pCtx->fErrorOrAux = 1;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
+SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_ERROR;
pCtx->fErrorOrAux = 1;
sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT);
}
#endif
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context *pCtx, int iVal){
+SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
+SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetInt64(pCtx->pOut, iVal);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context *pCtx){
+SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
+SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){
Mem *pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
pOut->eSubtype = eSubtype & 0xff;
pOut->flags |= MEM_Subtype;
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(
+SQLITE_API void sqlite3_result_text(
sqlite3_context *pCtx,
const char *z,
int n,
@@ -75316,7 +75509,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_text(
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(
+SQLITE_API void sqlite3_result_text64(
sqlite3_context *pCtx,
const char *z,
sqlite3_uint64 n,
@@ -75333,7 +75526,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(
}
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(
+SQLITE_API void sqlite3_result_text16(
sqlite3_context *pCtx,
const void *z,
int n,
@@ -75342,7 +75535,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(
+SQLITE_API void sqlite3_result_text16be(
sqlite3_context *pCtx,
const void *z,
int n,
@@ -75351,7 +75544,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(
+SQLITE_API void sqlite3_result_text16le(
sqlite3_context *pCtx,
const void *z,
int n,
@@ -75361,15 +75554,15 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(
setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel);
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
+SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemCopy(pCtx->pOut, pValue);
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
Mem *pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
@@ -75378,7 +75571,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context *pCtx, u
sqlite3VdbeMemSetZeroBlob(pCtx->pOut, (int)n);
return SQLITE_OK;
}
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
+SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
pCtx->isError = errCode;
pCtx->fErrorOrAux = 1;
#ifdef SQLITE_DEBUG
@@ -75391,7 +75584,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context *pCtx,
}
/* Force an SQLITE_TOOBIG error. */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context *pCtx){
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
pCtx->isError = SQLITE_TOOBIG;
pCtx->fErrorOrAux = 1;
@@ -75400,7 +75593,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context *pCtx
}
/* An SQLITE_NOMEM error. */
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context *pCtx){
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
sqlite3VdbeMemSetNull(pCtx->pOut);
pCtx->isError = SQLITE_NOMEM_BKPT;
@@ -75424,7 +75617,7 @@ static int doWalCallbacks(sqlite3 *db){
nEntry = sqlite3PagerWalCallback(sqlite3BtreePager(pBt));
sqlite3BtreeLeave(pBt);
if( db->xWalCallback && nEntry>0 && rc==SQLITE_OK ){
- rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zName, nEntry);
+ rc = db->xWalCallback(db->pWalArg, db, db->aDb[i].zDbSName, nEntry);
}
}
}
@@ -75572,7 +75765,7 @@ end_of_step:
** sqlite3Step() to do most of the work. If a schema error occurs,
** call sqlite3Reprepare() and try again.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
int rc = SQLITE_OK; /* Result from sqlite3Step() */
int rc2 = SQLITE_OK; /* Result from sqlite3Reprepare() */
Vdbe *v = (Vdbe*)pStmt; /* the prepared statement */
@@ -75623,7 +75816,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt *pStmt){
** Extract the user data from a sqlite3_context structure and return a
** pointer to it.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context *p){
+SQLITE_API void *sqlite3_user_data(sqlite3_context *p){
assert( p && p->pFunc );
return p->pFunc->pUserData;
}
@@ -75638,7 +75831,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context *p){
** sqlite3_create_function16() routines that originally registered the
** application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context *p){
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){
assert( p && p->pOut );
return p->pOut->db;
}
@@ -75714,7 +75907,7 @@ static SQLITE_NOINLINE void *createAggContext(sqlite3_context *p, int nByte){
** context is allocated on the first call. Subsequent calls return the
** same context that was returned on prior calls.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context *p, int nByte){
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){
assert( p && p->pFunc && p->pFunc->xFinalize );
assert( sqlite3_mutex_held(p->pOut->db->mutex) );
testcase( nByte<0 );
@@ -75729,7 +75922,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context *p, in
** Return the auxiliary data pointer, if any, for the iArg'th argument to
** the user-function defined by pCtx.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){
AuxData *pAuxData;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
@@ -75750,7 +75943,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context *pCtx, int i
** argument to the user-function defined by pCtx. Any previous value is
** deleted by calling the delete function specified when it was set.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(
+SQLITE_API void sqlite3_set_auxdata(
sqlite3_context *pCtx,
int iArg,
void *pAux,
@@ -75805,7 +75998,7 @@ failed:
** implementations should keep their own counts within their aggregate
** context.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context *p){
+SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){
assert( p && p->pMem && p->pFunc && p->pFunc->xFinalize );
return p->pMem->n;
}
@@ -75814,7 +76007,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context *p){
/*
** Return the number of columns in the result set for the statement pStmt.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
return pVm ? pVm->nResColumn : 0;
}
@@ -75823,7 +76016,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt){
** Return the number of values available from the current row of the
** currently executing statement pStmt.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
if( pVm==0 || pVm->pResultSet==0 ) return 0;
return pVm->nResColumn;
@@ -75877,14 +76070,13 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
Mem *pOut;
pVm = (Vdbe *)pStmt;
- if( pVm && pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
- sqlite3_mutex_enter(pVm->db->mutex);
+ if( pVm==0 ) return (Mem*)columnNullValue();
+ assert( pVm->db );
+ sqlite3_mutex_enter(pVm->db->mutex);
+ if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
pOut = &pVm->pResultSet[i];
}else{
- if( pVm && ALWAYS(pVm->db) ){
- sqlite3_mutex_enter(pVm->db->mutex);
- sqlite3Error(pVm->db, SQLITE_RANGE);
- }
+ sqlite3Error(pVm->db, SQLITE_RANGE);
pOut = (Mem*)columnNullValue();
}
return pOut;
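/* An illustrative caller (a sketch, not part of the diff) for the
** columnMem()/columnMallocFailure() pairing reworked above.  Every public
** sqlite3_column_*() call enters the database mutex in columnMem() and
** leaves it in columnMallocFailure(), so a pointer returned by one call is
** only guaranteed to stay valid until the next call on the same statement. */
#include <stdio.h>
#include "sqlite3.h"

static void printFirstColumn(sqlite3_stmt *pStmt){
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    const unsigned char *zText = sqlite3_column_text(pStmt, 0);
    int nByte = sqlite3_column_bytes(pStmt, 0); /* enters and leaves the mutex again */
    if( zText ) printf("%.*s\n", nByte, (const char*)zText);
  }
}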
@@ -75917,6 +76109,8 @@ static void columnMallocFailure(sqlite3_stmt *pStmt)
*/
Vdbe *p = (Vdbe *)pStmt;
if( p ){
+ assert( p->db!=0 );
+ assert( sqlite3_mutex_held(p->db->mutex) );
p->rc = sqlite3ApiExit(p->db, p->rc);
sqlite3_mutex_leave(p->db->mutex);
}
@@ -75926,7 +76120,7 @@ static void columnMallocFailure(sqlite3_stmt *pStmt)
** The following routines are used to access elements of the current row
** in the result set.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt *pStmt, int i){
const void *val;
val = sqlite3_value_blob( columnMem(pStmt,i) );
/* Even though there is no encoding conversion, value_blob() might
@@ -75936,37 +76130,37 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt *pStmt, i
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_bytes( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_bytes16( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt *pStmt, int i){
+SQLITE_API double sqlite3_column_double(sqlite3_stmt *pStmt, int i){
double val = sqlite3_value_double( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_int(sqlite3_stmt *pStmt, int i){
int val = sqlite3_value_int( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
+SQLITE_API sqlite_int64 sqlite3_column_int64(sqlite3_stmt *pStmt, int i){
sqlite_int64 val = sqlite3_value_int64( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt *pStmt, int i){
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt *pStmt, int i){
const unsigned char *val = sqlite3_value_text( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt *pStmt, int i){
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt *pStmt, int i){
Mem *pOut = columnMem(pStmt, i);
if( pOut->flags&MEM_Static ){
pOut->flags &= ~MEM_Static;
@@ -75976,13 +76170,13 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt *pStm
return (sqlite3_value *)pOut;
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt *pStmt, int i){
const void *val = sqlite3_value_text16( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return val;
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){
int iType = sqlite3_value_type( columnMem(pStmt,i) );
columnMallocFailure(pStmt);
return iType;
@@ -76046,12 +76240,12 @@ static const void *columnName(
** Return the name of the Nth column of the result set returned by SQL
** statement pStmt.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_NAME);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_NAME);
}
@@ -76071,12 +76265,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt *pStmt,
** Return the column declaration type (if applicable) of the 'i'th column
** of the result set of SQL statement pStmt.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DECLTYPE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DECLTYPE);
}
@@ -76089,12 +76283,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt *pS
** NULL is returned if the result column is an expression or constant or
** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_DATABASE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_DATABASE);
}
@@ -76105,12 +76299,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stm
** NULL is returned if the result column is an expression or constant or
** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_TABLE);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_TABLE);
}
@@ -76121,12 +76315,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt *
** NULL is returned if the result column is an expression or constant or
** anything else which is not an unambiguous reference to a database column.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text, COLNAME_COLUMN);
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
return columnName(
pStmt, N, (const void*(*)(Mem*))sqlite3_value_text16, COLNAME_COLUMN);
}
@@ -76227,7 +76421,7 @@ static int bindText(
/*
** Bind a blob value to an SQL statement variable.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(
+SQLITE_API int sqlite3_bind_blob(
sqlite3_stmt *pStmt,
int i,
const void *zData,
@@ -76239,7 +76433,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(
#endif
return bindText(pStmt, i, zData, nData, xDel, 0);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(
+SQLITE_API int sqlite3_bind_blob64(
sqlite3_stmt *pStmt,
int i,
const void *zData,
@@ -76253,7 +76447,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(
return bindText(pStmt, i, zData, (int)nData, xDel, 0);
}
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -76263,10 +76457,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt *pStmt, int i, do
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
return sqlite3_bind_int64(p, i, (i64)iValue);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -76276,7 +76470,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sql
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
int rc;
Vdbe *p = (Vdbe*)pStmt;
rc = vdbeUnbind(p, i);
@@ -76285,7 +76479,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(
+SQLITE_API int sqlite3_bind_text(
sqlite3_stmt *pStmt,
int i,
const char *zData,
@@ -76294,7 +76488,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(
){
return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF8);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(
+SQLITE_API int sqlite3_bind_text64(
sqlite3_stmt *pStmt,
int i,
const char *zData,
@@ -76311,7 +76505,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(
}
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(
+SQLITE_API int sqlite3_bind_text16(
sqlite3_stmt *pStmt,
int i,
const void *zData,
@@ -76321,7 +76515,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(
return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE);
}
#endif /* SQLITE_OMIT_UTF16 */
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
int rc;
switch( sqlite3_value_type((sqlite3_value*)pValue) ){
case SQLITE_INTEGER: {
@@ -76352,7 +76546,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt *pStmt, int i, con
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
rc = vdbeUnbind(p, i);
@@ -76362,7 +76556,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i,
}
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
sqlite3_mutex_enter(p->db->mutex);
@@ -76381,7 +76575,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i
** Return the number of wildcards that can be potentially bound to.
** This routine is added to support DBD::SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
return p ? p->nVar : 0;
}
@@ -76392,7 +76586,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
**
** The result is always UTF-8.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
Vdbe *p = (Vdbe*)pStmt;
if( p==0 || i<1 || i>p->nzVar ){
return 0;
@@ -76420,7 +76614,7 @@ SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nNa
}
return 0;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
}
@@ -76454,7 +76648,7 @@ SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *pFromStmt, sqlite3_stmt
** an SQLITE_ERROR is returned. Nothing else can go wrong, so otherwise
** SQLITE_OK is returned.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
+SQLITE_API int sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt, sqlite3_stmt *pToStmt){
Vdbe *pFrom = (Vdbe*)pFromStmt;
Vdbe *pTo = (Vdbe*)pToStmt;
if( pFrom->nVar!=pTo->nVar ){
@@ -76476,7 +76670,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt *pFromStmt,
** the first argument to the sqlite3_prepare() that was used to create
** the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt *pStmt){
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt *pStmt){
return pStmt ? ((Vdbe*)pStmt)->db : 0;
}
@@ -76484,16 +76678,16 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt *pStmt){
** Return true if the prepared statement is guaranteed to not modify the
** database.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt){
return pStmt ? ((Vdbe*)pStmt)->readOnly : 1;
}
/*
** Return true if the prepared statement is in need of being reset.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt *pStmt){
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
Vdbe *v = (Vdbe*)pStmt;
- return v!=0 && v->pc>=0 && v->magic==VDBE_MAGIC_RUN;
+ return v!=0 && v->magic==VDBE_MAGIC_RUN && v->pc>=0;
}
/*
@@ -76502,7 +76696,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt *pStmt){
** prepared statement for the database connection. Return NULL if there
** are no more.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
sqlite3_stmt *pNext;
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(pDb) ){
@@ -76523,7 +76717,7 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
/*
** Return the value of a status counter for a prepared statement
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
Vdbe *pVdbe = (Vdbe*)pStmt;
u32 v;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -76540,7 +76734,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, i
/*
** Return the SQL associated with a prepared statement
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe *)pStmt;
return p ? p->zSql : 0;
}
@@ -76554,7 +76748,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){
** The SQLITE_TRACE_SIZE_LIMIT puts an upper bound on the size of
** expanded bound parameters.
*/
-SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt){
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt){
#ifdef SQLITE_OMIT_TRACE
return 0;
#else
@@ -76596,7 +76790,7 @@ static UnpackedRecord *vdbeUnpackRecord(
** This function is called from within a pre-update callback to retrieve
** a field of the row currently being updated or deleted.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
PreUpdate *p = db->pPreUpdate;
int rc = SQLITE_OK;
@@ -76634,9 +76828,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlit
if( iIdx>=p->pUnpacked->nField ){
*ppValue = (sqlite3_value *)columnNullValue();
}else{
+ Mem *pMem = *ppValue = &p->pUnpacked->aMem[iIdx];
*ppValue = &p->pUnpacked->aMem[iIdx];
- if( iIdx==p->iPKey ){
- sqlite3VdbeMemSetInt64(*ppValue, p->iKey1);
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey1);
+ }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){
+ if( pMem->flags & MEM_Int ){
+ sqlite3VdbeMemRealify(pMem);
+ }
}
}
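/* A usage sketch (not part of the diff; requires a build with
** SQLITE_ENABLE_PREUPDATE_HOOK) for the interface whose internals change
** above.  After this update sqlite3_preupdate_old() applies REAL affinity,
** so an INTEGER stored in a column declared REAL is reported to the
** callback as a floating-point value. */
#include "sqlite3.h"

static void demoPreupdate(
  void *pArg, sqlite3 *db, int op,
  const char *zDb, const char *zTab,
  sqlite3_int64 iKey1, sqlite3_int64 iKey2
){
  (void)pArg; (void)zDb; (void)zTab; (void)iKey1; (void)iKey2;
  if( op==SQLITE_UPDATE || op==SQLITE_DELETE ){
    int i, n = sqlite3_preupdate_count(db);
    for(i=0; i<n; i++){
      sqlite3_value *pOld = 0;
      if( sqlite3_preupdate_old(db, i, &pOld)==SQLITE_OK ){
        /* Inspect pOld with sqlite3_value_type(), sqlite3_value_double(), ... */
      }
    }
  }
}
/* Registration, typically right after sqlite3_open():
**   sqlite3_preupdate_hook(db, demoPreupdate, 0);
*/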
@@ -76651,7 +76850,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlit
** This function is called from within a pre-update callback to retrieve
** the number of columns in the row being updated, deleted or inserted.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *db){
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){
PreUpdate *p = db->pPreUpdate;
return (p ? p->keyinfo.nField : 0);
}
@@ -76669,7 +76868,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *db){
** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL
** or SET DEFAULT action is considered a trigger.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *db){
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
PreUpdate *p = db->pPreUpdate;
return (p ? p->v->nFrame : 0);
}
@@ -76680,7 +76879,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *db){
** This function is called from within a pre-update callback to retrieve
** a field of the row currently being updated or inserted.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
PreUpdate *p = db->pPreUpdate;
int rc = SQLITE_OK;
Mem *pMem;
@@ -76700,7 +76899,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlit
UnpackedRecord *pUnpack = p->pNewUnpacked;
if( !pUnpack ){
Mem *pData = &p->v->aMem[p->iNewReg];
- rc = sqlite3VdbeMemExpandBlob(pData);
+ rc = ExpandBlob(pData);
if( rc!=SQLITE_OK ) goto preupdate_new_out;
pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z);
if( !pUnpack ){
@@ -76713,7 +76912,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlit
pMem = (sqlite3_value *)columnNullValue();
}else{
pMem = &pUnpack->aMem[iIdx];
- if( iIdx==p->iPKey ){
+ if( iIdx==p->pTab->iPKey ){
sqlite3VdbeMemSetInt64(pMem, p->iKey2);
}
}
@@ -76734,7 +76933,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlit
assert( iIdx>=0 && iIdx<p->pCsr->nField );
pMem = &p->aNew[iIdx];
if( pMem->flags==0 ){
- if( iIdx==p->iPKey ){
+ if( iIdx==p->pTab->iPKey ){
sqlite3VdbeMemSetInt64(pMem, p->iKey2);
}else{
rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]);
@@ -76754,7 +76953,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlit
/*
** Return status data for a single loop within query pStmt.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
+SQLITE_API int sqlite3_stmt_scanstatus(
sqlite3_stmt *pStmt, /* Prepared statement being queried */
int idx, /* Index of loop to report on */
int iScanStatusOp, /* Which metric to return */
@@ -76813,7 +77012,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
/*
** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
memset(p->anExec, 0, p->nOp * sizeof(i64));
}
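/* A usage sketch (not part of the diff; requires a build with
** SQLITE_ENABLE_STMT_SCANSTATUS) for the scan-status interface shown above.
** Loop indexes are probed until sqlite3_stmt_scanstatus() reports that the
** index is out of range. */
#include <stdio.h>
#include "sqlite3.h"

static void dumpScanStatus(sqlite3_stmt *pStmt){
  int idx;
  for(idx=0; ; idx++){
    sqlite3_int64 nLoop = 0, nVisit = 0;
    const char *zName = 0;
    if( sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NLOOP, &nLoop) ) break;
    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NVISIT, &nVisit);
    sqlite3_stmt_scanstatus(pStmt, idx, SQLITE_SCANSTAT_NAME, &zName);
    printf("loop %d (%s): %lld iterations, %lld rows visited\n",
           idx, zName ? zName : "?", (long long)nLoop, (long long)nVisit);
  }
  sqlite3_stmt_scanstatus_reset(pStmt); /* zero the counters once reported */
}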
@@ -77340,7 +77539,7 @@ static void applyAffinity(
** is appropriate. But only do the conversion if it is possible without
** loss of information and return the revised type of the argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value *pVal){
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value *pVal){
int eType = sqlite3_value_type(pVal);
if( eType==SQLITE_TEXT ){
Mem *pMem = (Mem*)pVal;
@@ -77682,7 +77881,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
sqlite3 *db = p->db; /* The database */
u8 resetSchemaOnFault = 0; /* Reset schema after an error if positive */
u8 encoding = ENC(db); /* The database encoding */
- int iCompare = 0; /* Result of last OP_Compare operation */
+ int iCompare = 0; /* Result of last comparison */
unsigned nVmStep = 0; /* Number of virtual machine steps */
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
unsigned nProgressLimit = 0;/* Invoke xProgress() when nVmStep reaches this */
@@ -78014,7 +78213,7 @@ case OP_Yield: { /* in1, jump */
}
/* Opcode: HaltIfNull P1 P2 P3 P4 P5
-** Synopsis: if r[P3]=null halt
+** Synopsis: if r[P3]=null halt
**
** Check the value in register P3. If it is NULL then Halt using
** parameter P1, P2, and P4 as if this were a Halt instruction. If the
@@ -78227,7 +78426,7 @@ case OP_String: { /* out2 */
}
/* Opcode: Null P1 P2 P3 * *
-** Synopsis: r[P2..P3]=NULL
+** Synopsis: r[P2..P3]=NULL
**
** Write a NULL into register P2. If P3 is greater than P2, then also write
** NULL into register P3 and every register in between P2 and P3. If P3
@@ -78245,18 +78444,20 @@ case OP_Null: { /* out2 */
cnt = pOp->p3-pOp->p2;
assert( pOp->p3<=(p->nMem+1 - p->nCursor) );
pOut->flags = nullFlag = pOp->p1 ? (MEM_Null|MEM_Cleared) : MEM_Null;
+ pOut->n = 0;
while( cnt>0 ){
pOut++;
memAboutToChange(p, pOut);
sqlite3VdbeMemSetNull(pOut);
pOut->flags = nullFlag;
+ pOut->n = 0;
cnt--;
}
break;
}
/* Opcode: SoftNull P1 * * * *
-** Synopsis: r[P1]=NULL
+** Synopsis: r[P1]=NULL
**
** Set register P1 to have the value NULL as seen by the OP_MakeRecord
** instruction, but do not free any string or blob memory associated with
@@ -78309,7 +78510,7 @@ case OP_Variable: { /* out2 */
}
/* Opcode: Move P1 P2 P3 * *
-** Synopsis: r[P2@P3]=r[P1@P3]
+** Synopsis: r[P2@P3]=r[P1@P3]
**
** Move the P3 values in register P1..P1+P3-1 over into
** registers P2..P2+P3-1. Registers P1..P1+P3-1 are
@@ -78419,7 +78620,7 @@ case OP_IntCopy: { /* out2 */
}
/* Opcode: ResultRow P1 P2 * * *
-** Synopsis: output=r[P1@P2]
+** Synopsis: output=r[P1@P2]
**
** The registers P1 through P1+P2-1 contain a single row of
** results. This opcode causes the sqlite3_step() call to terminate
@@ -78552,14 +78753,14 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
}
/* Opcode: Add P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]+r[P2]
+** Synopsis: r[P3]=r[P1]+r[P2]
**
** Add the value in register P1 to the value in register P2
** and store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: Multiply P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]*r[P2]
+** Synopsis: r[P3]=r[P1]*r[P2]
**
**
** Multiply the value in register P1 by the value in register P2
@@ -78567,14 +78768,14 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
** If either input is NULL, the result is NULL.
*/
/* Opcode: Subtract P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]-r[P1]
+** Synopsis: r[P3]=r[P2]-r[P1]
**
** Subtract the value in register P1 from the value in register P2
** and store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: Divide P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]/r[P1]
+** Synopsis: r[P3]=r[P2]/r[P1]
**
** Divide the value in register P1 by the value in register P2
** and store the result in register P3 (P3=P2/P1). If the value in
@@ -78582,7 +78783,7 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
** NULL, the result is NULL.
*/
/* Opcode: Remainder P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]%r[P1]
+** Synopsis: r[P3]=r[P2]%r[P1]
**
** Compute the remainder after integer register P2 is divided by
** register P1 and store the result in register P3.
@@ -78815,21 +79016,21 @@ case OP_Function: {
}
/* Opcode: BitAnd P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]&r[P2]
+** Synopsis: r[P3]=r[P1]&r[P2]
**
** Take the bit-wise AND of the values in register P1 and P2 and
** store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: BitOr P1 P2 P3 * *
-** Synopsis: r[P3]=r[P1]|r[P2]
+** Synopsis: r[P3]=r[P1]|r[P2]
**
** Take the bit-wise OR of the values in register P1 and P2 and
** store the result in register P3.
** If either input is NULL, the result is NULL.
*/
/* Opcode: ShiftLeft P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]<<r[P1]
+** Synopsis: r[P3]=r[P2]<<r[P1]
**
** Shift the integer value in register P2 to the left by the
** number of bits specified by the integer in register P1.
@@ -78837,7 +79038,7 @@ case OP_Function: {
** If either input is NULL, the result is NULL.
*/
/* Opcode: ShiftRight P1 P2 P3 * *
-** Synopsis: r[P3]=r[P2]>>r[P1]
+** Synopsis: r[P3]=r[P2]>>r[P1]
**
** Shift the integer value in register P2 to the right by the
** number of bits specified by the integer in register P1.
@@ -78897,7 +79098,7 @@ case OP_ShiftRight: { /* same as TK_RSHIFT, in1, in2, out3 */
}
/* Opcode: AddImm P1 P2 * * *
-** Synopsis: r[P1]=r[P1]+P2
+** Synopsis: r[P1]=r[P1]+P2
**
** Add the constant P2 to the value in register P1.
** The result is always an integer.
@@ -78989,15 +79190,12 @@ case OP_Cast: { /* in1 */
}
#endif /* SQLITE_OMIT_CAST */
-/* Opcode: Lt P1 P2 P3 P4 P5
-** Synopsis: if r[P1]<r[P3] goto P2
-**
-** Compare the values in register P1 and P3. If reg(P3)<reg(P1) then
-** jump to address P2.
+/* Opcode: Eq P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]==r[P1]
**
-** If the SQLITE_JUMPIFNULL bit of P5 is set and either reg(P1) or
-** reg(P3) is NULL then take the jump. If the SQLITE_JUMPIFNULL
-** bit is clear then fall through if either operand is NULL.
+** Compare the values in registers P1 and P3. If reg(P3)==reg(P1) then
+** jump to address P2. Or if the SQLITE_STOREP2 flag is set in P5, then
+** store the result of comparison in register P2.
**
** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
@@ -79011,61 +79209,78 @@ case OP_Cast: { /* in1 */
** the values are compared. If both values are blobs then memcmp() is
** used to determine the results of the comparison. If both values
** are text, then the appropriate collating function specified in
-** P4 is used to do the comparison. If P4 is not specified then
+** P4 is used to do the comparison. If P4 is not specified then
** memcmp() is used to compare text strings. If both values are
** numeric, then a numeric comparison is used. If the two values
** are of different types, then numbers are considered less than
** strings and strings are considered less than blobs.
**
-** If the SQLITE_STOREP2 bit of P5 is set, then do not jump. Instead,
-** store a boolean result (either 0, or 1, or NULL) in register P2.
+** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
+** true or false and is never NULL. If both operands are NULL then the result
+** of comparison is true. If only one operand is NULL then the result is false.
+** If neither operand is NULL the result is the same as it would be if
+** the SQLITE_NULLEQ flag were omitted from P5.
**
-** If the SQLITE_NULLEQ bit is set in P5, then NULL values are considered
-** equal to one another, provided that they do not have their MEM_Cleared
-** bit set.
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 0 (false).
+** In other words, a prior r[P2] value will not be overwritten by 1 (true).
*/
/* Opcode: Ne P1 P2 P3 P4 P5
-** Synopsis: if r[P1]!=r[P3] goto P2
+** Synopsis: IF r[P3]!=r[P1]
**
-** This works just like the Lt opcode except that the jump is taken if
-** the operands in registers P1 and P3 are not equal. See the Lt opcode for
+** This works just like the Eq opcode except that the jump is taken if
+** the operands in registers P1 and P3 are not equal. See the Eq opcode for
** additional information.
**
-** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
-** true or false and is never NULL. If both operands are NULL then the result
-** of comparison is false. If either operand is NULL then the result is true.
-** If neither operand is NULL the result is the same as it would be if
-** the SQLITE_NULLEQ flag were omitted from P5.
+** If both SQLITE_STOREP2 and SQLITE_KEEPNULL flags are set then the
+** content of r[P2] is only changed if the new value is NULL or 1 (true).
+** In other words, a prior r[P2] value will not be overwritten by 0 (false).
*/
-/* Opcode: Eq P1 P2 P3 P4 P5
-** Synopsis: if r[P1]==r[P3] goto P2
+/* Opcode: Lt P1 P2 P3 P4 P5
+** Synopsis: IF r[P3]<r[P1]
**
-** This works just like the Lt opcode except that the jump is taken if
-** the operands in registers P1 and P3 are equal.
-** See the Lt opcode for additional information.
+** Compare the values in registers P1 and P3. If reg(P3)<reg(P1) then
+** jump to address P2. Or if the SQLITE_STOREP2 flag is set in P5 store
+** the result of comparison (0 or 1 or NULL) into register P2.
**
-** If SQLITE_NULLEQ is set in P5 then the result of comparison is always either
-** true or false and is never NULL. If both operands are NULL then the result
-** of comparison is true. If either operand is NULL then the result is false.
-** If neither operand is NULL the result is the same as it would be if
-** the SQLITE_NULLEQ flag were omitted from P5.
+** If the SQLITE_JUMPIFNULL bit of P5 is set and either reg(P1) or
+** reg(P3) is NULL then take the jump.  If the SQLITE_JUMPIFNULL
+** bit is clear then fall through if either operand is NULL.
+**
+** The SQLITE_AFF_MASK portion of P5 must be an affinity character -
+** SQLITE_AFF_TEXT, SQLITE_AFF_INTEGER, and so forth. An attempt is made
+** to coerce both inputs according to this affinity before the
+** comparison is made. If the SQLITE_AFF_MASK is 0x00, then numeric
+** affinity is used. Note that the affinity conversions are stored
+** back into the input registers P1 and P3. So this opcode can cause
+** persistent changes to registers P1 and P3.
+**
+** Once any conversions have taken place, and neither value is NULL,
+** the values are compared. If both values are blobs then memcmp() is
+** used to determine the results of the comparison. If both values
+** are text, then the appropriate collating function specified in
+** P4 is used to do the comparison. If P4 is not specified then
+** memcmp() is used to compare text strings. If both values are
+** numeric, then a numeric comparison is used. If the two values
+** are of different types, then numbers are considered less than
+** strings and strings are considered less than blobs.
*/
/* Opcode: Le P1 P2 P3 P4 P5
-** Synopsis: if r[P1]<=r[P3] goto P2
+** Synopsis: IF r[P3]<=r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is less than or equal to the content of
** register P1. See the Lt opcode for additional information.
*/
/* Opcode: Gt P1 P2 P3 P4 P5
-** Synopsis: if r[P1]>r[P3] goto P2
+** Synopsis: IF r[P3]>r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is greater than the content of
** register P1. See the Lt opcode for additional information.
*/
/* Opcode: Ge P1 P2 P3 P4 P5
-** Synopsis: if r[P1]>=r[P3] goto P2
+** Synopsis: IF r[P3]>=r[P1]
**
** This works just like the Lt opcode except that the jump is taken if
** the content of register P3 is greater than or equal to the content of
@@ -79077,7 +79292,7 @@ case OP_Lt: /* same as TK_LT, jump, in1, in3 */
case OP_Le: /* same as TK_LE, jump, in1, in3 */
case OP_Gt: /* same as TK_GT, jump, in1, in3 */
case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
- int res; /* Result of the comparison of pIn1 against pIn3 */
+ int res, res2; /* Result of the comparison of pIn1 against pIn3 */
char affinity; /* Affinity to use for comparison */
u16 flags1; /* Copy of initial value of pIn1->flags */
u16 flags3; /* Copy of initial value of pIn3->flags */
@@ -79100,9 +79315,9 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
&& (flags3&MEM_Null)!=0
&& (flags3&MEM_Cleared)==0
){
- res = 0; /* Results are equal */
+ res = 0; /* Operands are equal */
}else{
- res = 1; /* Results are not equal */
+ res = 1; /* Operands are not equal */
}
}else{
/* SQLITE_NULLEQ is clear and at least one operand is NULL,
@@ -79111,6 +79326,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
*/
if( pOp->p5 & SQLITE_STOREP2 ){
pOut = &aMem[pOp->p2];
+ iCompare = 1; /* Operands are not equal */
memAboutToChange(p, pOut);
MemSetTypeFlag(pOut, MEM_Null);
REGISTER_TRACE(pOp->p2, pOut);
@@ -79129,12 +79345,21 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
if( (flags1 | flags3)&MEM_Str ){
if( (flags1 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn1,0);
+ testcase( flags3!=pIn3->flags ); /* Possible if pIn1==pIn3 */
flags3 = pIn3->flags;
}
if( (flags3 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn3,0);
}
}
+ /* Handle the common case of integer comparison here, as an
+ ** optimization, to avoid a call to sqlite3MemCompare() */
+ if( (pIn1->flags & pIn3->flags & MEM_Int)!=0 ){
+ if( pIn3->u.i > pIn1->u.i ){ res = +1; goto compare_op; }
+ if( pIn3->u.i < pIn1->u.i ){ res = -1; goto compare_op; }
+ res = 0;
+ goto compare_op;
+ }
}else if( affinity==SQLITE_AFF_TEXT ){
if( (flags1 & MEM_Str)==0 && (flags1 & (MEM_Int|MEM_Real))!=0 ){
testcase( pIn1->flags & MEM_Int );
@@ -79142,7 +79367,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
sqlite3VdbeMemStringify(pIn1, encoding, 1);
testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) );
flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask);
- flags3 = pIn3->flags;
+ assert( pIn1!=pIn3 );
}
if( (flags3 & MEM_Str)==0 && (flags3 & (MEM_Int|MEM_Real))!=0 ){
testcase( pIn3->flags & MEM_Int );
@@ -79153,23 +79378,16 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
}
}
assert( pOp->p4type==P4_COLLSEQ || pOp->p4.pColl==0 );
- if( flags1 & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pIn1);
- flags1 &= ~MEM_Zero;
- }
- if( flags3 & MEM_Zero ){
- sqlite3VdbeMemExpandBlob(pIn3);
- flags3 &= ~MEM_Zero;
- }
res = sqlite3MemCompare(pIn3, pIn1, pOp->p4.pColl);
}
+compare_op:
switch( pOp->opcode ){
- case OP_Eq: res = res==0; break;
- case OP_Ne: res = res!=0; break;
- case OP_Lt: res = res<0; break;
- case OP_Le: res = res<=0; break;
- case OP_Gt: res = res>0; break;
- default: res = res>=0; break;
+ case OP_Eq: res2 = res==0; break;
+ case OP_Ne: res2 = res; break;
+ case OP_Lt: res2 = res<0; break;
+ case OP_Le: res2 = res<=0; break;
+ case OP_Gt: res2 = res>0; break;
+ default: res2 = res>=0; break;
}
/* Undo any changes made by applyAffinity() to the input registers. */
@@ -79180,19 +79398,55 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
if( pOp->p5 & SQLITE_STOREP2 ){
pOut = &aMem[pOp->p2];
+ iCompare = res;
+ res2 = res2!=0; /* For this path res2 must be exactly 0 or 1 */
+ if( (pOp->p5 & SQLITE_KEEPNULL)!=0 ){
+ /* The KEEPNULL flag prevents OP_Eq from overwriting a NULL with 1
+ ** and prevents OP_Ne from overwriting NULL with 0. This flag
+ ** is only used in contexts where either:
+ ** (1) op==OP_Eq && (r[P2]==NULL || r[P2]==0)
+ ** (2) op==OP_Ne && (r[P2]==NULL || r[P2]==1)
+ ** Therefore it is not necessary to check the content of r[P2] for
+ ** NULL. */
+ assert( pOp->opcode==OP_Ne || pOp->opcode==OP_Eq );
+ assert( res2==0 || res2==1 );
+ testcase( res2==0 && pOp->opcode==OP_Eq );
+ testcase( res2==1 && pOp->opcode==OP_Eq );
+ testcase( res2==0 && pOp->opcode==OP_Ne );
+ testcase( res2==1 && pOp->opcode==OP_Ne );
+ if( (pOp->opcode==OP_Eq)==res2 ) break;
+ }
memAboutToChange(p, pOut);
MemSetTypeFlag(pOut, MEM_Int);
- pOut->u.i = res;
+ pOut->u.i = res2;
REGISTER_TRACE(pOp->p2, pOut);
}else{
VdbeBranchTaken(res!=0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
- if( res ){
+ if( res2 ){
goto jump_to_p2;
}
}
break;
}
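
A minimal sketch of how the comparison logic above, including the SQLITE_KEEPNULL handling used for row-value equality, shows up at the SQL level. It uses only the public C API and assumes a SQLite build with row-value support (3.15 or later); the queries and expected results are illustrative.

#include <stdio.h>
#include <sqlite3.h>

/* Run a one-row, one-column query and print its result (or NULL). */
static void show(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    if( sqlite3_column_type(pStmt, 0)==SQLITE_NULL ){
      printf("%-28s -> NULL\n", zSql);
    }else{
      printf("%-28s -> %d\n", zSql, sqlite3_column_int(pStmt, 0));
    }
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  show(db, "SELECT (1,2) = (1,2)");     /* 1: all fields compare equal      */
  show(db, "SELECT (1,NULL) = (1,2)");  /* NULL: equality cannot be decided */
  show(db, "SELECT (1,NULL) = (2,2)");  /* 0: first field already differs   */
  sqlite3_close(db);
  return 0;
}
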
+/* Opcode: ElseNotEq * P2 * * *
+**
+** This opcode must immediately follow an OP_Lt or OP_Gt comparison operator.
+** If the result of an OP_Eq comparison on the same two operands
+** would have been NULL or false (0), then jump to P2.
+** If the result of an OP_Eq comparison on the two previous operands
+** would have been true (1), then fall through.
+*/
+case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */
+ assert( pOp>aOp );
+ assert( pOp[-1].opcode==OP_Lt || pOp[-1].opcode==OP_Gt );
+ assert( pOp[-1].p5 & SQLITE_STOREP2 );
+ VdbeBranchTaken(iCompare!=0, 2);
+ if( iCompare!=0 ) goto jump_to_p2;
+ break;
+}
+
+
/* Opcode: Permutation * * * P4 *
**
** Set the permutation used by the OP_Compare operator to be the array
@@ -79388,22 +79642,18 @@ case OP_BitNot: { /* same as TK_BITNOT, in1, out2 */
/* Opcode: Once P1 P2 * * *
**
-** Check the "once" flag number P1. If it is set, jump to instruction P2.
-** Otherwise, set the flag and fall through to the next instruction.
-** In other words, this opcode causes all following opcodes up through P2
-** (but not including P2) to run just once and to be skipped on subsequent
-** times through the loop.
-**
-** All "once" flags are initially cleared whenever a prepared statement
-** first begins to run.
+** If the P1 value is equal to the P1 value on the OP_Init opcode at
+** instruction 0, then jump to P2. If the two P1 values differ, then
+** set the P1 value on this opcode to equal the P1 value on the OP_Init
+** and fall through.
*/
case OP_Once: { /* jump */
- assert( pOp->p1<p->nOnceFlag );
- VdbeBranchTaken(p->aOnceFlag[pOp->p1]!=0, 2);
- if( p->aOnceFlag[pOp->p1] ){
+ assert( p->aOp[0].opcode==OP_Init );
+ VdbeBranchTaken(p->aOp[0].p1==pOp->p1, 2);
+ if( p->aOp[0].p1==pOp->p1 ){
goto jump_to_p2;
}else{
- p->aOnceFlag[pOp->p1] = 1;
+ pOp->p1 = p->aOp[0].p1;
}
break;
}
@@ -79442,7 +79692,7 @@ case OP_IfNot: { /* jump, in1 */
}
/* Opcode: IsNull P1 P2 * * *
-** Synopsis: if r[P1]==NULL goto P2
+** Synopsis: if r[P1]==NULL goto P2
**
** Jump to P2 if the value in register P1 is NULL.
*/
@@ -79470,7 +79720,7 @@ case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */
}
/* Opcode: Column P1 P2 P3 P4 P5
-** Synopsis: r[P3]=PX
+** Synopsis: r[P3]=PX
**
** Interpret the data that cursor P1 points to as a structure built using
** the MakeRecord instruction. (See the MakeRecord opcode for additional
@@ -80236,12 +80486,12 @@ case OP_Transaction: {
rc = sqlite3BtreeBeginTrans(pBt, pOp->p2);
testcase( rc==SQLITE_BUSY_SNAPSHOT );
testcase( rc==SQLITE_BUSY_RECOVERY );
- if( (rc&0xff)==SQLITE_BUSY ){
- p->pc = (int)(pOp - aOp);
- p->rc = rc;
- goto vdbe_return;
- }
if( rc!=SQLITE_OK ){
+ if( (rc&0xff)==SQLITE_BUSY ){
+ p->pc = (int)(pOp - aOp);
+ p->rc = rc;
+ goto vdbe_return;
+ }
goto abort_due_to_error;
}
@@ -80268,10 +80518,9 @@ case OP_Transaction: {
}
/* Gather the schema version number for checking:
- ** IMPLEMENTATION-OF: R-32195-19465 The schema version is used by SQLite
- ** each time a query is executed to ensure that the internal cache of the
- ** schema used when compiling the SQL query matches the schema of the
- ** database against which the compiled query is actually executed.
+ ** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
+ ** version is checked to ensure that the schema has not changed since the
+ ** SQL statement was prepared.
*/
sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&iMeta);
iGen = db->aDb[pOp->p1].pSchema->iGeneration;
@@ -80932,7 +81181,6 @@ case OP_SeekGT: { /* jump, in3 */
#ifdef SQLITE_DEBUG
{ int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
#endif
- ExpandBlob(r.aMem);
r.eqSeen = 0;
rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, &r, 0, 0, &res);
if( rc!=SQLITE_OK ){
@@ -80980,7 +81228,6 @@ seek_not_found:
}
break;
}
-
/* Opcode: Found P1 P2 P3 P4 *
** Synopsis: key=r[P3@P4]
@@ -81074,13 +81321,13 @@ case OP_Found: { /* jump, in3 */
r.pKeyInfo = pC->pKeyInfo;
r.nField = (u16)pOp->p4.i;
r.aMem = pIn3;
+#ifdef SQLITE_DEBUG
for(ii=0; ii<r.nField; ii++){
assert( memIsValid(&r.aMem[ii]) );
- ExpandBlob(&r.aMem[ii]);
-#ifdef SQLITE_DEBUG
+ assert( (r.aMem[ii].flags & MEM_Zero)==0 || r.aMem[ii].n==0 );
if( ii ) REGISTER_TRACE(pOp->p3+ii, &r.aMem[ii]);
-#endif
}
+#endif
pIdxKey = &r;
}else{
pIdxKey = sqlite3VdbeAllocUnpackedRecord(
@@ -81088,7 +81335,7 @@ case OP_Found: { /* jump, in3 */
);
if( pIdxKey==0 ) goto no_mem;
assert( pIn3->flags & MEM_Blob );
- ExpandBlob(pIn3);
+ (void)ExpandBlob(pIn3);
sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey);
}
pIdxKey->default_rc = 0;
@@ -81413,7 +81660,7 @@ case OP_NewRowid: { /* out2 */
** for indices is OP_IdxInsert.
*/
/* Opcode: InsertInt P1 P2 P3 P4 P5
-** Synopsis: intkey=P3 data=r[P2]
+** Synopsis: intkey=P3 data=r[P2]
**
** This works exactly like OP_Insert except that the key is the
** integer value P3, not the value of the integer stored in register P3.
@@ -81455,7 +81702,7 @@ case OP_InsertInt: {
if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
assert( pC->isTable );
assert( pC->iDb>=0 );
- zDb = db->aDb[pC->iDb].zName;
+ zDb = db->aDb[pC->iDb].zDbSName;
pTab = pOp->p4.pTab;
assert( HasRowid(pTab) );
op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT);
@@ -81529,7 +81776,7 @@ case OP_InsertInt: {
** P1 must not be pseudo-table. It has to be a real table with
** multiple rows.
**
-** If P4 is not NULL then it points to a Table struture. In this case either
+** If P4 is not NULL then it points to a Table object. In this case either
** the update or pre-update hook, or both, may be invoked. The P1 cursor must
** have been positioned using OP_NotFound prior to invoking this opcode in
** this case. Specifically, if one is configured, the pre-update hook is
@@ -81572,7 +81819,7 @@ case OP_Delete: {
if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
assert( pC->iDb>=0 );
assert( pOp->p4.pTab!=0 );
- zDb = db->aDb[pC->iDb].zName;
+ zDb = db->aDb[pC->iDb].zDbSName;
pTab = pOp->p4.pTab;
if( (pOp->p5 & OPFLAG_SAVEPOSITION)!=0 && pC->isTable ){
pC->movetoTarget = sqlite3BtreeIntegerKey(pC->uc.pCursor);
@@ -81644,7 +81891,7 @@ case OP_ResetCount: {
}
/* Opcode: SorterCompare P1 P2 P3 P4
-** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
+** Synopsis: if key(P1)!=trim(r[P3],P4) goto P2
**
** P1 is a sorter cursor. This instruction compares a prefix of the
** record blob in register P3 against a prefix of the entry that
@@ -82120,9 +82367,6 @@ case OP_IdxInsert: { /* in2 */
}else{
x.nKey = pIn2->n;
x.pKey = pIn2->z;
- x.nData = 0;
- x.nZero = 0;
- x.pData = 0;
rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, pOp->p3,
((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0)
);
@@ -82171,7 +82415,7 @@ case OP_IdxDelete: {
}
/* Opcode: Seek P1 * P3 P4 *
-** Synopsis: Move P3 to P1.rowid
+** Synopsis: Move P3 to P1.rowid
**
** P1 is an open index cursor and P3 is a cursor on the corresponding
** table. This opcode does a deferred seek of the P3 table cursor
@@ -82542,7 +82786,7 @@ case OP_ParseSchema: {
initData.pzErrMsg = &p->zErrMsg;
zSql = sqlite3MPrintf(db,
"SELECT name, rootpage, sql FROM '%q'.%s WHERE %s ORDER BY rowid",
- db->aDb[iDb].zName, zMaster, pOp->p4.z);
+ db->aDb[iDb].zDbSName, zMaster, pOp->p4.z);
if( zSql==0 ){
rc = SQLITE_NOMEM_BKPT;
}else{
@@ -82678,7 +82922,7 @@ case OP_IntegrityCk: {
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
/* Opcode: RowSetAdd P1 P2 * * *
-** Synopsis: rowset(P1)=r[P2]
+** Synopsis: rowset(P1)=r[P2]
**
** Insert the integer value held by register P2 into a boolean index
** held in register P1.
@@ -82698,7 +82942,7 @@ case OP_RowSetAdd: { /* in1, in2 */
}
/* Opcode: RowSetRead P1 P2 P3 * *
-** Synopsis: r[P3]=rowset(P1)
+** Synopsis: r[P3]=rowset(P1)
**
** Extract the smallest value from boolean index P1 and put that value into
** register P3. Or, if boolean index P1 is initially empty, leave P3
@@ -82847,8 +83091,7 @@ case OP_Program: { /* jump */
if( pProgram->nCsr==0 ) nMem++;
nByte = ROUND8(sizeof(VdbeFrame))
+ nMem * sizeof(Mem)
- + pProgram->nCsr * sizeof(VdbeCursor *)
- + pProgram->nOnce * sizeof(u8);
+ + pProgram->nCsr * sizeof(VdbeCursor *);
pFrame = sqlite3DbMallocZero(db, nByte);
if( !pFrame ){
goto no_mem;
@@ -82868,8 +83111,6 @@ case OP_Program: { /* jump */
pFrame->aOp = p->aOp;
pFrame->nOp = p->nOp;
pFrame->token = pProgram->token;
- pFrame->aOnceFlag = p->aOnceFlag;
- pFrame->nOnceFlag = p->nOnceFlag;
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
pFrame->anExec = p->anExec;
#endif
@@ -82903,13 +83144,10 @@ case OP_Program: { /* jump */
p->apCsr = (VdbeCursor **)&aMem[p->nMem];
p->aOp = aOp = pProgram->aOp;
p->nOp = pProgram->nOp;
- p->aOnceFlag = (u8 *)&p->apCsr[p->nCursor];
- p->nOnceFlag = pProgram->nOnce;
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
p->anExec = 0;
#endif
pOp = &aOp[-1];
- memset(p->aOnceFlag, 0, p->nOnceFlag);
break;
}
@@ -83371,15 +83609,14 @@ case OP_JournalMode: { /* out2 */
#endif /* SQLITE_OMIT_PRAGMA */
#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
-/* Opcode: Vacuum * * * * *
+/* Opcode: Vacuum P1 * * * *
**
-** Vacuum the entire database. This opcode will cause other virtual
-** machines to be created and run. It may not be called from within
-** a transaction.
+** Vacuum the entire database P1. P1 is 0 for "main", and 2 or more
+** for an attached database. The "temp" database may not be vacuumed.
*/
case OP_Vacuum: {
assert( p->readOnly==0 );
- rc = sqlite3RunVacuum(&p->zErrMsg, db);
+ rc = sqlite3RunVacuum(&p->zErrMsg, db, pOp->p1);
if( rc ) goto abort_due_to_error;
break;
}
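
The new P1 operand is what allows VACUUM to target a single attached database. A minimal sketch using the public API; the file and schema names are made up, and it assumes a build that accepts the VACUUM <schema-name> syntax (3.15 or later).

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open("main.db", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "ATTACH 'aux.db' AS aux1", 0, 0, &zErr);
  /* Vacuum only the attached database; plain "VACUUM" still targets "main". */
  if( sqlite3_exec(db, "VACUUM aux1", 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "vacuum failed: %s\n", zErr ? zErr : "unknown error");
  }
  sqlite3_free(zErr);
  sqlite3_close(db);
  return 0;
}
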
@@ -83877,8 +84114,8 @@ case OP_MaxPgcnt: { /* out2 */
#endif
-/* Opcode: Init * P2 * P4 *
-** Synopsis: Start at P2
+/* Opcode: Init P1 P2 * P4 *
+** Synopsis: Start at P2
**
** Programs contain a single instance of this opcode as the very first
** opcode.
@@ -83888,9 +84125,13 @@ case OP_MaxPgcnt: { /* out2 */
** Or if P4 is blank, use the string returned by sqlite3_sql().
**
** If P2 is not zero, jump to instruction P2.
+**
+** Increment the value of P1 so that OP_Once opcodes will jump the
+** first time they are evaluated for this run.
*/
case OP_Init: { /* jump */
char *zTrace;
+ int i;
/* If the P4 argument is not NULL, then it must be an SQL comment string.
** The "--" string is broken up to prevent false-positives with srcck1.c.
@@ -83902,6 +84143,7 @@ case OP_Init: { /* jump */
** sqlite3_expanded_sql(P) otherwise.
*/
assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 );
+ assert( pOp==p->aOp ); /* Always instruction 0 */
#ifndef SQLITE_OMIT_TRACE
if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0
@@ -83923,10 +84165,10 @@ case OP_Init: { /* jump */
#ifdef SQLITE_USE_FCNTL_TRACE
zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql);
if( zTrace ){
- int i;
- for(i=0; i<db->nDb; i++){
- if( DbMaskTest(p->btreeMask, i)==0 ) continue;
- sqlite3_file_control(db, db->aDb[i].zName, SQLITE_FCNTL_TRACE, zTrace);
+ int j;
+ for(j=0; j<db->nDb; j++){
+ if( DbMaskTest(p->btreeMask, j)==0 ) continue;
+ sqlite3_file_control(db, db->aDb[j].zDbSName, SQLITE_FCNTL_TRACE, zTrace);
}
}
#endif /* SQLITE_USE_FCNTL_TRACE */
@@ -83938,8 +84180,15 @@ case OP_Init: { /* jump */
}
#endif /* SQLITE_DEBUG */
#endif /* SQLITE_OMIT_TRACE */
- if( pOp->p2 ) goto jump_to_p2;
- break;
+ assert( pOp->p2>0 );
+ if( pOp->p1>=sqlite3GlobalConfig.iOnceResetThreshold ){
+ for(i=1; i<p->nOp; i++){
+ if( p->aOp[i].opcode==OP_Once ) p->aOp[i].p1 = 0;
+ }
+ pOp->p1 = 0;
+ }
+ pOp->p1++;
+ goto jump_to_p2;
}
#ifdef SQLITE_ENABLE_CURSOR_HINTS
@@ -84192,7 +84441,7 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
/*
** Open a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3* db, /* The database connection */
const char *zDb, /* The attached database containing the blob */
const char *zTable, /* The table containing the blob */
@@ -84262,7 +84511,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
goto blob_open_out;
}
pBlob->pTab = pTab;
- pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zName;
+ pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zDbSName;
/* Now search pTab for the exact column. */
for(iCol=0; iCol<pTab->nCol; iCol++) {
@@ -84433,7 +84682,7 @@ blob_open_out:
** Close a blob handle that was previously created using
** sqlite3_blob_open().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *pBlob){
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *pBlob){
Incrblob *p = (Incrblob *)pBlob;
int rc;
sqlite3 *db;
@@ -84526,14 +84775,14 @@ static int blobReadWrite(
/*
** Read data from a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreeData);
}
/*
** Write data to a blob handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *pBlob, const void *z, int n, int iOffset){
return blobReadWrite(pBlob, (void *)z, n, iOffset, sqlite3BtreePutData);
}
@@ -84543,7 +84792,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *pBlob, const void
** The Incrblob.nByte field is fixed for the lifetime of the Incrblob
** so no mutex is required for access.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *pBlob){
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *pBlob){
Incrblob *p = (Incrblob *)pBlob;
return (p && p->pStmt) ? p->nByte : 0;
}
@@ -84558,7 +84807,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *pBlob){
** subsequent calls to sqlite3_blob_xxx() functions (except blob_close())
** immediately return SQLITE_ABORT.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){
int rc;
Incrblob *p = (Incrblob *)pBlob;
sqlite3 *db;
@@ -87814,17 +88063,17 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){
testcase( ExprHasProperty(pExpr, EP_TokenOnly) );
testcase( ExprHasProperty(pExpr, EP_Reduced) );
rc = pWalker->xExprCallback(pWalker, pExpr);
- if( rc==WRC_Continue
- && !ExprHasProperty(pExpr,EP_TokenOnly) ){
- if( sqlite3WalkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort;
- if( sqlite3WalkExpr(pWalker, pExpr->pRight) ) return WRC_Abort;
- if( ExprHasProperty(pExpr, EP_xIsSelect) ){
- if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort;
- }else{
- if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort;
- }
+ if( rc || ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
+ return rc & WRC_Abort;
}
- return rc & WRC_Abort;
+ if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort;
+ if( pExpr->pRight && walkExpr(pWalker, pExpr->pRight) ) return WRC_Abort;
+ if( ExprHasProperty(pExpr, EP_xIsSelect) ){
+ if( sqlite3WalkSelect(pWalker, pExpr->x.pSelect) ) return WRC_Abort;
+ }else if( pExpr->x.pList ){
+ if( sqlite3WalkExprList(pWalker, pExpr->x.pList) ) return WRC_Abort;
+ }
+ return WRC_Continue;
}
SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){
return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue;
@@ -88158,8 +88407,8 @@ static int lookupName(
zDb = 0;
}else{
for(i=0; i<db->nDb; i++){
- assert( db->aDb[i].zName );
- if( sqlite3StrICmp(db->aDb[i].zName,zDb)==0 ){
+ assert( db->aDb[i].zDbSName );
+ if( sqlite3StrICmp(db->aDb[i].zDbSName,zDb)==0 ){
pSchema = db->aDb[i].pSchema;
break;
}
@@ -88560,7 +88809,6 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
/* if( pSrcList==0 ) break; */
notValid(pParse, pNC, "the \".\" operator", NC_IdxExpr);
- /*notValid(pParse, pNC, "the \".\" operator", NC_PartIdx|NC_IsCheck, 1);*/
pRight = pExpr->pRight;
if( pRight->op==TK_ID ){
zDb = 0;
@@ -88583,14 +88831,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
int no_such_func = 0; /* True if no such function exists */
int wrong_num_args = 0; /* True if wrong number of arguments */
int is_agg = 0; /* True if is an aggregate function */
- int auth; /* Authorization to use the function */
int nId; /* Number of characters in function name */
const char *zId; /* The function name. */
FuncDef *pDef; /* Information about the function */
u8 enc = ENC(pParse->db); /* The database encoding */
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
- notValid(pParse, pNC, "functions", NC_PartIdx);
zId = pExpr->u.zToken;
nId = sqlite3Strlen30(zId);
pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0);
@@ -88627,15 +88873,17 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
}
}
#ifndef SQLITE_OMIT_AUTHORIZATION
- auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0, pDef->zName, 0);
- if( auth!=SQLITE_OK ){
- if( auth==SQLITE_DENY ){
- sqlite3ErrorMsg(pParse, "not authorized to use function: %s",
- pDef->zName);
- pNC->nErr++;
+ {
+ int auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0,pDef->zName,0);
+ if( auth!=SQLITE_OK ){
+ if( auth==SQLITE_DENY ){
+ sqlite3ErrorMsg(pParse, "not authorized to use function: %s",
+ pDef->zName);
+ pNC->nErr++;
+ }
+ pExpr->op = TK_NULL;
+ return WRC_Prune;
}
- pExpr->op = TK_NULL;
- return WRC_Prune;
}
#endif
if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){
@@ -88648,7 +88896,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
/* Date/time functions that use 'now', and other functions like
** sqlite_version() that might change over time cannot be used
** in an index. */
- notValid(pParse, pNC, "non-deterministic functions", NC_IdxExpr);
+ notValid(pParse, pNC, "non-deterministic functions",
+ NC_IdxExpr|NC_PartIdx);
}
}
if( is_agg && (pNC->ncFlags & NC_AllowAgg)==0 ){
@@ -88713,6 +88962,33 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr);
break;
}
+ case TK_EQ:
+ case TK_NE:
+ case TK_LT:
+ case TK_LE:
+ case TK_GT:
+ case TK_GE:
+ case TK_IS:
+ case TK_ISNOT: {
+ int nLeft, nRight;
+ if( pParse->db->mallocFailed ) break;
+ assert( pExpr->pRight!=0 );
+ assert( pExpr->pLeft!=0 );
+ nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
+ nRight = sqlite3ExprVectorSize(pExpr->pRight);
+ if( nLeft!=nRight ){
+ testcase( pExpr->op==TK_EQ );
+ testcase( pExpr->op==TK_NE );
+ testcase( pExpr->op==TK_LT );
+ testcase( pExpr->op==TK_LE );
+ testcase( pExpr->op==TK_GT );
+ testcase( pExpr->op==TK_GE );
+ testcase( pExpr->op==TK_IS );
+ testcase( pExpr->op==TK_ISNOT );
+ sqlite3ErrorMsg(pParse, "row value misused");
+ }
+ break;
+ }
}
return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue;
}
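
The new TK_EQ..TK_ISNOT arm rejects comparisons whose two sides have different vector sizes with the "row value misused" message. A minimal sketch of how that surfaces through the public API (illustrative only, assuming row-value support):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  /* LHS is a 2-element vector, RHS is a scalar, so the sizes differ. */
  if( sqlite3_prepare_v2(db, "SELECT (1,2) = 3", -1, &pStmt, 0)!=SQLITE_OK ){
    printf("prepare error: %s\n", sqlite3_errmsg(db)); /* "row value misused" */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
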
@@ -89455,6 +89731,18 @@ SQLITE_PRIVATE void sqlite3ResolveSelfReference(
*/
/* #include "sqliteInt.h" */
+/* Forward declarations */
+static void exprCodeBetween(Parse*,Expr*,int,void(*)(Parse*,Expr*,int,int),int);
+static int exprCodeVector(Parse *pParse, Expr *p, int *piToFree);
+
+/*
+** Return the affinity character for a single column of a table.
+*/
+SQLITE_PRIVATE char sqlite3TableColumnAffinity(Table *pTab, int iCol){
+ assert( iCol<pTab->nCol );
+ return iCol>=0 ? pTab->aCol[iCol].affinity : SQLITE_AFF_INTEGER;
+}
+
/*
** Return the 'affinity' of the expression pExpr if any.
**
@@ -89480,21 +89768,21 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr){
assert( pExpr->flags&EP_xIsSelect );
return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr);
}
+ if( op==TK_REGISTER ) op = pExpr->op2;
#ifndef SQLITE_OMIT_CAST
if( op==TK_CAST ){
assert( !ExprHasProperty(pExpr, EP_IntValue) );
return sqlite3AffinityType(pExpr->u.zToken, 0);
}
#endif
- if( (op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_REGISTER)
- && pExpr->pTab!=0
- ){
- /* op==TK_REGISTER && pExpr->pTab!=0 happens when pExpr was originally
- ** a TK_COLUMN but was previously evaluated and cached in a register */
- int j = pExpr->iColumn;
- if( j<0 ) return SQLITE_AFF_INTEGER;
- assert( pExpr->pTab && j<pExpr->pTab->nCol );
- return pExpr->pTab->aCol[j].affinity;
+ if( op==TK_AGG_COLUMN || op==TK_COLUMN ){
+ return sqlite3TableColumnAffinity(pExpr->pTab, pExpr->iColumn);
+ }
+ if( op==TK_SELECT_COLUMN ){
+ assert( pExpr->pLeft->flags&EP_xIsSelect );
+ return sqlite3ExprAffinity(
+ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
+ );
}
return pExpr->affinity;
}
@@ -89660,7 +89948,7 @@ static char comparisonAffinity(Expr *pExpr){
aff = sqlite3CompareAffinity(pExpr->pRight, aff);
}else if( ExprHasProperty(pExpr, EP_xIsSelect) ){
aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff);
- }else if( !aff ){
+ }else if( NEVER(aff==0) ){
aff = SQLITE_AFF_BLOB;
}
return aff;
@@ -89750,6 +90038,270 @@ static int codeCompare(
return addr;
}
+/*
+** Return true if expression pExpr is a vector, or false otherwise.
+**
+** A vector is defined as any expression that results in two or more
+** columns of result. Every TK_VECTOR node is a vector because the
+** parser will not generate a TK_VECTOR with fewer than two entries.
+** But a TK_SELECT might be either a vector or a scalar. It is only
+** considered a vector if it has two or more result columns.
+*/
+SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr){
+ return sqlite3ExprVectorSize(pExpr)>1;
+}
+
+/*
+** If the expression passed as the only argument is of type TK_VECTOR
+** return the number of expressions in the vector. Or, if the expression
+** is a sub-select, return the number of columns in the sub-select. For
+** any other type of expression, return 1.
+*/
+SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr){
+ u8 op = pExpr->op;
+ if( op==TK_REGISTER ) op = pExpr->op2;
+ if( op==TK_VECTOR ){
+ return pExpr->x.pList->nExpr;
+ }else if( op==TK_SELECT ){
+ return pExpr->x.pSelect->pEList->nExpr;
+ }else{
+ return 1;
+ }
+}
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Return a pointer to a subexpression of pVector that is the i-th
+** column of the vector (numbered starting with 0). The caller must
+** ensure that i is within range.
+**
+** If pVector is really a scalar (and "scalar" here includes subqueries
+** that return a single column!) then return pVector unmodified.
+**
+** pVector retains ownership of the returned subexpression.
+**
+** If the vector is a (SELECT ...) then the expression returned is
+** just the expression for the i-th term of the result set, and may
+** not be ready for evaluation because the table cursor has not yet
+** been positioned.
+*/
+SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr *pVector, int i){
+ assert( i<sqlite3ExprVectorSize(pVector) );
+ if( sqlite3ExprIsVector(pVector) ){
+ assert( pVector->op2==0 || pVector->op==TK_REGISTER );
+ if( pVector->op==TK_SELECT || pVector->op2==TK_SELECT ){
+ return pVector->x.pSelect->pEList->a[i].pExpr;
+ }else{
+ return pVector->x.pList->a[i].pExpr;
+ }
+ }
+ return pVector;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) */
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Compute and return a new Expr object which when passed to
+** sqlite3ExprCode() will generate all necessary code to compute
+** the iField-th column of the vector expression pVector.
+**
+** It is ok for pVector to be a scalar (as long as iField==0).
+** In that case, this routine works like sqlite3ExprDup().
+**
+** The caller owns the returned Expr object and is responsible for
+** ensuring that the returned value eventually gets freed.
+**
+** The caller retains ownership of pVector. If pVector is a TK_SELECT,
+** then the returned object will reference pVector and so pVector must remain
+** valid for the life of the returned object. If pVector is a TK_VECTOR
+** or a scalar expression, then it can be deleted as soon as this routine
+** returns.
+**
+** A trick to cause a TK_SELECT pVector to be deleted together with
+** the returned Expr object is to attach the pVector to the pRight field
+** of the returned TK_SELECT_COLUMN Expr object.
+*/
+SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
+ Parse *pParse, /* Parsing context */
+ Expr *pVector, /* The vector. List of expressions or a sub-SELECT */
+ int iField /* Which column of the vector to return */
+){
+ Expr *pRet;
+ if( pVector->op==TK_SELECT ){
+ assert( pVector->flags & EP_xIsSelect );
+ /* The TK_SELECT_COLUMN Expr node:
+ **
+ ** pLeft: pVector containing TK_SELECT
+ ** pRight: not used. But recursively deleted.
+ ** iColumn: Index of a column in pVector
+ ** pLeft->iTable: First in an array of register holding result, or 0
+ ** if the result is not yet computed.
+ **
+ ** sqlite3ExprDelete() specifically skips the recursive delete of
+ ** pLeft on TK_SELECT_COLUMN nodes. But pRight is followed, so pVector
+ ** can be attached to pRight to cause this node to take ownership of
+ ** pVector. Typically there will be multiple TK_SELECT_COLUMN nodes
+ ** with the same pLeft pointer to the pVector, but only one of them
+ ** will own the pVector.
+ */
+ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0, 0);
+ if( pRet ){
+ pRet->iColumn = iField;
+ pRet->pLeft = pVector;
+ }
+ assert( pRet==0 || pRet->iTable==0 );
+ }else{
+ if( pVector->op==TK_VECTOR ) pVector = pVector->x.pList->a[iField].pExpr;
+ pRet = sqlite3ExprDup(pParse->db, pVector, 0);
+ }
+ return pRet;
+}
+#endif /* !defined(SQLITE_OMIT_SUBQUERY) */
+
+/*
+** If expression pExpr is of type TK_SELECT, generate code to evaluate
+** it. Return the register in which the result is stored (or, if the
+** sub-select returns more than one column, the first in an array
+** of registers in which the result is stored).
+**
+** If pExpr is not a TK_SELECT expression, return 0.
+*/
+static int exprCodeSubselect(Parse *pParse, Expr *pExpr){
+ int reg = 0;
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( pExpr->op==TK_SELECT ){
+ reg = sqlite3CodeSubselect(pParse, pExpr, 0, 0);
+ }
+#endif
+ return reg;
+}
+
+/*
+** Argument pVector points to a vector expression - either a TK_VECTOR
+** or TK_SELECT that returns more than one column. This function returns
+** the register number of a register that contains the value of
+** element iField of the vector.
+**
+** If pVector is a TK_SELECT expression, then code for it must have
+** already been generated using the exprCodeSubselect() routine. In this
+** case parameter regSelect should be the first in an array of registers
+** containing the results of the sub-select.
+**
+** If pVector is of type TK_VECTOR, then code for the requested field
+** is generated. In this case (*pRegFree) may be set to the number of
+** a temporary register to be freed by the caller before returning.
+**
+** Before returning, output parameter (*ppExpr) is set to point to the
+** Expr object corresponding to element iField of the vector.
+*/
+static int exprVectorRegister(
+ Parse *pParse, /* Parse context */
+ Expr *pVector, /* Vector to extract element from */
+ int iField, /* Field to extract from pVector */
+ int regSelect, /* First in array of registers */
+ Expr **ppExpr, /* OUT: Expression element */
+ int *pRegFree /* OUT: Temp register to free */
+){
+ u8 op = pVector->op;
+ assert( op==TK_VECTOR || op==TK_REGISTER || op==TK_SELECT );
+ if( op==TK_REGISTER ){
+ *ppExpr = sqlite3VectorFieldSubexpr(pVector, iField);
+ return pVector->iTable+iField;
+ }
+ if( op==TK_SELECT ){
+ *ppExpr = pVector->x.pSelect->pEList->a[iField].pExpr;
+ return regSelect+iField;
+ }
+ *ppExpr = pVector->x.pList->a[iField].pExpr;
+ return sqlite3ExprCodeTemp(pParse, *ppExpr, pRegFree);
+}
+
+/*
+** Expression pExpr is a comparison between two vector values. Compute
+** the result of the comparison (1, 0, or NULL) and write that
+** result into register dest.
+**
+** The caller must satisfy the following preconditions:
+**
+** if pExpr->op==TK_IS: op==TK_EQ and p5==SQLITE_NULLEQ
+** if pExpr->op==TK_ISNOT: op==TK_NE and p5==SQLITE_NULLEQ
+** otherwise: op==pExpr->op and p5==0
+*/
+static void codeVectorCompare(
+ Parse *pParse, /* Code generator context */
+ Expr *pExpr, /* The comparison operation */
+ int dest, /* Write results into this register */
+ u8 op, /* Comparison operator */
+ u8 p5 /* SQLITE_NULLEQ or zero */
+){
+ Vdbe *v = pParse->pVdbe;
+ Expr *pLeft = pExpr->pLeft;
+ Expr *pRight = pExpr->pRight;
+ int nLeft = sqlite3ExprVectorSize(pLeft);
+ int i;
+ int regLeft = 0;
+ int regRight = 0;
+ u8 opx = op;
+ int addrDone = sqlite3VdbeMakeLabel(v);
+
+ assert( nLeft==sqlite3ExprVectorSize(pRight) );
+ assert( pExpr->op==TK_EQ || pExpr->op==TK_NE
+ || pExpr->op==TK_IS || pExpr->op==TK_ISNOT
+ || pExpr->op==TK_LT || pExpr->op==TK_GT
+ || pExpr->op==TK_LE || pExpr->op==TK_GE
+ );
+ assert( pExpr->op==op || (pExpr->op==TK_IS && op==TK_EQ)
+ || (pExpr->op==TK_ISNOT && op==TK_NE) );
+ assert( p5==0 || pExpr->op!=op );
+ assert( p5==SQLITE_NULLEQ || pExpr->op==op );
+
+ p5 |= SQLITE_STOREP2;
+ if( opx==TK_LE ) opx = TK_LT;
+ if( opx==TK_GE ) opx = TK_GT;
+
+ regLeft = exprCodeSubselect(pParse, pLeft);
+ regRight = exprCodeSubselect(pParse, pRight);
+
+ for(i=0; 1 /*Loop exits by "break"*/; i++){
+ int regFree1 = 0, regFree2 = 0;
+ Expr *pL, *pR;
+ int r1, r2;
+ assert( i>=0 && i<nLeft );
+ if( i>0 ) sqlite3ExprCachePush(pParse);
+ r1 = exprVectorRegister(pParse, pLeft, i, regLeft, &pL, &regFree1);
+ r2 = exprVectorRegister(pParse, pRight, i, regRight, &pR, &regFree2);
+ codeCompare(pParse, pL, pR, opx, r1, r2, dest, p5);
+ testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+ testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+ sqlite3ReleaseTempReg(pParse, regFree1);
+ sqlite3ReleaseTempReg(pParse, regFree2);
+ if( i>0 ) sqlite3ExprCachePop(pParse);
+ if( i==nLeft-1 ){
+ break;
+ }
+ if( opx==TK_EQ ){
+ sqlite3VdbeAddOp2(v, OP_IfNot, dest, addrDone); VdbeCoverage(v);
+ p5 |= SQLITE_KEEPNULL;
+ }else if( opx==TK_NE ){
+ sqlite3VdbeAddOp2(v, OP_If, dest, addrDone); VdbeCoverage(v);
+ p5 |= SQLITE_KEEPNULL;
+ }else{
+ assert( op==TK_LT || op==TK_GT || op==TK_LE || op==TK_GE );
+ sqlite3VdbeAddOp2(v, OP_ElseNotEq, 0, addrDone);
+ VdbeCoverageIf(v, op==TK_LT);
+ VdbeCoverageIf(v, op==TK_GT);
+ VdbeCoverageIf(v, op==TK_LE);
+ VdbeCoverageIf(v, op==TK_GE);
+ if( i==nLeft-2 ) opx = op;
+ }
+ }
+ sqlite3VdbeResolveLabel(v, addrDone);
+}
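
codeVectorCompare() compares row values field by field, using OP_ElseNotEq to stop as soon as an earlier field decides an ordering comparison. A minimal sketch of the resulting SQL-level behaviour, assuming a build with row-value support; the expected results in the comments follow from the field-by-field rule.

#include <stdio.h>
#include <sqlite3.h>

/* Run a one-row, one-column query and print its result (or NULL). */
static void show(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    if( sqlite3_column_type(pStmt, 0)==SQLITE_NULL ){
      printf("%-30s -> NULL\n", zSql);
    }else{
      printf("%-30s -> %d\n", zSql, sqlite3_column_int(pStmt, 0));
    }
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  show(db, "SELECT (1,2) < (1,3)");     /* 1: first fields tie, then 2 < 3  */
  show(db, "SELECT (2,0) < (1,9)");     /* 0: 2 < 1 is false, rest ignored  */
  show(db, "SELECT (1,NULL) < (2,0)");  /* 1: decided by the first field    */
  show(db, "SELECT (1,NULL) < (1,3)");  /* NULL: tie, then a NULL field     */
  sqlite3_close(db);
  return 0;
}
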
+
#if SQLITE_MAX_EXPR_DEPTH>0
/*
** Check that argument nHeight is less than or equal to the maximum
@@ -89885,7 +90437,7 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){
** is allocated to hold the integer text and the dequote flag is ignored.
*/
SQLITE_PRIVATE Expr *sqlite3ExprAlloc(
- sqlite3 *db, /* Handle for sqlite3DbMallocZero() (may be null) */
+ sqlite3 *db, /* Handle for sqlite3DbMallocRawNN() */
int op, /* Expression opcode */
const Token *pToken, /* Token argument. Might be NULL */
int dequote /* True to dequote */
@@ -90103,7 +90655,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *
** instance of the wildcard, the next sequential variable number is
** assigned.
*/
-SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){
+SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n){
sqlite3 *db = pParse->db;
const char *z;
@@ -90112,19 +90664,19 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){
z = pExpr->u.zToken;
assert( z!=0 );
assert( z[0]!=0 );
+ assert( n==sqlite3Strlen30(z) );
if( z[1]==0 ){
/* Wildcard of the form "?". Assign the next variable number */
assert( z[0]=='?' );
pExpr->iColumn = (ynVar)(++pParse->nVar);
}else{
- ynVar x = 0;
- u32 n = sqlite3Strlen30(z);
+ ynVar x;
if( z[0]=='?' ){
/* Wildcard of the form "?nnn". Convert "nnn" to an integer and
** use it as the variable number */
i64 i;
int bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8);
- pExpr->iColumn = x = (ynVar)i;
+ x = (ynVar)i;
testcase( i==0 );
testcase( i==1 );
testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 );
@@ -90132,7 +90684,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){
if( bOk==0 || i<1 || i>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
sqlite3ErrorMsg(pParse, "variable number must be between ?1 and ?%d",
db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]);
- x = 0;
+ return;
}
if( i>pParse->nVar ){
pParse->nVar = (int)i;
@@ -90143,33 +90695,31 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){
** has never appeared before, reuse the same variable number
*/
ynVar i;
- for(i=0; i<pParse->nzVar; i++){
+ for(i=x=0; i<pParse->nzVar; i++){
if( pParse->azVar[i] && strcmp(pParse->azVar[i],z)==0 ){
- pExpr->iColumn = x = (ynVar)i+1;
+ x = (ynVar)i+1;
break;
}
}
- if( x==0 ) x = pExpr->iColumn = (ynVar)(++pParse->nVar);
+ if( x==0 ) x = (ynVar)(++pParse->nVar);
}
- if( x>0 ){
- if( x>pParse->nzVar ){
- char **a;
- a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0]));
- if( a==0 ){
- assert( db->mallocFailed ); /* Error reported through mallocFailed */
- return;
- }
- pParse->azVar = a;
- memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0]));
- pParse->nzVar = x;
- }
- if( z[0]!='?' || pParse->azVar[x-1]==0 ){
- sqlite3DbFree(db, pParse->azVar[x-1]);
- pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n);
+ pExpr->iColumn = x;
+ if( x>pParse->nzVar ){
+ char **a;
+ a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0]));
+ if( a==0 ){
+ assert( db->mallocFailed ); /* Error reported through mallocFailed */
+ return;
}
+ pParse->azVar = a;
+ memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0]));
+ pParse->nzVar = x;
+ }
+ if( pParse->azVar[x-1]==0 ){
+ pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n);
}
}
- if( !pParse->nErr && pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
+ if( pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
sqlite3ErrorMsg(pParse, "too many SQL variables");
}
}
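
The variable-numbering rules implemented above are visible through the public bind-parameter API. A minimal sketch; the statement text is made up, and the numbers in the comments follow from the rules in this function ("?" takes the next free number, "?NNN" forces NNN, and a repeated named parameter reuses its number).

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  if( sqlite3_prepare_v2(db, "SELECT ?, ?5, :name, ?, :name",
                         -1, &pStmt, 0)==SQLITE_OK ){
    /* The five parameters are numbered 1, 5, 6, 7, 6 respectively. */
    printf("count = %d\n", sqlite3_bind_parameter_count(pStmt));          /* 7 */
    printf(":name = %d\n", sqlite3_bind_parameter_index(pStmt, ":name")); /* 6 */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
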
@@ -90181,18 +90731,25 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
assert( p!=0 );
/* Sanity check: Assert that the IntValue is non-negative if it exists */
assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 );
- if( !ExprHasProperty(p, EP_TokenOnly) ){
+#ifdef SQLITE_DEBUG
+ if( ExprHasProperty(p, EP_Leaf) && !ExprHasProperty(p, EP_TokenOnly) ){
+ assert( p->pLeft==0 );
+ assert( p->pRight==0 );
+ assert( p->x.pSelect==0 );
+ }
+#endif
+ if( !ExprHasProperty(p, (EP_TokenOnly|EP_Leaf)) ){
/* The Expr.x union is never used at the same time as Expr.pRight */
assert( p->x.pList==0 || p->pRight==0 );
- sqlite3ExprDelete(db, p->pLeft);
+ if( p->pLeft && p->op!=TK_SELECT_COLUMN ) sqlite3ExprDeleteNN(db, p->pLeft);
sqlite3ExprDelete(db, p->pRight);
- if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken);
if( ExprHasProperty(p, EP_xIsSelect) ){
sqlite3SelectDelete(db, p->x.pSelect);
}else{
sqlite3ExprListDelete(db, p->x.pList);
}
}
+ if( ExprHasProperty(p, EP_MemToken) ) sqlite3DbFree(db, p->u.zToken);
if( !ExprHasProperty(p, EP_Static) ){
sqlite3DbFree(db, p);
}
@@ -90369,7 +90926,7 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
memcpy(zToken, p->u.zToken, nToken);
}
- if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){
+ if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){
/* Fill in the pNew->x.pSelect or pNew->x.pList member. */
if( ExprHasProperty(p, EP_xIsSelect) ){
pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags);
@@ -90381,7 +90938,7 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
/* Fill in pNew->pLeft and pNew->pRight. */
if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){
zAlloc += dupedExprNodeSize(p, dupFlags);
- if( ExprHasProperty(pNew, EP_Reduced) ){
+ if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){
pNew->pLeft = p->pLeft ?
exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0;
pNew->pRight = p->pRight ?
@@ -90391,8 +90948,12 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
*pzBuffer = zAlloc;
}
}else{
- if( !ExprHasProperty(p, EP_TokenOnly) ){
- pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
+ if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
+ if( pNew->op==TK_SELECT_COLUMN ){
+ pNew->pLeft = p->pLeft;
+ }else{
+ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
+ }
pNew->pRight = sqlite3ExprDup(db, p->pRight, 0);
}
}
@@ -90634,6 +91195,60 @@ no_mem:
}
/*
+** pColumns and pExpr form a vector assignment which is part of the SET
+** clause of an UPDATE statement. Like this:
+**
+** (a,b,c) = (expr1,expr2,expr3)
+** Or: (a,b,c) = (SELECT x,y,z FROM ....)
+**
+** For each term of the vector assignment, append new entries to the
+** expression list pList. In the case of a subquery on the LHS, append
+** TK_SELECT_COLUMN expressions.
+*/
+SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(
+ Parse *pParse, /* Parsing context */
+ ExprList *pList, /* List to which to append. Might be NULL */
+ IdList *pColumns, /* List of names of LHS of the assignment */
+ Expr *pExpr /* Vector expression to be appended. Might be NULL */
+){
+ sqlite3 *db = pParse->db;
+ int n;
+ int i;
+ int iFirst = pList ? pList->nExpr : 0;
+ /* pColumns can only be NULL due to an OOM but an OOM will cause an
+ ** exit prior to this routine being invoked */
+ if( NEVER(pColumns==0) ) goto vector_append_error;
+ if( pExpr==0 ) goto vector_append_error;
+ n = sqlite3ExprVectorSize(pExpr);
+ if( pColumns->nId!=n ){
+ sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
+ pColumns->nId, n);
+ goto vector_append_error;
+ }
+ for(i=0; i<n; i++){
+ Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i);
+ pList = sqlite3ExprListAppend(pParse, pList, pSubExpr);
+ if( pList ){
+ assert( pList->nExpr==iFirst+i+1 );
+ pList->a[pList->nExpr-1].zName = pColumns->a[i].zName;
+ pColumns->a[i].zName = 0;
+ }
+ }
+ if( pExpr->op==TK_SELECT ){
+ if( pList && pList->a[iFirst].pExpr ){
+ assert( pList->a[iFirst].pExpr->op==TK_SELECT_COLUMN );
+ pList->a[iFirst].pExpr->pRight = pExpr;
+ pExpr = 0;
+ }
+ }
+
+vector_append_error:
+ sqlite3ExprDelete(db, pExpr);
+ sqlite3IdListDelete(db, pColumns);
+ return pList;
+}
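
sqlite3ExprListAppendVector() is what implements vector assignments in the SET clause of an UPDATE. A minimal sketch of the syntax it enables, using made-up table names and assuming a build with row-value support.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE emp(id INTEGER PRIMARY KEY, name TEXT, dept TEXT);"
    "CREATE TABLE fix(id INTEGER PRIMARY KEY, name TEXT, dept TEXT);"
    "INSERT INTO emp VALUES(1,'alice','eng'),(2,'bob','ops');"
    "INSERT INTO fix VALUES(2,'robert','sre');", 0, 0, &zErr);
  /* Vector assignment: both columns are set from one correlated sub-select. */
  if( sqlite3_exec(db,
        "UPDATE emp SET (name, dept) ="
        " (SELECT name, dept FROM fix WHERE fix.id = emp.id)"
        " WHERE id IN (SELECT id FROM fix);", 0, 0, &zErr)!=SQLITE_OK ){
    fprintf(stderr, "update failed: %s\n", zErr ? zErr : "unknown error");
  }
  sqlite3_free(zErr);
  sqlite3_close(db);
  return 0;
}
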
+
+/*
** Set the sort order for the last element on the given ExprList.
*/
SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder){
@@ -91040,8 +91655,8 @@ static Select *isCandidateForInOpt(Expr *pX){
Select *p;
SrcList *pSrc;
ExprList *pEList;
- Expr *pRes;
Table *pTab;
+ int i;
if( !ExprHasProperty(pX, EP_xIsSelect) ) return 0; /* Not a subquery */
if( ExprHasProperty(pX, EP_VarSelect) ) return 0; /* Correlated subq */
p = pX->x.pSelect;
@@ -91064,23 +91679,18 @@ static Select *isCandidateForInOpt(Expr *pX){
assert( pTab->pSelect==0 ); /* FROM clause is not a view */
if( IsVirtual(pTab) ) return 0; /* FROM clause not a virtual table */
pEList = p->pEList;
- if( pEList->nExpr!=1 ) return 0; /* One column in the result set */
- pRes = pEList->a[0].pExpr;
- if( pRes->op!=TK_COLUMN ) return 0; /* Result is a column */
- assert( pRes->iTable==pSrc->a[0].iCursor ); /* Not a correlated subquery */
+ assert( pEList!=0 );
+ /* All SELECT results must be columns. */
+ for(i=0; i<pEList->nExpr; i++){
+ Expr *pRes = pEList->a[i].pExpr;
+ if( pRes->op!=TK_COLUMN ) return 0;
+ assert( pRes->iTable==pSrc->a[0].iCursor ); /* Not a correlated subquery */
+ }
return p;
}
#endif /* SQLITE_OMIT_SUBQUERY */
-/*
-** Code an OP_Once instruction and allocate space for its flag. Return the
-** address of the new instruction.
-*/
-SQLITE_PRIVATE int sqlite3CodeOnce(Parse *pParse){
- Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */
- return sqlite3VdbeAddOp1(v, OP_Once, pParse->nOnce++);
-}
-
+#ifndef SQLITE_OMIT_SUBQUERY
/*
** Generate code that checks the left-most column of index table iCur to see if
** it contains any NULL entries. Cause the register at regHasNull to be set
@@ -91096,6 +91706,7 @@ static void sqlite3SetHasNullFlag(Vdbe *v, int iCur, int regHasNull){
VdbeComment((v, "first_entry_in(%d)", iCur));
sqlite3VdbeJumpHere(v, addr1);
}
+#endif
#ifndef SQLITE_OMIT_SUBQUERY
@@ -91140,7 +91751,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** An existing b-tree might be used if the RHS expression pX is a simple
** subquery such as:
**
-** SELECT <column> FROM <table>
+** SELECT <column1>, <column2>... FROM <table>
**
** If the RHS of the IN operator is a list or a more complex subquery, then
** an ephemeral table might need to be generated from the RHS and then
@@ -91156,14 +91767,14 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
**
** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate
** through the set members) then the b-tree must not contain duplicates.
-** An epheremal table must be used unless the selected <column> is guaranteed
-** to be unique - either because it is an INTEGER PRIMARY KEY or it
-** has a UNIQUE constraint or UNIQUE index.
+** An ephemeral table must be used unless the selected columns are guaranteed
+** to be unique - either because it is an INTEGER PRIMARY KEY or due to
+** a UNIQUE constraint or index.
**
** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used
** for fast set membership tests) then an epheremal table must
-** be used unless <column> is an INTEGER PRIMARY KEY or an index can
-** be found with <column> as its left-most column.
+** be used unless <columns> is a single INTEGER PRIMARY KEY column or an
+** index can be found with the specified <columns> as its left-most.
**
** If the IN_INDEX_NOOP_OK and IN_INDEX_MEMBERSHIP are both set and
** if the RHS of the IN operator is a list (not a subquery) then this
@@ -91184,9 +91795,26 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** the value in that register will be NULL if the b-tree contains one or more
** NULL values, and it will be some non-NULL value if the b-tree contains no
** NULL values.
+**
+** If the aiMap parameter is not NULL, it must point to an array containing
+** one element for each column returned by the SELECT statement on the RHS
+** of the IN(...) operator. The i'th entry of the array is populated with the
+** offset of the index column that matches the i'th column returned by the
+** SELECT. For example, if the expression and selected index are:
+**
+** (?,?,?) IN (SELECT a, b, c FROM t1)
+** CREATE INDEX i1 ON t1(b, c, a);
+**
+** then aiMap[] is populated with {2, 0, 1}.
*/
#ifndef SQLITE_OMIT_SUBQUERY
-SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int *prRhsHasNull){
+SQLITE_PRIVATE int sqlite3FindInIndex(
+ Parse *pParse, /* Parsing context */
+ Expr *pX, /* The right-hand side (RHS) of the IN operator */
+ u32 inFlags, /* IN_INDEX_LOOP, _MEMBERSHIP, and/or _NOOP_OK */
+ int *prRhsHasNull, /* Register holding NULL status. See notes */
+ int *aiMap /* Mapping from Index fields to RHS fields */
+){
Select *p; /* SELECT to the right of IN operator */
int eType = 0; /* Type of RHS table. IN_INDEX_* */
int iTab = pParse->nTab++; /* Cursor of the RHS table */
@@ -91196,36 +91824,46 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
assert( pX->op==TK_IN );
mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;
+ /* If the RHS of this IN(...) operator is a SELECT, and if it matters
+ ** whether or not the SELECT result contains NULL values, check whether
+ ** or not NULL is actually possible (it may not be, for example, due
+ ** to NOT NULL constraints in the schema). If no NULL values are possible,
+ ** set prRhsHasNull to 0 before continuing. */
+ if( prRhsHasNull && (pX->flags & EP_xIsSelect) ){
+ int i;
+ ExprList *pEList = pX->x.pSelect->pEList;
+ for(i=0; i<pEList->nExpr; i++){
+ if( sqlite3ExprCanBeNull(pEList->a[i].pExpr) ) break;
+ }
+ if( i==pEList->nExpr ){
+ prRhsHasNull = 0;
+ }
+ }
+
/* Check to see if an existing table or index can be used to
** satisfy the query. This is preferable to generating a new
- ** ephemeral table.
- */
+ ** ephemeral table. */
if( pParse->nErr==0 && (p = isCandidateForInOpt(pX))!=0 ){
sqlite3 *db = pParse->db; /* Database connection */
Table *pTab; /* Table <table>. */
- Expr *pExpr; /* Expression <column> */
- i16 iCol; /* Index of column <column> */
i16 iDb; /* Database idx for pTab */
+ ExprList *pEList = p->pEList;
+ int nExpr = pEList->nExpr;
assert( p->pEList!=0 ); /* Because of isCandidateForInOpt(p) */
assert( p->pEList->a[0].pExpr!=0 ); /* Because of isCandidateForInOpt(p) */
assert( p->pSrc!=0 ); /* Because of isCandidateForInOpt(p) */
pTab = p->pSrc->a[0].pTab;
- pExpr = p->pEList->a[0].pExpr;
- iCol = (i16)pExpr->iColumn;
-
+
/* Code an OP_Transaction and OP_TableLock for <table>. */
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
sqlite3CodeVerifySchema(pParse, iDb);
sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName);
- /* This function is only called from two places. In both cases the vdbe
- ** has already been allocated. So assume sqlite3GetVdbe() is always
- ** successful here.
- */
- assert(v);
- if( iCol<0 ){
- int iAddr = sqlite3CodeOnce(pParse);
+ assert(v); /* sqlite3GetVdbe() has always been previously called */
+ if( nExpr==1 && pEList->a[0].pExpr->iColumn<0 ){
+ /* The "x IN (SELECT rowid FROM table)" case */
+ int iAddr = sqlite3VdbeAddOp0(v, OP_Once);
VdbeCoverage(v);
sqlite3OpenTable(pParse, iTab, iDb, pTab, OP_OpenRead);
@@ -91234,44 +91872,109 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
sqlite3VdbeJumpHere(v, iAddr);
}else{
Index *pIdx; /* Iterator variable */
+ int affinity_ok = 1;
+ int i;
- /* The collation sequence used by the comparison. If an index is to
- ** be used in place of a temp-table, it must be ordered according
- ** to this collation sequence. */
- CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pX->pLeft, pExpr);
-
- /* Check that the affinity that will be used to perform the
- ** comparison is the same as the affinity of the column. If
- ** it is not, it is not possible to use any index.
- */
- int affinity_ok = sqlite3IndexAffinityOk(pX, pTab->aCol[iCol].affinity);
-
- for(pIdx=pTab->pIndex; pIdx && eType==0 && affinity_ok; pIdx=pIdx->pNext){
- if( (pIdx->aiColumn[0]==iCol)
- && sqlite3FindCollSeq(db, ENC(db), pIdx->azColl[0], 0)==pReq
- && (!mustBeUnique || (pIdx->nKeyCol==1 && IsUniqueIndex(pIdx)))
- ){
- int iAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb);
- sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
- VdbeComment((v, "%s", pIdx->zName));
- assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 );
- eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0];
-
- if( prRhsHasNull && !pTab->aCol[iCol].notNull ){
+      /* Check that the affinity that will be used to perform each
+      ** comparison is the same as the affinity of each column in the table
+      ** on the RHS of the IN operator. If it is not, it is not possible to
+      ** use any index of the RHS table. */
+ for(i=0; i<nExpr && affinity_ok; i++){
+ Expr *pLhs = sqlite3VectorFieldSubexpr(pX->pLeft, i);
+ int iCol = pEList->a[i].pExpr->iColumn;
+ char idxaff = sqlite3TableColumnAffinity(pTab,iCol); /* RHS table */
+ char cmpaff = sqlite3CompareAffinity(pLhs, idxaff);
+ testcase( cmpaff==SQLITE_AFF_BLOB );
+ testcase( cmpaff==SQLITE_AFF_TEXT );
+ switch( cmpaff ){
+ case SQLITE_AFF_BLOB:
+ break;
+ case SQLITE_AFF_TEXT:
+ /* sqlite3CompareAffinity() only returns TEXT if one side or the
+ ** other has no affinity and the other side is TEXT. Hence,
+ ** the only way for cmpaff to be TEXT is for idxaff to be TEXT
+ ** and for the term on the LHS of the IN to have no affinity. */
+ assert( idxaff==SQLITE_AFF_TEXT );
+ break;
+ default:
+ affinity_ok = sqlite3IsNumericAffinity(idxaff);
+ }
+ }
+
+ if( affinity_ok ){
+ /* Search for an existing index that will work for this IN operator */
+ for(pIdx=pTab->pIndex; pIdx && eType==0; pIdx=pIdx->pNext){
+ Bitmask colUsed; /* Columns of the index used */
+ Bitmask mCol; /* Mask for the current column */
+ if( pIdx->nColumn<nExpr ) continue;
+ /* Maximum nColumn is BMS-2, not BMS-1, so that we can compute
+ ** BITMASK(nExpr) without overflowing */
+ testcase( pIdx->nColumn==BMS-2 );
+ testcase( pIdx->nColumn==BMS-1 );
+ if( pIdx->nColumn>=BMS-1 ) continue;
+ if( mustBeUnique ){
+ if( pIdx->nKeyCol>nExpr
+ ||(pIdx->nColumn>nExpr && !IsUniqueIndex(pIdx))
+ ){
+ continue; /* This index is not unique over the IN RHS columns */
+ }
+ }
+
+ colUsed = 0; /* Columns of index used so far */
+ for(i=0; i<nExpr; i++){
+ Expr *pLhs = sqlite3VectorFieldSubexpr(pX->pLeft, i);
+ Expr *pRhs = pEList->a[i].pExpr;
+ CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs);
+ int j;
+
+ assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr );
+ for(j=0; j<nExpr; j++){
+ if( pIdx->aiColumn[j]!=pRhs->iColumn ) continue;
+ assert( pIdx->azColl[j] );
+ if( pReq!=0 && sqlite3StrICmp(pReq->zName, pIdx->azColl[j])!=0 ){
+ continue;
+ }
+ break;
+ }
+ if( j==nExpr ) break;
+ mCol = MASKBIT(j);
+ if( mCol & colUsed ) break; /* Each column used only once */
+ colUsed |= mCol;
+ if( aiMap ) aiMap[i] = j;
+ }
+
+ assert( i==nExpr || colUsed!=(MASKBIT(nExpr)-1) );
+ if( colUsed==(MASKBIT(nExpr)-1) ){
+ /* If we reach this point, that means the index pIdx is usable */
+ int iAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+#ifndef SQLITE_OMIT_EXPLAIN
+ sqlite3VdbeAddOp4(v, OP_Explain, 0, 0, 0,
+ sqlite3MPrintf(db, "USING INDEX %s FOR IN-OPERATOR",pIdx->zName),
+ P4_DYNAMIC);
+#endif
+ sqlite3VdbeAddOp3(v, OP_OpenRead, iTab, pIdx->tnum, iDb);
+ sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
+ VdbeComment((v, "%s", pIdx->zName));
+ assert( IN_INDEX_INDEX_DESC == IN_INDEX_INDEX_ASC+1 );
+ eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0];
+
+ if( prRhsHasNull ){
#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- const i64 sOne = 1;
- sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed,
- iTab, 0, 0, (u8*)&sOne, P4_INT64);
+ i64 mask = (1<<nExpr)-1;
+ sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed,
+ iTab, 0, 0, (u8*)&mask, P4_INT64);
#endif
- *prRhsHasNull = ++pParse->nMem;
- sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull);
+ *prRhsHasNull = ++pParse->nMem;
+ if( nExpr==1 ){
+ sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull);
+ }
+ }
+ sqlite3VdbeJumpHere(v, iAddr);
}
- sqlite3VdbeJumpHere(v, iAddr);
- }
- }
- }
- }
+ } /* End loop over indexes */
+ } /* End if( affinity_ok ) */
+    } /* End if not a rowid index */
+ } /* End attempt to optimize using an index */
/* If no preexisting index is available for the IN clause
** and IN_INDEX_NOOP is an allowed reply
@@ -91287,7 +91990,6 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
){
eType = IN_INDEX_NOOP;
}
-
if( eType==0 ){
/* Could not find an existing table or index to use as the RHS b-tree.
@@ -91309,10 +92011,63 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
}else{
pX->iTable = iTab;
}
+
+ if( aiMap && eType!=IN_INDEX_INDEX_ASC && eType!=IN_INDEX_INDEX_DESC ){
+ int i, n;
+ n = sqlite3ExprVectorSize(pX->pLeft);
+ for(i=0; i<n; i++) aiMap[i] = i;
+ }
return eType;
}
#endif
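
When sqlite3FindInIndex() succeeds in reusing an existing index for a multi-column IN, the OP_Explain added above makes that visible as "USING INDEX ... FOR IN-OPERATOR" in EXPLAIN QUERY PLAN output. A rough sketch; the table, index, and query are made up, and whether the planner actually picks the index depends on its cost decisions.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a,b,c);"
    "CREATE TABLE t2(b,c);"
    "CREATE INDEX t2bc ON t2(b,c);", 0, 0, 0);
  if( sqlite3_prepare_v2(db,
        "EXPLAIN QUERY PLAN "
        "SELECT a FROM t1 WHERE (b,c) IN (SELECT b,c FROM t2)",
        -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      int iDetail = sqlite3_column_count(pStmt)-1; /* "detail" is the last column */
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, iDetail));
    }
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
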
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Argument pExpr is an (?, ?...) IN(...) expression. This
+** function allocates and returns a nul-terminated string containing
+** the affinities to be used for each column of the comparison.
+**
+** It is the responsibility of the caller to ensure that the returned
+** string is eventually freed using sqlite3DbFree().
+*/
+static char *exprINAffinity(Parse *pParse, Expr *pExpr){
+ Expr *pLeft = pExpr->pLeft;
+ int nVal = sqlite3ExprVectorSize(pLeft);
+ Select *pSelect = (pExpr->flags & EP_xIsSelect) ? pExpr->x.pSelect : 0;
+ char *zRet;
+
+ assert( pExpr->op==TK_IN );
+ zRet = sqlite3DbMallocZero(pParse->db, nVal+1);
+ if( zRet ){
+ int i;
+ for(i=0; i<nVal; i++){
+ Expr *pA = sqlite3VectorFieldSubexpr(pLeft, i);
+ char a = sqlite3ExprAffinity(pA);
+ if( pSelect ){
+ zRet[i] = sqlite3CompareAffinity(pSelect->pEList->a[i].pExpr, a);
+ }else{
+ zRet[i] = a;
+ }
+ }
+ zRet[nVal] = '\0';
+ }
+ return zRet;
+}
+#endif
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
+** Load the Parse object passed as the first argument with an error
+** message of the form:
+**
+** "sub-select returns N columns - expected M"
+*/
+SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpect){
+ const char *zFmt = "sub-select returns %d columns - expected %d";
+ sqlite3ErrorMsg(pParse, zFmt, nActual, nExpect);
+}
+#endif
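
A small sketch of the error produced via sqlite3SubselectError() when the LHS vector of an IN does not match the column count of the RHS sub-select (illustrative only, assuming row-value support):

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  if( sqlite3_prepare_v2(db, "SELECT (1,2) IN (SELECT 1)",
                         -1, &pStmt, 0)!=SQLITE_OK ){
    /* Expected message: "sub-select returns 1 columns - expected 2" */
    printf("prepare error: %s\n", sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
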
+
/*
** Generate code for scalar subqueries used as a subquery expression, EXISTS,
** or IN operators. Examples:
@@ -91338,7 +92093,9 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int
** value to non-NULL if the RHS is NULL-free.
**
** For a SELECT or EXISTS operator, return the register that holds the
-** result. For IN operators or if an error occurs, the return value is 0.
+** result. For a multi-column SELECT, the result is stored in a contiguous
+** array of registers and the return value is the register of the left-most
+** result column. Return 0 for IN operators or if an error occurs.
*/
#ifndef SQLITE_OMIT_SUBQUERY
SQLITE_PRIVATE int sqlite3CodeSubselect(
@@ -91353,8 +92110,8 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
if( NEVER(v==0) ) return 0;
sqlite3ExprCachePush(pParse);
- /* This code must be run in its entirety every time it is encountered
- ** if any of the following is true:
+ /* The evaluation of the IN/EXISTS/SELECT must be repeated every time it
+ ** is encountered if any of the following is true:
**
** * The right-hand side is a correlated subquery
** * The right-hand side is an expression list containing variables
@@ -91364,7 +92121,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** save the results, and reuse the same result on subsequent invocations.
*/
if( !ExprHasProperty(pExpr, EP_VarSelect) ){
- jmpIfDynamic = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ jmpIfDynamic = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
#ifndef SQLITE_OMIT_EXPLAIN
@@ -91380,17 +92137,18 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
switch( pExpr->op ){
case TK_IN: {
- char affinity; /* Affinity of the LHS of the IN */
int addr; /* Address of OP_OpenEphemeral instruction */
Expr *pLeft = pExpr->pLeft; /* the LHS of the IN operator */
KeyInfo *pKeyInfo = 0; /* Key information */
-
- affinity = sqlite3ExprAffinity(pLeft);
+ int nVal; /* Size of vector pLeft */
+
+ nVal = sqlite3ExprVectorSize(pLeft);
+ assert( !isRowid || nVal==1 );
/* Whether this is an 'x IN(SELECT...)' or an 'x IN(<exprlist>)'
** expression it is handled the same way. An ephemeral table is
- ** filled with single-field index keys representing the results
- ** from the SELECT or the <exprlist>.
+ ** filled with index keys representing the results from the
+ ** SELECT or the <exprlist>.
**
** If the 'x' expression is a column value, or the SELECT...
** statement returns a column value, then the affinity of that
@@ -91401,8 +92159,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** is used.
*/
pExpr->iTable = pParse->nTab++;
- addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pExpr->iTable, !isRowid);
- pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, 1, 1);
+ addr = sqlite3VdbeAddOp2(v, OP_OpenEphemeral,
+ pExpr->iTable, (isRowid?0:nVal));
+ pKeyInfo = isRowid ? 0 : sqlite3KeyInfoAlloc(pParse->db, nVal, 1);
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
/* Case 1: expr IN (SELECT ...)
@@ -91411,27 +92170,37 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** table allocated and opened above.
*/
Select *pSelect = pExpr->x.pSelect;
- SelectDest dest;
- ExprList *pEList;
+ ExprList *pEList = pSelect->pEList;
assert( !isRowid );
- sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable);
- dest.affSdst = (u8)affinity;
- assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable );
- pSelect->iLimit = 0;
- testcase( pSelect->selFlags & SF_Distinct );
- testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */
- if( sqlite3Select(pParse, pSelect, &dest) ){
- sqlite3KeyInfoUnref(pKeyInfo);
- return 0;
+ /* If the LHS and RHS of the IN operator do not match, that
+ ** error will have been caught long before we reach this point. */
+ if( ALWAYS(pEList->nExpr==nVal) ){
+ SelectDest dest;
+ int i;
+ sqlite3SelectDestInit(&dest, SRT_Set, pExpr->iTable);
+ dest.zAffSdst = exprINAffinity(pParse, pExpr);
+ assert( (pExpr->iTable&0x0000FFFF)==pExpr->iTable );
+ pSelect->iLimit = 0;
+ testcase( pSelect->selFlags & SF_Distinct );
+ testcase( pKeyInfo==0 ); /* Caused by OOM in sqlite3KeyInfoAlloc() */
+ if( sqlite3Select(pParse, pSelect, &dest) ){
+ sqlite3DbFree(pParse->db, dest.zAffSdst);
+ sqlite3KeyInfoUnref(pKeyInfo);
+ return 0;
+ }
+ sqlite3DbFree(pParse->db, dest.zAffSdst);
+ assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */
+ assert( pEList!=0 );
+ assert( pEList->nExpr>0 );
+ assert( sqlite3KeyInfoIsWriteable(pKeyInfo) );
+ for(i=0; i<nVal; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pLeft, i);
+ pKeyInfo->aColl[i] = sqlite3BinaryCompareCollSeq(
+ pParse, p, pEList->a[i].pExpr
+ );
+ }
}
- pEList = pSelect->pEList;
- assert( pKeyInfo!=0 ); /* OOM will cause exit after sqlite3Select() */
- assert( pEList!=0 );
- assert( pEList->nExpr>0 );
- assert( sqlite3KeyInfoIsWriteable(pKeyInfo) );
- pKeyInfo->aColl[0] = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft,
- pEList->a[0].pExpr);
}else if( ALWAYS(pExpr->x.pList!=0) ){
/* Case 2: expr IN (exprlist)
**
@@ -91440,11 +92209,13 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
** that column's affinity when building index keys. If <expr> is not
** a column, use numeric affinity.
*/
+ char affinity; /* Affinity of the LHS of the IN */
int i;
ExprList *pList = pExpr->x.pList;
struct ExprList_item *pItem;
int r1, r2, r3;
+ affinity = sqlite3ExprAffinity(pLeft);
if( !affinity ){
affinity = SQLITE_AFF_BLOB;
}
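
The hunk above widens the ephemeral table for "x IN (SELECT ...)" from a single key column to nVal columns, which is what lets a row-value (vector) appear on the left of IN. As a rough illustration of the behaviour this enables at the SQL level, the following standalone program uses only the public C API (sqlite3_open, sqlite3_prepare_v2, sqlite3_step); the table name and values are invented for the example, and it assumes a libsqlite3 recent enough to contain this row-value support:

    /* Hypothetical demo: row-value IN against a subquery, public API only. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
      sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,2),(3,4);",
                   0, 0, 0);
      /* The LHS (1,2) is a two-column vector; the subquery supplies matching
      ** two-column rows, which the engine loads into an ephemeral index as
      ** arranged by the code in the hunk above. */
      sqlite3_prepare_v2(db, "SELECT (1,2) IN (SELECT a,b FROM t)", -1, &stmt, 0);
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        printf("(1,2) IN (SELECT a,b FROM t) -> %d\n", sqlite3_column_int(stmt,0));
      }
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }
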
@@ -91500,26 +92271,37 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
case TK_EXISTS:
case TK_SELECT:
default: {
- /* If this has to be a scalar SELECT. Generate code to put the
- ** value of this select in a memory cell and record the number
- ** of the memory cell in iColumn. If this is an EXISTS, write
- ** an integer 0 (not exists) or 1 (exists) into a memory cell
- ** and record that memory cell in iColumn.
+ /* Case 3: (SELECT ... FROM ...)
+ ** or: EXISTS(SELECT ... FROM ...)
+ **
+ ** For a SELECT, generate code to put the values for all columns of
+ ** the first row into an array of registers and return the index of
+ ** the first register.
+ **
+ ** If this is an EXISTS, write an integer 0 (not exists) or 1 (exists)
+ ** into a register and return that register number.
+ **
+ ** In both cases, the query is augmented with "LIMIT 1". Any
+ ** preexisting limit is discarded and replaced by the new LIMIT 1.
*/
Select *pSel; /* SELECT statement to encode */
- SelectDest dest; /* How to deal with SELECt result */
+ SelectDest dest; /* How to deal with SELECT result */
+ int nReg; /* Registers to allocate */
testcase( pExpr->op==TK_EXISTS );
testcase( pExpr->op==TK_SELECT );
assert( pExpr->op==TK_EXISTS || pExpr->op==TK_SELECT );
-
assert( ExprHasProperty(pExpr, EP_xIsSelect) );
+
pSel = pExpr->x.pSelect;
- sqlite3SelectDestInit(&dest, 0, ++pParse->nMem);
+ nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1;
+ sqlite3SelectDestInit(&dest, 0, pParse->nMem+1);
+ pParse->nMem += nReg;
if( pExpr->op==TK_SELECT ){
dest.eDest = SRT_Mem;
dest.iSdst = dest.iSDParm;
- sqlite3VdbeAddOp2(v, OP_Null, 0, dest.iSDParm);
+ dest.nSdst = nReg;
+ sqlite3VdbeAddOp3(v, OP_Null, 0, dest.iSDParm, dest.iSDParm+nReg-1);
VdbeComment((v, "Init subquery result"));
}else{
dest.eDest = SRT_Exists;
@@ -91527,8 +92309,8 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
VdbeComment((v, "Init EXISTS result"));
}
sqlite3ExprDelete(pParse->db, pSel->pLimit);
- pSel->pLimit = sqlite3PExpr(pParse, TK_INTEGER, 0, 0,
- &sqlite3IntTokens[1]);
+ pSel->pLimit = sqlite3ExprAlloc(pParse->db, TK_INTEGER,
+ &sqlite3IntTokens[1], 0);
pSel->iLimit = 0;
pSel->selFlags &= ~SF_MultiValue;
if( sqlite3Select(pParse, pSel, &dest) ){
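
The rewritten Case 3 comment states that a parenthesized SELECT used as an expression yields only its first row, because the code generator forces LIMIT 1 onto it, while EXISTS yields 0 or 1. A minimal sketch of that observable behaviour through the public API (schema and values are invented for the example):

    /* Hypothetical demo: a scalar subquery produces only its first row. */
    #include <stdio.h>
    #include <sqlite3.h>

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(7),(8),(9);",
                   0, 0, 0);
      /* Even though t has three rows, the scalar subquery is implicitly
      ** limited to one row, so the outer SELECT sees a single value. */
      sqlite3_prepare_v2(db, "SELECT (SELECT x FROM t), EXISTS(SELECT 1 FROM t)",
                         -1, &stmt, 0);
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        printf("scalar = %d, exists = %d\n",
               sqlite3_column_int(stmt,0), sqlite3_column_int(stmt,1));
      }
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }
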
@@ -91555,21 +92337,55 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
#ifndef SQLITE_OMIT_SUBQUERY
/*
+** Expr pIn is an IN(...) expression. This function checks that the
+** sub-select on the RHS of the IN() operator has the same number of
+** columns as the vector on the LHS. Or, if the RHS of the IN() is not
+** a sub-query, that the LHS is a vector of size 1.
+*/
+SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){
+ int nVector = sqlite3ExprVectorSize(pIn->pLeft);
+ if( (pIn->flags & EP_xIsSelect) ){
+ if( nVector!=pIn->x.pSelect->pEList->nExpr ){
+ sqlite3SubselectError(pParse, pIn->x.pSelect->pEList->nExpr, nVector);
+ return 1;
+ }
+ }else if( nVector!=1 ){
+ if( (pIn->pLeft->flags & EP_xIsSelect) ){
+ sqlite3SubselectError(pParse, nVector, 1);
+ }else{
+ sqlite3ErrorMsg(pParse, "row value misused");
+ }
+ return 1;
+ }
+ return 0;
+}
+#endif
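
sqlite3ExprCheckIN() above rejects an IN whose left-hand vector width disagrees with the subquery on the right, and rejects a vector paired with a plain value list. Those checks surface as prepare-time errors; the hypothetical program below simply prepares the three shapes and prints whatever error message the library reports (table name and statements are made up, and a build with row-value support is assumed):

    /* Hypothetical demo: mismatched IN operands fail at prepare time. */
    #include <stdio.h>
    #include <sqlite3.h>

    static void try(sqlite3 *db, const char *zSql){
      sqlite3_stmt *stmt = 0;
      int rc = sqlite3_prepare_v2(db, zSql, -1, &stmt, 0);
      printf("%-45s -> rc=%d %s\n", zSql, rc,
             rc==SQLITE_OK ? "ok" : sqlite3_errmsg(db));
      sqlite3_finalize(stmt);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      sqlite3_exec(db, "CREATE TABLE t(a,b);", 0, 0, 0);
      try(db, "SELECT (a,b) IN (SELECT a,b FROM t) FROM t");  /* widths match   */
      try(db, "SELECT (a,b) IN (SELECT a FROM t) FROM t");    /* width mismatch */
      try(db, "SELECT (a,b) IN (1,2) FROM t");                /* vector vs list */
      sqlite3_close(db);
      return 0;
    }
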
+
+#ifndef SQLITE_OMIT_SUBQUERY
+/*
** Generate code for an IN expression.
**
** x IN (SELECT ...)
** x IN (value, value, ...)
**
-** The left-hand side (LHS) is a scalar expression. The right-hand side (RHS)
-** is an array of zero or more values. The expression is true if the LHS is
-** contained within the RHS. The value of the expression is unknown (NULL)
-** if the LHS is NULL or if the LHS is not contained within the RHS and the
-** RHS contains one or more NULL values.
+** The left-hand side (LHS) is a scalar or vector expression. The
+** right-hand side (RHS) is an array of zero or more scalar values, or a
+** subquery. If the RHS is a subquery, the number of result columns must
+** match the number of columns in the vector on the LHS. If the RHS is
+** a list of values, the LHS must be a scalar.
+**
+** The IN operator is true if the LHS value is contained within the RHS.
+** The result is false if the LHS is definitely not in the RHS. The
+** result is NULL if the presence of the LHS in the RHS cannot be
+** determined due to NULLs.
**
** This routine generates code that jumps to destIfFalse if the LHS is not
** contained within the RHS. If due to NULLs we cannot determine if the LHS
** is contained in the RHS then jump to destIfNull. If the LHS is contained
** within the RHS then fall through.
+**
+** See the separate in-operator.md documentation file in the canonical
+** SQLite source tree for additional information.
*/
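
The comment above spells out the three-valued result of IN: true when the LHS is found, false when it is definitely absent, NULL when NULL values make the answer unknowable. A minimal demonstration of those outcomes through the public API (the literal values are arbitrary) before the implementation that follows:

    /* Hypothetical demo: TRUE / FALSE / NULL outcomes of the IN operator. */
    #include <stdio.h>
    #include <sqlite3.h>

    static void eval(sqlite3 *db, const char *zSql){
      sqlite3_stmt *stmt;
      sqlite3_prepare_v2(db, zSql, -1, &stmt, 0);
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        if( sqlite3_column_type(stmt,0)==SQLITE_NULL ){
          printf("%-25s -> NULL\n", zSql);
        }else{
          printf("%-25s -> %d\n", zSql, sqlite3_column_int(stmt,0));
        }
      }
      sqlite3_finalize(stmt);
    }

    int main(void){
      sqlite3 *db;
      sqlite3_open(":memory:", &db);
      eval(db, "SELECT 1 IN (1, NULL)");   /* found: TRUE despite the NULL  */
      eval(db, "SELECT 1 IN (2, 3)");      /* definitely absent: FALSE      */
      eval(db, "SELECT 1 IN (2, NULL)");   /* unknown because of NULL: NULL */
      sqlite3_close(db);
      return 0;
    }
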
static void sqlite3ExprCodeIN(
Parse *pParse, /* Parsing and code generating context */
@@ -91578,36 +92394,83 @@ static void sqlite3ExprCodeIN(
int destIfNull /* Jump here if the results are unknown due to NULLs */
){
int rRhsHasNull = 0; /* Register that is true if RHS contains NULL values */
- char affinity; /* Comparison affinity to use */
int eType; /* Type of the RHS */
- int r1; /* Temporary use register */
+ int rLhs; /* Register(s) holding the LHS values */
+ int rLhsOrig; /* LHS values prior to reordering by aiMap[] */
Vdbe *v; /* Statement under construction */
+ int *aiMap = 0; /* Map from vector field to index column */
+ char *zAff = 0; /* Affinity string for comparisons */
+ int nVector; /* Size of vectors for this IN operator */
+ int iDummy; /* Dummy parameter to exprCodeVector() */
+ Expr *pLeft; /* The LHS of the IN operator */
+ int i; /* loop counter */
+ int destStep2; /* Where to jump when NULLs seen in step 2 */
+ int destStep6 = 0; /* Start of code for Step 6 */
+ int addrTruthOp; /* Address of opcode that determines the IN is true */
+ int destNotNull; /* Jump here if a comparison is not true in step 6 */
+ int addrTop; /* Top of the step-6 loop */
+
+ pLeft = pExpr->pLeft;
+ if( sqlite3ExprCheckIN(pParse, pExpr) ) return;
+ zAff = exprINAffinity(pParse, pExpr);
+ nVector = sqlite3ExprVectorSize(pExpr->pLeft);
+ aiMap = (int*)sqlite3DbMallocZero(
+ pParse->db, nVector*(sizeof(int) + sizeof(char)) + 1
+ );
+ if( pParse->db->mallocFailed ) goto sqlite3ExprCodeIN_oom_error;
- /* Compute the RHS. After this step, the table with cursor
- ** pExpr->iTable will contains the values that make up the RHS.
- */
+ /* Attempt to compute the RHS. After this step, if anything other than
+ ** IN_INDEX_NOOP is returned, the table opened with cursor pExpr->iTable
+ ** contains the values that make up the RHS. If IN_INDEX_NOOP is returned,
+ ** the RHS has not yet been coded. */
v = pParse->pVdbe;
assert( v!=0 ); /* OOM detected prior to this routine */
VdbeNoopComment((v, "begin IN expr"));
eType = sqlite3FindInIndex(pParse, pExpr,
IN_INDEX_MEMBERSHIP | IN_INDEX_NOOP_OK,
- destIfFalse==destIfNull ? 0 : &rRhsHasNull);
+ destIfFalse==destIfNull ? 0 : &rRhsHasNull, aiMap);
- /* Figure out the affinity to use to create a key from the results
- ** of the expression. affinityStr stores a static string suitable for
- ** P4 of OP_MakeRecord.
- */
- affinity = comparisonAffinity(pExpr);
+ assert( pParse->nErr || nVector==1 || eType==IN_INDEX_EPH
+ || eType==IN_INDEX_INDEX_ASC || eType==IN_INDEX_INDEX_DESC
+ );
+#ifdef SQLITE_DEBUG
+ /* Confirm that aiMap[] contains nVector integer values between 0 and
+ ** nVector-1. */
+ for(i=0; i<nVector; i++){
+ int j, cnt;
+ for(cnt=j=0; j<nVector; j++) if( aiMap[j]==i ) cnt++;
+ assert( cnt==1 );
+ }
+#endif
- /* Code the LHS, the <expr> from "<expr> IN (...)".
+ /* Code the LHS, the <expr> from "<expr> IN (...)". If the LHS is a
+ ** vector, then it is stored in an array of nVector registers starting
+ ** at r1.
+ **
+ ** sqlite3FindInIndex() might have reordered the fields of the LHS vector
+ ** so that the fields are in the same order as an existing index. The
+ ** aiMap[] array contains a mapping from the original LHS field order to
+ ** the field order that matches the RHS index.
*/
sqlite3ExprCachePush(pParse);
- r1 = sqlite3GetTempReg(pParse);
- sqlite3ExprCode(pParse, pExpr->pLeft, r1);
+ rLhsOrig = exprCodeVector(pParse, pLeft, &iDummy);
+ for(i=0; i<nVector && aiMap[i]==i; i++){} /* Are LHS fields reordered? */
+ if( i==nVector ){
+ /* LHS fields are not reordered */
+ rLhs = rLhsOrig;
+ }else{
+ /* Need to reorder the LHS fields according to aiMap */
+ rLhs = sqlite3GetTempRange(pParse, nVector);
+ for(i=0; i<nVector; i++){
+ sqlite3VdbeAddOp3(v, OP_Copy, rLhsOrig+i, rLhs+aiMap[i], 0);
+ }
+ }
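
The loop just above copies the LHS into a second register range when sqlite3FindInIndex() has permuted the vector fields, with aiMap[i] giving the index column that original field i must land in. The same permutation copy, isolated on plain C arrays (all names are invented; this is not SQLite code):

    /* Hypothetical sketch of the aiMap[] reordering: value i of the original
    ** vector is copied into slot aiMap[i] of the reordered vector, exactly
    ** like the OP_Copy loop above does with registers. */
    #include <stdio.h>

    int main(void){
      int orig[3]  = { 10, 20, 30 };   /* LHS values in expression order          */
      int aiMap[3] = { 2, 0, 1 };      /* field i belongs at index column aiMap[i] */
      int reord[3];
      int i;
      for(i=0; i<3; i++) reord[aiMap[i]] = orig[i];
      for(i=0; i<3; i++) printf("index column %d holds %d\n", i, reord[i]);
      return 0;
    }
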
/* If sqlite3FindInIndex() did not find or create an index that is
** suitable for evaluating the IN operator, then evaluate using a
** sequence of comparisons.
+ **
+ ** This is step (1) in the in-operator.md optimized algorithm.
*/
if( eType==IN_INDEX_NOOP ){
ExprList *pList = pExpr->x.pList;
@@ -91619,7 +92482,7 @@ static void sqlite3ExprCodeIN(
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
if( destIfNull!=destIfFalse ){
regCkNull = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp3(v, OP_BitAnd, r1, r1, regCkNull);
+ sqlite3VdbeAddOp3(v, OP_BitAnd, rLhs, rLhs, regCkNull);
}
for(ii=0; ii<pList->nExpr; ii++){
r2 = sqlite3ExprCodeTemp(pParse, pList->a[ii].pExpr, &regToFree);
@@ -91627,16 +92490,16 @@ static void sqlite3ExprCodeIN(
sqlite3VdbeAddOp3(v, OP_BitAnd, regCkNull, r2, regCkNull);
}
if( ii<pList->nExpr-1 || destIfNull!=destIfFalse ){
- sqlite3VdbeAddOp4(v, OP_Eq, r1, labelOk, r2,
+ sqlite3VdbeAddOp4(v, OP_Eq, rLhs, labelOk, r2,
(void*)pColl, P4_COLLSEQ);
VdbeCoverageIf(v, ii<pList->nExpr-1);
VdbeCoverageIf(v, ii==pList->nExpr-1);
- sqlite3VdbeChangeP5(v, affinity);
+ sqlite3VdbeChangeP5(v, zAff[0]);
}else{
assert( destIfNull==destIfFalse );
- sqlite3VdbeAddOp4(v, OP_Ne, r1, destIfFalse, r2,
+ sqlite3VdbeAddOp4(v, OP_Ne, rLhs, destIfFalse, r2,
(void*)pColl, P4_COLLSEQ); VdbeCoverage(v);
- sqlite3VdbeChangeP5(v, affinity | SQLITE_JUMPIFNULL);
+ sqlite3VdbeChangeP5(v, zAff[0] | SQLITE_JUMPIFNULL);
}
sqlite3ReleaseTempReg(pParse, regToFree);
}
@@ -91646,77 +92509,113 @@ static void sqlite3ExprCodeIN(
}
sqlite3VdbeResolveLabel(v, labelOk);
sqlite3ReleaseTempReg(pParse, regCkNull);
+ goto sqlite3ExprCodeIN_finished;
+ }
+
+ /* Step 2: Check to see if the LHS contains any NULL columns. If the
+ ** LHS does contain NULLs then the result must be either FALSE or NULL.
+ ** We will then skip the binary search of the RHS.
+ */
+ if( destIfNull==destIfFalse ){
+ destStep2 = destIfFalse;
}else{
-
- /* If the LHS is NULL, then the result is either false or NULL depending
- ** on whether the RHS is empty or not, respectively.
- */
- if( sqlite3ExprCanBeNull(pExpr->pLeft) ){
- if( destIfNull==destIfFalse ){
- /* Shortcut for the common case where the false and NULL outcomes are
- ** the same. */
- sqlite3VdbeAddOp2(v, OP_IsNull, r1, destIfNull); VdbeCoverage(v);
- }else{
- int addr1 = sqlite3VdbeAddOp1(v, OP_NotNull, r1); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse);
- VdbeCoverage(v);
- sqlite3VdbeGoto(v, destIfNull);
- sqlite3VdbeJumpHere(v, addr1);
- }
- }
-
- if( eType==IN_INDEX_ROWID ){
- /* In this case, the RHS is the ROWID of table b-tree
- */
- sqlite3VdbeAddOp3(v, OP_SeekRowid, pExpr->iTable, destIfFalse, r1);
+ destStep2 = destStep6 = sqlite3VdbeMakeLabel(v);
+ }
+ for(i=0; i<nVector; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pExpr->pLeft, i);
+ if( sqlite3ExprCanBeNull(p) ){
+ sqlite3VdbeAddOp2(v, OP_IsNull, rLhs+i, destStep2);
VdbeCoverage(v);
- }else{
- /* In this case, the RHS is an index b-tree.
- */
- sqlite3VdbeAddOp4(v, OP_Affinity, r1, 1, 0, &affinity, 1);
-
- /* If the set membership test fails, then the result of the
- ** "x IN (...)" expression must be either 0 or NULL. If the set
- ** contains no NULL values, then the result is 0. If the set
- ** contains one or more NULL values, then the result of the
- ** expression is also NULL.
- */
- assert( destIfFalse!=destIfNull || rRhsHasNull==0 );
- if( rRhsHasNull==0 ){
- /* This branch runs if it is known at compile time that the RHS
- ** cannot contain NULL values. This happens as the result
- ** of a "NOT NULL" constraint in the database schema.
- **
- ** Also run this branch if NULL is equivalent to FALSE
- ** for this particular IN operator.
- */
- sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse, r1, 1);
- VdbeCoverage(v);
- }else{
- /* In this branch, the RHS of the IN might contain a NULL and
- ** the presence of a NULL on the RHS makes a difference in the
- ** outcome.
- */
- int addr1;
-
- /* First check to see if the LHS is contained in the RHS. If so,
- ** then the answer is TRUE the presence of NULLs in the RHS does
- ** not matter. If the LHS is not contained in the RHS, then the
- ** answer is NULL if the RHS contains NULLs and the answer is
- ** FALSE if the RHS is NULL-free.
- */
- addr1 = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0, r1, 1);
- VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_IsNull, rRhsHasNull, destIfNull);
- VdbeCoverage(v);
- sqlite3VdbeGoto(v, destIfFalse);
- sqlite3VdbeJumpHere(v, addr1);
- }
}
}
- sqlite3ReleaseTempReg(pParse, r1);
+
+ /* Step 3. The LHS is now known to be non-NULL. Do the binary search
+ ** of the RHS using the LHS as a probe. If found, the result is
+ ** true.
+ */
+ if( eType==IN_INDEX_ROWID ){
+ /* In this case, the RHS is the ROWID of a table b-tree and so we also
+ ** know that the RHS is non-NULL. Hence, we combine steps 3 and 4
+ ** into a single opcode. */
+ sqlite3VdbeAddOp3(v, OP_SeekRowid, pExpr->iTable, destIfFalse, rLhs);
+ VdbeCoverage(v);
+ addrTruthOp = sqlite3VdbeAddOp0(v, OP_Goto); /* Return True */
+ }else{
+ sqlite3VdbeAddOp4(v, OP_Affinity, rLhs, nVector, 0, zAff, nVector);
+ if( destIfFalse==destIfNull ){
+ /* Combine Step 3 and Step 5 into a single opcode */
+ sqlite3VdbeAddOp4Int(v, OP_NotFound, pExpr->iTable, destIfFalse,
+ rLhs, nVector); VdbeCoverage(v);
+ goto sqlite3ExprCodeIN_finished;
+ }
+ /* Ordinary Step 3, for the case where FALSE and NULL are distinct */
+ addrTruthOp = sqlite3VdbeAddOp4Int(v, OP_Found, pExpr->iTable, 0,
+ rLhs, nVector); VdbeCoverage(v);
+ }
+
+ /* Step 4. If the RHS is known to be non-NULL and we did not find
+ ** a match in the search above, then the result must be FALSE.
+ */
+ if( rRhsHasNull && nVector==1 ){
+ sqlite3VdbeAddOp2(v, OP_NotNull, rRhsHasNull, destIfFalse);
+ VdbeCoverage(v);
+ }
+
+ /* Step 5. If we do not care about the difference between NULL and
+ ** FALSE, then just return false.
+ */
+ if( destIfFalse==destIfNull ) sqlite3VdbeGoto(v, destIfFalse);
+
+ /* Step 6: Loop through rows of the RHS. Compare each row to the LHS.
+ ** If any comparison is NULL, then the result is NULL. If all
+ ** comparisons are FALSE then the final result is FALSE.
+ **
+ ** For a scalar LHS, it is sufficient to check just the first row
+ ** of the RHS.
+ */
+ if( destStep6 ) sqlite3VdbeResolveLabel(v, destStep6);
+ addrTop = sqlite3VdbeAddOp2(v, OP_Rewind, pExpr->iTable, destIfFalse);
+ VdbeCoverage(v);
+ if( nVector>1 ){
+ destNotNull = sqlite3VdbeMakeLabel(v);
+ }else{
+ /* For nVector==1, combine steps 6 and 7 by immediately returning
+ ** FALSE if the first comparison is not NULL */
+ destNotNull = destIfFalse;
+ }
+ for(i=0; i<nVector; i++){
+ Expr *p;
+ CollSeq *pColl;
+ int r3 = sqlite3GetTempReg(pParse);
+ p = sqlite3VectorFieldSubexpr(pLeft, i);
+ pColl = sqlite3ExprCollSeq(pParse, p);
+ sqlite3VdbeAddOp3(v, OP_Column, pExpr->iTable, i, r3);
+ sqlite3VdbeAddOp4(v, OP_Ne, rLhs+i, destNotNull, r3,
+ (void*)pColl, P4_COLLSEQ);
+ VdbeCoverage(v);
+ sqlite3ReleaseTempReg(pParse, r3);
+ }
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfNull);
+ if( nVector>1 ){
+ sqlite3VdbeResolveLabel(v, destNotNull);
+ sqlite3VdbeAddOp2(v, OP_Next, pExpr->iTable, addrTop+1);
+ VdbeCoverage(v);
+
+ /* Step 7: If we reach this point, we know that the result must
+ ** be false. */
+ sqlite3VdbeAddOp2(v, OP_Goto, 0, destIfFalse);
+ }
+
+ /* Jumps here in order to return true. */
+ sqlite3VdbeJumpHere(v, addrTruthOp);
+
+sqlite3ExprCodeIN_finished:
+ if( rLhs!=rLhsOrig ) sqlite3ReleaseTempReg(pParse, rLhs);
sqlite3ExprCachePop(pParse);
VdbeComment((v, "end IN expr"));
+sqlite3ExprCodeIN_oom_error:
+ sqlite3DbFree(pParse->db, aiMap);
+ sqlite3DbFree(pParse->db, zAff);
}
#endif /* SQLITE_OMIT_SUBQUERY */
@@ -91780,32 +92679,19 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
}
}
-#if defined(SQLITE_DEBUG)
/*
-** Verify the consistency of the column cache
+** Erase column-cache entry number i
*/
-static int cacheIsValid(Parse *pParse){
- int i, n;
- for(i=n=0; i<SQLITE_N_COLCACHE; i++){
- if( pParse->aColCache[i].iReg>0 ) n++;
- }
- return n==pParse->nColCache;
-}
-#endif
-
-/*
-** Clear a cache entry.
-*/
-static void cacheEntryClear(Parse *pParse, struct yColCache *p){
- if( p->tempReg ){
+static void cacheEntryClear(Parse *pParse, int i){
+ if( pParse->aColCache[i].tempReg ){
if( pParse->nTempReg<ArraySize(pParse->aTempReg) ){
- pParse->aTempReg[pParse->nTempReg++] = p->iReg;
+ pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg;
}
- p->tempReg = 0;
}
- p->iReg = 0;
pParse->nColCache--;
- assert( pParse->db->mallocFailed || cacheIsValid(pParse) );
+ if( i<pParse->nColCache ){
+ pParse->aColCache[i] = pParse->aColCache[pParse->nColCache];
+ }
}
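
cacheEntryClear() now keeps the column cache densely packed: the vacated slot is overwritten by the last live entry and nColCache shrinks, so every scan only needs to look at the first nColCache slots. The swap-with-last idiom on its own, as a toy sketch (not SQLite code):

    /* Hypothetical sketch of the swap-with-last deletion used above. */
    #include <stdio.h>

    int main(void){
      int a[5] = { 11, 22, 33, 44, 55 };
      int n = 5;
      int i = 1;                 /* delete element at index 1 (value 22)   */
      n--;                       /* shrink the live count                  */
      if( i<n ) a[i] = a[n];     /* move the former last entry into slot i */
      for(i=0; i<n; i++) printf("%d ", a[i]);
      printf("\n");              /* prints: 11 55 33 44                    */
      return 0;
    }
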
@@ -91835,46 +92721,33 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int
** that the object will never already be in cache. Verify this guarantee.
*/
#ifndef NDEBUG
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- assert( p->iReg==0 || p->iTable!=iTab || p->iColumn!=iCol );
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ assert( p->iTable!=iTab || p->iColumn!=iCol );
}
#endif
- /* Find an empty slot and replace it */
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- if( p->iReg==0 ){
- p->iLevel = pParse->iCacheLevel;
- p->iTable = iTab;
- p->iColumn = iCol;
- p->iReg = iReg;
- p->tempReg = 0;
- p->lru = pParse->iCacheCnt++;
- pParse->nColCache++;
- assert( pParse->db->mallocFailed || cacheIsValid(pParse) );
- return;
- }
- }
-
- /* Replace the last recently used */
- minLru = 0x7fffffff;
- idxLru = -1;
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- if( p->lru<minLru ){
- idxLru = i;
- minLru = p->lru;
+ /* If the cache is already full, delete the least recently used entry */
+ if( pParse->nColCache>=SQLITE_N_COLCACHE ){
+ minLru = 0x7fffffff;
+ idxLru = -1;
+ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ if( p->lru<minLru ){
+ idxLru = i;
+ minLru = p->lru;
+ }
}
- }
- if( ALWAYS(idxLru>=0) ){
p = &pParse->aColCache[idxLru];
- p->iLevel = pParse->iCacheLevel;
- p->iTable = iTab;
- p->iColumn = iCol;
- p->iReg = iReg;
- p->tempReg = 0;
- p->lru = pParse->iCacheCnt++;
- assert( cacheIsValid(pParse) );
- return;
+ }else{
+ p = &pParse->aColCache[pParse->nColCache++];
}
+
+ /* Add the new entry to the end of the cache */
+ p->iLevel = pParse->iCacheLevel;
+ p->iTable = iTab;
+ p->iColumn = iCol;
+ p->iReg = iReg;
+ p->tempReg = 0;
+ p->lru = pParse->iCacheCnt++;
}
/*
@@ -91882,13 +92755,14 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int
** Purge the range of registers from the column cache.
*/
SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){
- struct yColCache *p;
- if( iReg<=0 || pParse->nColCache==0 ) return;
- p = &pParse->aColCache[SQLITE_N_COLCACHE-1];
- while(1){
- if( p->iReg >= iReg && p->iReg < iReg+nReg ) cacheEntryClear(pParse, p);
- if( p==pParse->aColCache ) break;
- p--;
+ int i = 0;
+ while( i<pParse->nColCache ){
+ struct yColCache *p = &pParse->aColCache[i];
+ if( p->iReg >= iReg && p->iReg < iReg+nReg ){
+ cacheEntryClear(pParse, i);
+ }else{
+ i++;
+ }
}
}
@@ -91912,8 +92786,7 @@ SQLITE_PRIVATE void sqlite3ExprCachePush(Parse *pParse){
** the cache to the state it was in prior to the most recent Push.
*/
SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
- int i;
- struct yColCache *p;
+ int i = 0;
assert( pParse->iCacheLevel>=1 );
pParse->iCacheLevel--;
#ifdef SQLITE_DEBUG
@@ -91921,9 +92794,11 @@ SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
printf("POP to %d\n", pParse->iCacheLevel);
}
#endif
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- if( p->iReg && p->iLevel>pParse->iCacheLevel ){
- cacheEntryClear(pParse, p);
+ while( i<pParse->nColCache ){
+ if( pParse->aColCache[i].iLevel>pParse->iCacheLevel ){
+ cacheEntryClear(pParse, i);
+ }else{
+ i++;
}
}
}
@@ -91937,7 +92812,7 @@ SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){
static void sqlite3ExprCachePinRegister(Parse *pParse, int iReg){
int i;
struct yColCache *p;
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
if( p->iReg==iReg ){
p->tempReg = 0;
}
@@ -92015,8 +92890,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
int i;
struct yColCache *p;
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- if( p->iReg>0 && p->iTable==iTable && p->iColumn==iColumn ){
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
+ if( p->iTable==iTable && p->iColumn==iColumn ){
p->lru = pParse->iCacheCnt++;
sqlite3ExprCachePinRegister(pParse, p->iReg);
return p->iReg;
@@ -92048,18 +92923,20 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg(
*/
SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){
int i;
- struct yColCache *p;
#if SQLITE_DEBUG
if( pParse->db->flags & SQLITE_VdbeAddopTrace ){
printf("CLEAR\n");
}
#endif
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
- if( p->iReg ){
- cacheEntryClear(pParse, p);
+ for(i=0; i<pParse->nColCache; i++){
+ if( pParse->aColCache[i].tempReg
+ && pParse->nTempReg<ArraySize(pParse->aTempReg)
+ ){
+ pParse->aTempReg[pParse->nTempReg++] = pParse->aColCache[i].iReg;
}
}
+ pParse->nColCache = 0;
}
/*
@@ -92091,7 +92968,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse *pParse, int iFrom, int iTo, int n
static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){
int i;
struct yColCache *p;
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
int r = p->iReg;
if( r>=iFrom && r<=iTo ) return 1; /*NO_TEST*/
}
@@ -92101,7 +92978,9 @@ static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){
/*
-** Convert an expression node to a TK_REGISTER
+** Convert a scalar expression node to a TK_REGISTER referencing
+** register iReg. The caller must ensure that iReg already contains
+** the correct value for the expression.
*/
static void exprToRegister(Expr *p, int iReg){
p->op2 = p->op;
@@ -92111,6 +92990,38 @@ static void exprToRegister(Expr *p, int iReg){
}
/*
+** Evaluate an expression (either a vector or a scalar expression) and store
+** the result in contiguous temporary registers. Return the index of
+** the first register used to store the result.
+**
+** If the returned result register is a temporary scalar, then also write
+** that register number into *piFreeable. If the returned result register
+** is not a temporary or if the expression is a vector, set *piFreeable
+** to 0.
+*/
+static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
+ int iResult;
+ int nResult = sqlite3ExprVectorSize(p);
+ if( nResult==1 ){
+ iResult = sqlite3ExprCodeTemp(pParse, p, piFreeable);
+ }else{
+ *piFreeable = 0;
+ if( p->op==TK_SELECT ){
+ iResult = sqlite3CodeSubselect(pParse, p, 0, 0);
+ }else{
+ int i;
+ iResult = pParse->nMem+1;
+ pParse->nMem += nResult;
+ for(i=0; i<nResult; i++){
+ sqlite3ExprCode(pParse, p->x.pList->a[i].pExpr, i+iResult);
+ }
+ }
+ }
+ return iResult;
+}
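
exprCodeVector() gives callers a uniform contract for scalars and vectors: the return value is always the first of nResult consecutive registers. The register-claiming pattern it uses for the vector case ("base = nMem+1; nMem += n"), reduced to a standalone sketch with invented names:

    /* Hypothetical sketch of the register-allocation pattern in
    ** exprCodeVector(): a block of n consecutive cells is claimed from a
    ** growing pool and the index of the first cell is returned. */
    #include <stdio.h>

    static int nMem = 0;               /* highest cell in use, like Parse.nMem */

    static int allocBlock(int n){
      int base = nMem + 1;             /* first register of the new block */
      nMem += n;
      return base;
    }

    int main(void){
      int a = allocBlock(1);           /* scalar: one register          */
      int b = allocBlock(3);           /* vector of 3: three registers  */
      printf("scalar at r%d, vector at r%d..r%d, nMem=%d\n", a, b, b+2, nMem);
      return 0;
    }
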
+
+
+/*
** Generate code into the current Vdbe to evaluate the given
** expression. Attempt to store the results in register "target".
** Return the register where results are stored.
@@ -92127,9 +93038,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
int inReg = target; /* Results stored in register inReg */
int regFree1 = 0; /* If non-zero free this temporary register */
int regFree2 = 0; /* If non-zero free this temporary register */
- int r1, r2, r3, r4; /* Various register numbers */
- sqlite3 *db = pParse->db; /* The database connection */
+ int r1, r2; /* Various register numbers */
Expr tempX; /* Temporary expression node */
+ int p5 = 0;
assert( target>0 && target<=pParse->nMem );
if( v==0 ){
@@ -92148,12 +93059,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
struct AggInfo_col *pCol = &pAggInfo->aCol[pExpr->iAgg];
if( !pAggInfo->directMode ){
assert( pCol->iMem>0 );
- inReg = pCol->iMem;
- break;
+ return pCol->iMem;
}else if( pAggInfo->useSortingIdx ){
sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab,
pCol->iSorterColumn, target);
- break;
+ return target;
}
/* Otherwise, fall thru into the TK_COLUMN case */
}
@@ -92162,38 +93072,36 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
if( iTab<0 ){
if( pParse->ckBase>0 ){
/* Generating CHECK constraints or inserting into partial index */
- inReg = pExpr->iColumn + pParse->ckBase;
- break;
+ return pExpr->iColumn + pParse->ckBase;
}else{
/* Coding an expression that is part of an index where column names
** in the index refer to the table to which the index belongs */
iTab = pParse->iSelfTab;
}
}
- inReg = sqlite3ExprCodeGetColumn(pParse, pExpr->pTab,
+ return sqlite3ExprCodeGetColumn(pParse, pExpr->pTab,
pExpr->iColumn, iTab, target,
pExpr->op2);
- break;
}
case TK_INTEGER: {
codeInteger(pParse, pExpr, 0, target);
- break;
+ return target;
}
#ifndef SQLITE_OMIT_FLOATING_POINT
case TK_FLOAT: {
assert( !ExprHasProperty(pExpr, EP_IntValue) );
codeReal(v, pExpr->u.zToken, 0, target);
- break;
+ return target;
}
#endif
case TK_STRING: {
assert( !ExprHasProperty(pExpr, EP_IntValue) );
sqlite3VdbeLoadString(v, target, pExpr->u.zToken);
- break;
+ return target;
}
case TK_NULL: {
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
- break;
+ return target;
}
#ifndef SQLITE_OMIT_BLOB_LITERAL
case TK_BLOB: {
@@ -92208,7 +93116,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
assert( z[n]=='\'' );
zBlob = sqlite3HexToBlob(sqlite3VdbeDb(v), z, n);
sqlite3VdbeAddOp4(v, OP_Blob, n/2, target, 0, zBlob, P4_DYNAMIC);
- break;
+ return target;
}
#endif
case TK_VARIABLE: {
@@ -92221,11 +93129,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
|| strcmp(pExpr->u.zToken, pParse->azVar[pExpr->iColumn-1])==0 );
sqlite3VdbeChangeP4(v, -1, pParse->azVar[pExpr->iColumn-1], P4_STATIC);
}
- break;
+ return target;
}
case TK_REGISTER: {
- inReg = pExpr->iTable;
- break;
+ return pExpr->iTable;
}
#ifndef SQLITE_OMIT_CAST
case TK_CAST: {
@@ -92239,42 +93146,37 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
sqlite3AffinityType(pExpr->u.zToken, 0));
testcase( usedAsColumnCache(pParse, inReg, inReg) );
sqlite3ExprCacheAffinityChange(pParse, inReg, 1);
- break;
+ return inReg;
}
#endif /* SQLITE_OMIT_CAST */
+ case TK_IS:
+ case TK_ISNOT:
+ op = (op==TK_IS) ? TK_EQ : TK_NE;
+ p5 = SQLITE_NULLEQ;
+ /* fall-through */
case TK_LT:
case TK_LE:
case TK_GT:
case TK_GE:
case TK_NE:
case TK_EQ: {
- r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
- r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
- codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
- r1, r2, inReg, SQLITE_STOREP2);
- assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
- assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
- assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
- assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
- assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
- assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
- testcase( regFree1==0 );
- testcase( regFree2==0 );
- break;
- }
- case TK_IS:
- case TK_ISNOT: {
- testcase( op==TK_IS );
- testcase( op==TK_ISNOT );
- r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
- r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
- op = (op==TK_IS) ? TK_EQ : TK_NE;
- codeCompare(pParse, pExpr->pLeft, pExpr->pRight, op,
- r1, r2, inReg, SQLITE_STOREP2 | SQLITE_NULLEQ);
- VdbeCoverageIf(v, op==TK_EQ);
- VdbeCoverageIf(v, op==TK_NE);
- testcase( regFree1==0 );
- testcase( regFree2==0 );
+ Expr *pLeft = pExpr->pLeft;
+ if( sqlite3ExprIsVector(pLeft) ){
+ codeVectorCompare(pParse, pExpr, target, op, p5);
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pLeft, &regFree1);
+ r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
+ codeCompare(pParse, pLeft, pExpr->pRight, op,
+ r1, r2, inReg, SQLITE_STOREP2 | p5);
+ assert(TK_LT==OP_Lt); testcase(op==OP_Lt); VdbeCoverageIf(v,op==OP_Lt);
+ assert(TK_LE==OP_Le); testcase(op==OP_Le); VdbeCoverageIf(v,op==OP_Le);
+ assert(TK_GT==OP_Gt); testcase(op==OP_Gt); VdbeCoverageIf(v,op==OP_Gt);
+ assert(TK_GE==OP_Ge); testcase(op==OP_Ge); VdbeCoverageIf(v,op==OP_Ge);
+ assert(TK_EQ==OP_Eq); testcase(op==OP_Eq); VdbeCoverageIf(v,op==OP_Eq);
+ assert(TK_NE==OP_Ne); testcase(op==OP_Ne); VdbeCoverageIf(v,op==OP_Ne);
+ testcase( regFree1==0 );
+ testcase( regFree2==0 );
+ }
break;
}
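
The TK_IS / TK_ISNOT cases now fall through into TK_EQ / TK_NE with SQLITE_NULLEQ set in p5, which is precisely what makes IS treat two NULLs as equal while = yields NULL. The difference is easy to observe from SQL through the public API (a minimal sketch, values arbitrary):

    /* Hypothetical demo: IS compares NULLs as equal, = does not. */
    #include <stdio.h>
    #include <sqlite3.h>

    static void show(sqlite3_stmt *stmt, int i){
      if( sqlite3_column_type(stmt,i)==SQLITE_NULL ) printf("NULL ");
      else printf("%d ", sqlite3_column_int(stmt,i));
    }

    int main(void){
      sqlite3 *db;
      sqlite3_stmt *stmt;
      sqlite3_open(":memory:", &db);
      sqlite3_prepare_v2(db,
          "SELECT NULL IS NULL, NULL = NULL, 1 IS NOT NULL, 1 != NULL",
          -1, &stmt, 0);
      if( sqlite3_step(stmt)==SQLITE_ROW ){
        int i;
        for(i=0; i<4; i++) show(stmt, i);  /* expected: 1 NULL 1 NULL */
        printf("\n");
      }
      sqlite3_finalize(stmt);
      sqlite3_close(db);
      return 0;
    }
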
case TK_AND:
@@ -92312,10 +93214,12 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
assert( pLeft );
if( pLeft->op==TK_INTEGER ){
codeInteger(pParse, pLeft, 1, target);
+ return target;
#ifndef SQLITE_OMIT_FLOATING_POINT
}else if( pLeft->op==TK_FLOAT ){
assert( !ExprHasProperty(pExpr, EP_IntValue) );
codeReal(v, pLeft->u.zToken, 1, target);
+ return target;
#endif
}else{
tempX.op = TK_INTEGER;
@@ -92326,7 +93230,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
sqlite3VdbeAddOp3(v, OP_Subtract, r2, r1, target);
testcase( regFree2==0 );
}
- inReg = target;
break;
}
case TK_BITNOT:
@@ -92335,7 +93238,6 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
assert( TK_NOT==OP_Not ); testcase( op==TK_NOT );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
testcase( regFree1==0 );
- inReg = target;
sqlite3VdbeAddOp2(v, op, r1, inReg);
break;
}
@@ -92360,7 +93262,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
assert( !ExprHasProperty(pExpr, EP_IntValue) );
sqlite3ErrorMsg(pParse, "misuse of aggregate: %s()", pExpr->u.zToken);
}else{
- inReg = pInfo->aFunc[pExpr->iAgg].iMem;
+ return pInfo->aFunc[pExpr->iAgg].iMem;
}
break;
}
@@ -92371,6 +93273,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
const char *zId; /* The function name */
u32 constMask = 0; /* Mask of function arguments that are constant */
int i; /* Loop counter */
+ sqlite3 *db = pParse->db; /* The database connection */
u8 enc = ENC(db); /* The text encoding used by this database */
CollSeq *pColl = 0; /* A collating sequence */
@@ -92419,8 +93322,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
*/
if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){
assert( nFarg>=1 );
- inReg = sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
- break;
+ return sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
}
for(i=0; i<nFarg; i++){
@@ -92495,16 +93397,27 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
if( nFarg && constMask==0 ){
sqlite3ReleaseTempRange(pParse, r1, nFarg);
}
- break;
+ return target;
}
#ifndef SQLITE_OMIT_SUBQUERY
case TK_EXISTS:
case TK_SELECT: {
+ int nCol;
testcase( op==TK_EXISTS );
testcase( op==TK_SELECT );
- inReg = sqlite3CodeSubselect(pParse, pExpr, 0, 0);
+ if( op==TK_SELECT && (nCol = pExpr->x.pSelect->pEList->nExpr)!=1 ){
+ sqlite3SubselectError(pParse, nCol, 1);
+ }else{
+ return sqlite3CodeSubselect(pParse, pExpr, 0, 0);
+ }
break;
}
+ case TK_SELECT_COLUMN: {
+ if( pExpr->pLeft->iTable==0 ){
+ pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft, 0, 0);
+ }
+ return pExpr->pLeft->iTable + pExpr->iColumn;
+ }
case TK_IN: {
int destIfFalse = sqlite3VdbeMakeLabel(v);
int destIfNull = sqlite3VdbeMakeLabel(v);
@@ -92514,7 +93427,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
sqlite3VdbeResolveLabel(v, destIfFalse);
sqlite3VdbeAddOp2(v, OP_AddImm, target, 0);
sqlite3VdbeResolveLabel(v, destIfNull);
- break;
+ return target;
}
#endif /* SQLITE_OMIT_SUBQUERY */
@@ -92531,35 +93444,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
** Z is stored in pExpr->pList->a[1].pExpr.
*/
case TK_BETWEEN: {
- Expr *pLeft = pExpr->pLeft;
- struct ExprList_item *pLItem = pExpr->x.pList->a;
- Expr *pRight = pLItem->pExpr;
-
- r1 = sqlite3ExprCodeTemp(pParse, pLeft, &regFree1);
- r2 = sqlite3ExprCodeTemp(pParse, pRight, &regFree2);
- testcase( regFree1==0 );
- testcase( regFree2==0 );
- r3 = sqlite3GetTempReg(pParse);
- r4 = sqlite3GetTempReg(pParse);
- codeCompare(pParse, pLeft, pRight, OP_Ge,
- r1, r2, r3, SQLITE_STOREP2); VdbeCoverage(v);
- pLItem++;
- pRight = pLItem->pExpr;
- sqlite3ReleaseTempReg(pParse, regFree2);
- r2 = sqlite3ExprCodeTemp(pParse, pRight, &regFree2);
- testcase( regFree2==0 );
- codeCompare(pParse, pLeft, pRight, OP_Le, r1, r2, r4, SQLITE_STOREP2);
- VdbeCoverage(v);
- sqlite3VdbeAddOp3(v, OP_And, r3, r4, target);
- sqlite3ReleaseTempReg(pParse, r3);
- sqlite3ReleaseTempReg(pParse, r4);
- break;
+ exprCodeBetween(pParse, pExpr, target, 0, 0);
+ return target;
}
case TK_SPAN:
case TK_COLLATE:
case TK_UPLUS: {
- inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
- break;
+ return sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
}
case TK_TRIGGER: {
@@ -92618,6 +93509,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
break;
}
+ case TK_VECTOR: {
+ sqlite3ErrorMsg(pParse, "row value misused");
+ break;
+ }
/*
** Form A:
@@ -92661,8 +93556,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
if( (pX = pExpr->pLeft)!=0 ){
tempX = *pX;
testcase( pX->op==TK_COLUMN );
- exprToRegister(&tempX, sqlite3ExprCodeTemp(pParse, pX, &regFree1));
+ exprToRegister(&tempX, exprCodeVector(pParse, &tempX, &regFree1));
testcase( regFree1==0 );
+ memset(&opCompare, 0, sizeof(opCompare));
opCompare.op = TK_EQ;
opCompare.pLeft = &tempX;
pTest = &opCompare;
@@ -92696,7 +93592,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
}else{
sqlite3VdbeAddOp2(v, OP_Null, 0, target);
}
- assert( db->mallocFailed || pParse->nErr>0
+ assert( pParse->db->mallocFailed || pParse->nErr>0
|| pParse->iCacheLevel==iCacheLevel );
sqlite3VdbeResolveLabel(v, endLabel);
break;
@@ -92941,20 +93837,33 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
**
** Code it as such, taking care to do the common subexpression
** elimination of x.
+**
+** The xJumpIf parameter determines details:
+**
+** NULL: Store the boolean result in reg[dest]
+** sqlite3ExprIfTrue: Jump to dest if true
+** sqlite3ExprIfFalse: Jump to dest if false
+**
+** The jumpIfNull parameter is ignored if xJumpIf is NULL.
*/
static void exprCodeBetween(
Parse *pParse, /* Parsing and code generating context */
Expr *pExpr, /* The BETWEEN expression */
- int dest, /* Jump here if the jump is taken */
- int jumpIfTrue, /* Take the jump if the BETWEEN is true */
+ int dest, /* Jump destination or storage location */
+ void (*xJump)(Parse*,Expr*,int,int), /* Action to take */
int jumpIfNull /* Take the jump if the BETWEEN is NULL */
){
- Expr exprAnd; /* The AND operator in x>=y AND x<=z */
+ Expr exprAnd; /* The AND operator in x>=y AND x<=z */
Expr compLeft; /* The x>=y term */
Expr compRight; /* The x<=z term */
Expr exprX; /* The x subexpression */
int regFree1 = 0; /* Temporary use register */
+
+ memset(&compLeft, 0, sizeof(Expr));
+ memset(&compRight, 0, sizeof(Expr));
+ memset(&exprAnd, 0, sizeof(Expr));
+
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
exprX = *pExpr->pLeft;
exprAnd.op = TK_AND;
@@ -92966,23 +93875,25 @@ static void exprCodeBetween(
compRight.op = TK_LE;
compRight.pLeft = &exprX;
compRight.pRight = pExpr->x.pList->a[1].pExpr;
- exprToRegister(&exprX, sqlite3ExprCodeTemp(pParse, &exprX, &regFree1));
- if( jumpIfTrue ){
- sqlite3ExprIfTrue(pParse, &exprAnd, dest, jumpIfNull);
+ exprToRegister(&exprX, exprCodeVector(pParse, &exprX, &regFree1));
+ if( xJump ){
+ xJump(pParse, &exprAnd, dest, jumpIfNull);
}else{
- sqlite3ExprIfFalse(pParse, &exprAnd, dest, jumpIfNull);
+ exprX.flags |= EP_FromJoin;
+ sqlite3ExprCodeTarget(pParse, &exprAnd, dest);
}
sqlite3ReleaseTempReg(pParse, regFree1);
/* Ensure adequate test coverage */
- testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1==0 );
- testcase( jumpIfTrue==0 && jumpIfNull==0 && regFree1!=0 );
- testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1==0 );
- testcase( jumpIfTrue==0 && jumpIfNull!=0 && regFree1!=0 );
- testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1==0 );
- testcase( jumpIfTrue!=0 && jumpIfNull==0 && regFree1!=0 );
- testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1==0 );
- testcase( jumpIfTrue!=0 && jumpIfNull!=0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull==0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfTrue && jumpIfNull!=0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull==0 && regFree1!=0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1==0 );
+ testcase( xJump==sqlite3ExprIfFalse && jumpIfNull!=0 && regFree1!=0 );
+ testcase( xJump==0 );
}
/*
@@ -93047,6 +93958,7 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
case TK_GE:
case TK_NE:
case TK_EQ: {
+ if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
testcase( jumpIfNull==0 );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
@@ -93079,7 +93991,7 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
}
case TK_BETWEEN: {
testcase( jumpIfNull==0 );
- exprCodeBetween(pParse, pExpr, dest, 1, jumpIfNull);
+ exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfTrue, jumpIfNull);
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
@@ -93093,6 +94005,7 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
}
#endif
default: {
+ default_expr:
if( exprAlwaysTrue(pExpr) ){
sqlite3VdbeGoto(v, dest);
}else if( exprAlwaysFalse(pExpr) ){
@@ -93199,6 +94112,7 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
case TK_GE:
case TK_NE:
case TK_EQ: {
+ if( sqlite3ExprIsVector(pExpr->pLeft) ) goto default_expr;
testcase( jumpIfNull==0 );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, &regFree1);
r2 = sqlite3ExprCodeTemp(pParse, pExpr->pRight, &regFree2);
@@ -93229,7 +94143,7 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
}
case TK_BETWEEN: {
testcase( jumpIfNull==0 );
- exprCodeBetween(pParse, pExpr, dest, 0, jumpIfNull);
+ exprCodeBetween(pParse, pExpr, dest, sqlite3ExprIfFalse, jumpIfNull);
break;
}
#ifndef SQLITE_OMIT_SUBQUERY
@@ -93245,6 +94159,7 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
}
#endif
default: {
+ default_expr:
if( exprAlwaysFalse(pExpr) ){
sqlite3VdbeGoto(v, dest);
}else if( exprAlwaysTrue(pExpr) ){
@@ -93749,7 +94664,7 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
if( iReg && pParse->nTempReg<ArraySize(pParse->aTempReg) ){
int i;
struct yColCache *p;
- for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){
+ for(i=0, p=pParse->aColCache; i<pParse->nColCache; i++, p++){
if( p->iReg==iReg ){
p->tempReg = 1;
return;
@@ -93760,10 +94675,11 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse *pParse, int iReg){
}
/*
-** Allocate or deallocate a block of nReg consecutive registers
+** Allocate or deallocate a block of nReg consecutive registers.
*/
SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){
int i, n;
+ if( nReg==1 ) return sqlite3GetTempReg(pParse);
i = pParse->iRangeReg;
n = pParse->nRangeReg;
if( nReg<=n ){
@@ -93777,6 +94693,10 @@ SQLITE_PRIVATE int sqlite3GetTempRange(Parse *pParse, int nReg){
return i;
}
SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse *pParse, int iReg, int nReg){
+ if( nReg==1 ){
+ sqlite3ReleaseTempReg(pParse, iReg);
+ return;
+ }
sqlite3ExprCacheRemove(pParse, iReg, nReg);
if( nReg>pParse->nRangeReg ){
pParse->nRangeReg = nReg;
@@ -94232,7 +95152,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
pTab = sqlite3LocateTableItem(pParse, 0, &pSrc->a[0]);
if( !pTab ) goto exit_rename_table;
iDb = sqlite3SchemaToIndex(pParse->db, pTab->pSchema);
- zDb = db->aDb[iDb].zName;
+ zDb = db->aDb[iDb].zDbSName;
db->flags |= SQLITE_PreferBuiltin;
/* Get a NULL terminated version of the new table name. */
@@ -94430,7 +95350,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
assert( sqlite3BtreeHoldsAllMutexes(db) );
iDb = sqlite3SchemaToIndex(db, pNew->pSchema);
- zDb = db->aDb[iDb].zName;
+ zDb = db->aDb[iDb].zDbSName;
zTab = &pNew->zName[16]; /* Skip the "sqlite_altertab_" prefix on the name */
pCol = &pNew->aCol[pNew->nCol-1];
pDflt = pCol->pDflt;
@@ -94840,14 +95760,14 @@ static void openStatTable(
for(i=0; i<ArraySize(aTable); i++){
const char *zTab = aTable[i].zName;
Table *pStat;
- if( (pStat = sqlite3FindTable(db, zTab, pDb->zName))==0 ){
+ if( (pStat = sqlite3FindTable(db, zTab, pDb->zDbSName))==0 ){
if( aTable[i].zCols ){
/* The sqlite_statN table does not exist. Create it. Note that a
** side-effect of the CREATE TABLE statement is to leave the rootpage
** of the new table in register pParse->regRoot. This is important
** because the OpenWrite opcode below will be needing it. */
sqlite3NestedParse(pParse,
- "CREATE TABLE %Q.%s(%s)", pDb->zName, zTab, aTable[i].zCols
+ "CREATE TABLE %Q.%s(%s)", pDb->zDbSName, zTab, aTable[i].zCols
);
aRoot[i] = pParse->regRoot;
aCreateTbl[i] = OPFLAG_P2ISREG;
@@ -94862,7 +95782,7 @@ static void openStatTable(
if( zWhere ){
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE %s=%Q",
- pDb->zName, zTab, zWhereType, zWhere
+ pDb->zDbSName, zTab, zWhereType, zWhere
);
}else{
/* The sqlite_stat[134] table already exists. Delete all rows. */
@@ -95624,7 +96544,7 @@ static void analyzeOneTable(
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
#ifndef SQLITE_OMIT_AUTHORIZATION
if( sqlite3AuthCheck(pParse, SQLITE_ANALYZE, pTab->zName, 0,
- db->aDb[iDb].zName ) ){
+ db->aDb[iDb].zDbSName ) ){
return;
}
#endif
@@ -96014,7 +96934,7 @@ SQLITE_PRIVATE void sqlite3Analyze(Parse *pParse, Token *pName1, Token *pName2){
/* Form 3: Analyze the fully qualified table name */
iDb = sqlite3TwoPartName(pParse, pName1, pName2, &pTableName);
if( iDb>=0 ){
- zDb = db->aDb[iDb].zName;
+ zDb = db->aDb[iDb].zDbSName;
z = sqlite3NameFromToken(db, pTableName);
if( z ){
if( (pIdx = sqlite3FindIndex(db, z, zDb))!=0 ){
@@ -96474,7 +97394,7 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){
/* Load new statistics out of the sqlite_stat1 table */
sInfo.db = db;
- sInfo.zDatabase = db->aDb[iDb].zName;
+ sInfo.zDatabase = db->aDb[iDb].zDbSName;
if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)!=0 ){
zSql = sqlite3MPrintf(db,
"SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase);
@@ -96617,7 +97537,7 @@ static void attachFunc(
goto attach_error;
}
for(i=0; i<db->nDb; i++){
- char *z = db->aDb[i].zName;
+ char *z = db->aDb[i].zDbSName;
assert( z && zName );
if( sqlite3StrICmp(z, zName)==0 ){
zErrDyn = sqlite3MPrintf(db, "database %s is already in use", zName);
@@ -96682,8 +97602,8 @@ static void attachFunc(
sqlite3BtreeLeave(aNew->pBt);
}
aNew->safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
- aNew->zName = sqlite3DbStrDup(db, zName);
- if( rc==SQLITE_OK && aNew->zName==0 ){
+ aNew->zDbSName = sqlite3DbStrDup(db, zName);
+ if( rc==SQLITE_OK && aNew->zDbSName==0 ){
rc = SQLITE_NOMEM_BKPT;
}
@@ -96712,7 +97632,7 @@ static void attachFunc(
case SQLITE_NULL:
/* No key specified. Use the key from the main database */
sqlite3CodecGetKey(db, 0, (void**)&zKey, &nKey);
- if( nKey>0 || sqlite3BtreeGetOptimalReserve(db->aDb[0].pBt)>0 ){
+ if( nKey || sqlite3BtreeGetOptimalReserve(db->aDb[0].pBt)>0 ){
rc = sqlite3CodecAttach(db, db->nDb-1, zKey, nKey);
}
break;
@@ -96795,7 +97715,7 @@ static void detachFunc(
for(i=0; i<db->nDb; i++){
pDb = &db->aDb[i];
if( pDb->pBt==0 ) continue;
- if( sqlite3StrICmp(pDb->zName, zName)==0 ) break;
+ if( sqlite3StrICmp(pDb->zDbSName, zName)==0 ) break;
}
if( i>=db->nDb ){
@@ -96953,7 +97873,7 @@ SQLITE_PRIVATE void sqlite3FixInit(
db = pParse->db;
assert( db->nDb>iDb );
pFix->pParse = pParse;
- pFix->zDb = db->aDb[iDb].zName;
+ pFix->zDb = db->aDb[iDb].zDbSName;
pFix->pSchema = db->aDb[iDb].pSchema;
pFix->zType = zType;
pFix->pName = pName;
@@ -97050,7 +97970,7 @@ SQLITE_PRIVATE int sqlite3FixExpr(
return 1;
}
}
- if( ExprHasProperty(pExpr, EP_TokenOnly) ) break;
+ if( ExprHasProperty(pExpr, EP_TokenOnly|EP_Leaf) ) break;
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
if( sqlite3FixSelect(pFix, pExpr->x.pSelect) ) return 1;
}else{
@@ -97171,7 +98091,7 @@ SQLITE_PRIVATE int sqlite3FixTriggerStep(
** Setting the auth function to NULL disables this hook. The default
** setting of the auth function is NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3 *db,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pArg
@@ -97211,9 +98131,9 @@ SQLITE_PRIVATE int sqlite3AuthReadCol(
const char *zCol, /* Column name */
int iDb /* Index of containing database. */
){
- sqlite3 *db = pParse->db; /* Database handle */
- char *zDb = db->aDb[iDb].zName; /* Name of attached database */
- int rc; /* Auth callback return code */
+ sqlite3 *db = pParse->db; /* Database handle */
+ char *zDb = db->aDb[iDb].zDbSName; /* Schema name of attached database */
+ int rc; /* Auth callback return code */
if( db->init.busy ) return SQLITE_OK;
rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext
@@ -97514,15 +98434,14 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
assert( !pParse->isMultiWrite
|| sqlite3VdbeAssertMayAbort(v, pParse->mayAbort));
if( v ){
- while( sqlite3VdbeDeletePriorOpcode(v, OP_Close) ){}
sqlite3VdbeAddOp0(v, OP_Halt);
#if SQLITE_USER_AUTHENTICATION
if( pParse->nTableLock>0 && db->init.busy==0 ){
sqlite3UserAuthInit(db);
if( db->auth.authLevel<UAUTH_User ){
- pParse->rc = SQLITE_AUTH_USER;
sqlite3ErrorMsg(pParse, "user not authenticated");
+ pParse->rc = SQLITE_AUTH_USER;
return;
}
}
@@ -97541,14 +98460,16 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init );
sqlite3VdbeJumpHere(v, 0);
for(iDb=0; iDb<db->nDb; iDb++){
+ Schema *pSchema;
if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue;
sqlite3VdbeUsesBtree(v, iDb);
+ pSchema = db->aDb[iDb].pSchema;
sqlite3VdbeAddOp4Int(v,
OP_Transaction, /* Opcode */
iDb, /* P1 */
DbMaskTest(pParse->writeMask,iDb), /* P2 */
- pParse->cookieValue[iDb], /* P3 */
- db->aDb[iDb].pSchema->iGeneration /* P4 */
+ pSchema->schema_cookie, /* P3 */
+ pSchema->iGeneration /* P4 */
);
if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1);
VdbeComment((v,
@@ -97599,16 +98520,6 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
}else{
pParse->rc = SQLITE_ERROR;
}
-
- /* We are done with this Parse object. There is no need to de-initialize it */
-#if 0
- pParse->colNamesSet = 0;
- pParse->nTab = 0;
- pParse->nMem = 0;
- pParse->nSet = 0;
- pParse->nVar = 0;
- DbMaskZero(pParse->cookieMask);
-#endif
}
/*
@@ -97628,8 +98539,7 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
char *zSql;
char *zErrMsg = 0;
sqlite3 *db = pParse->db;
-# define SAVE_SZ (sizeof(Parse) - offsetof(Parse,nVar))
- char saveBuf[SAVE_SZ];
+ char saveBuf[PARSE_TAIL_SZ];
if( pParse->nErr ) return;
assert( pParse->nested<10 ); /* Nesting should only be of limited depth */
@@ -97640,12 +98550,12 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
return; /* A malloc must have failed */
}
pParse->nested++;
- memcpy(saveBuf, &pParse->nVar, SAVE_SZ);
- memset(&pParse->nVar, 0, SAVE_SZ);
+ memcpy(saveBuf, PARSE_TAIL(pParse), PARSE_TAIL_SZ);
+ memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ);
sqlite3RunParser(pParse, zSql, &zErrMsg);
sqlite3DbFree(db, zErrMsg);
sqlite3DbFree(db, zSql);
- memcpy(&pParse->nVar, saveBuf, SAVE_SZ);
+ memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ);
pParse->nested--;
}
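
sqlite3NestedParse() preserves the recursive tail of the Parse object across the nested sqlite3RunParser() call by copying a trailing slice of the struct out and back; the diff only swaps the hand-written offsetof() arithmetic for PARSE_TAIL/PARSE_TAIL_SZ macros defined elsewhere in the amalgamation. A generic sketch of the same save/zero/restore idiom on a made-up struct, with illustrative macros that are not SQLite's:

    /* Hypothetical sketch of saving and restoring the tail of a struct,
    ** in the style of PARSE_TAIL()/PARSE_TAIL_SZ above. */
    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct Ctx {
      int persistent;      /* survives the nested call              */
      int nVar;            /* first field of the "tail" that resets */
      int scratch;
    };

    #define CTX_TAIL(p)   ((char*)(p) + offsetof(struct Ctx, nVar))
    #define CTX_TAIL_SZ   (sizeof(struct Ctx) - offsetof(struct Ctx, nVar))

    int main(void){
      struct Ctx c = { 1, 42, 99 };
      char save[CTX_TAIL_SZ];

      memcpy(save, CTX_TAIL(&c), CTX_TAIL_SZ);   /* save the tail            */
      memset(CTX_TAIL(&c), 0, CTX_TAIL_SZ);      /* nested work starts clean */
      c.nVar = 7;                                /* ... nested call runs ... */
      memcpy(CTX_TAIL(&c), save, CTX_TAIL_SZ);   /* restore caller state     */

      printf("persistent=%d nVar=%d scratch=%d\n", c.persistent, c.nVar, c.scratch);
      return 0;
    }
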
@@ -97686,10 +98596,11 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha
#endif
for(i=OMIT_TEMPDB; i<db->nDb; i++){
int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
- if( zDatabase!=0 && sqlite3StrICmp(zDatabase, db->aDb[j].zName) ) continue;
- assert( sqlite3SchemaMutexHeld(db, j, 0) );
- p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
- if( p ) break;
+ if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){
+ assert( sqlite3SchemaMutexHeld(db, j, 0) );
+ p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
+ if( p ) break;
+ }
}
return p;
}
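
The search loop above visits the attached databases but swaps indexes 0 and 1 so that the TEMP schema is probed before MAIN; "i^1" flips 0 and 1 and leaves every later index alone. The index trick in isolation (names invented):

    /* Hypothetical sketch of the "search TEMP before MAIN" index swap. */
    #include <stdio.h>

    int main(void){
      const char *azDb[] = { "main", "temp", "aux1", "aux2" };
      int i;
      for(i=0; i<4; i++){
        int j = (i<2) ? i^1 : i;   /* 0->1, 1->0, others unchanged */
        printf("probe #%d looks at %s (index %d)\n", i, azDb[j], j);
      }
      return 0;
    }
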
@@ -97763,7 +98674,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem(
assert( p->pSchema==0 || p->zDatabase==0 );
if( p->pSchema ){
int iDb = sqlite3SchemaToIndex(pParse->db, p->pSchema);
- zDb = pParse->db->aDb[iDb].zName;
+ zDb = pParse->db->aDb[iDb].zDbSName;
}else{
zDb = p->zDatabase;
}
@@ -97791,7 +98702,7 @@ SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3 *db, const char *zName, const cha
int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
Schema *pSchema = db->aDb[j].pSchema;
assert( pSchema );
- if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zName) ) continue;
+ if( zDb && sqlite3StrICmp(zDb, db->aDb[j].zDbSName) ) continue;
assert( sqlite3SchemaMutexHeld(db, j, 0) );
p = sqlite3HashFind(&pSchema->idxHash, zName);
if( p ) break;
@@ -97860,8 +98771,8 @@ SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3 *db){
for(i=j=2; i<db->nDb; i++){
struct Db *pDb = &db->aDb[i];
if( pDb->pBt==0 ){
- sqlite3DbFree(db, pDb->zName);
- pDb->zName = 0;
+ sqlite3DbFree(db, pDb->zDbSName);
+ pDb->zDbSName = 0;
continue;
}
if( j<i ){
@@ -98081,7 +98992,7 @@ SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){
if( zName ){
Db *pDb;
for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){
- if( 0==sqlite3StrICmp(pDb->zName, zName) ) break;
+ if( 0==sqlite3StrICmp(pDb->zDbSName, zName) ) break;
}
}
return i;
@@ -98140,7 +99051,7 @@ SQLITE_PRIVATE int sqlite3TwoPartName(
return -1;
}
}else{
- assert( db->init.iDb==0 || db->init.busy );
+ assert( db->init.iDb==0 || db->init.busy || (db->flags & SQLITE_Vacuum)!=0);
iDb = db->init.iDb;
*pUnqual = pName1;
}
@@ -98251,7 +99162,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
SQLITE_CREATE_VIEW,
SQLITE_CREATE_TEMP_VIEW
};
- char *zDb = db->aDb[iDb].zName;
+ char *zDb = db->aDb[iDb].zDbSName;
if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(isTemp), 0, zDb) ){
goto begin_table_error;
}
@@ -98270,7 +99181,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
** collisions.
*/
if( !IN_DECLARE_VTAB ){
- char *zDb = db->aDb[iDb].zName;
+ char *zDb = db->aDb[iDb].zDbSName;
if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){
goto begin_table_error;
}
@@ -98825,6 +99736,9 @@ SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char *zName){
** set back to prior value. But schema changes are infrequent
** and the probability of hitting the same cookie value is only
** 1 chance in 2^32. So we're safe enough.
+**
+** IMPLEMENTATION-OF: R-34230-56049 SQLite automatically increments
+** the schema-version whenever the schema changes.
*/
SQLITE_PRIVATE void sqlite3ChangeCookie(Parse *pParse, int iDb){
sqlite3 *db = pParse->db;
@@ -99363,7 +100277,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
"UPDATE %Q.%s "
"SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q "
"WHERE rowid=#%d",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
zType,
p->zName,
p->zName,
@@ -99378,13 +100292,13 @@ SQLITE_PRIVATE void sqlite3EndTable(
/* Check to see if we need to create an sqlite_sequence table for
** keeping track of autoincrement keys.
*/
- if( p->tabFlags & TF_Autoincrement ){
+ if( (p->tabFlags & TF_Autoincrement)!=0 ){
Db *pDb = &db->aDb[iDb];
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( pDb->pSchema->pSeqTab==0 ){
sqlite3NestedParse(pParse,
"CREATE TABLE %Q.sqlite_sequence(name,seq)",
- pDb->zName
+ pDb->zDbSName
);
}
}
@@ -99508,7 +100422,9 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
int nErr = 0; /* Number of errors encountered */
int n; /* Temporarily holds the number of cursors assigned */
sqlite3 *db = pParse->db; /* Database connection for malloc errors */
+#ifndef SQLITE_OMIT_AUTHORIZATION
sqlite3_xauth xAuth; /* Saved xAuth pointer */
+#endif
assert( pTable );
@@ -99698,7 +100614,7 @@ static void destroyRootPage(Parse *pParse, int iTable, int iDb){
*/
sqlite3NestedParse(pParse,
"UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d",
- pParse->db->aDb[iDb].zName, SCHEMA_TABLE(iDb), iTable, r1, r1);
+ pParse->db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), iTable, r1, r1);
#endif
sqlite3ReleaseTempReg(pParse, r1);
}
@@ -99774,7 +100690,7 @@ static void sqlite3ClearStatTables(
const char *zName /* Name of index or table */
){
int i;
- const char *zDbName = pParse->db->aDb[iDb].zName;
+ const char *zDbName = pParse->db->aDb[iDb].zDbSName;
for(i=1; i<=4; i++){
char zTab[24];
sqlite3_snprintf(sizeof(zTab),zTab,"sqlite_stat%d",i);
@@ -99827,7 +100743,7 @@ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, in
if( pTab->tabFlags & TF_Autoincrement ){
sqlite3NestedParse(pParse,
"DELETE FROM %Q.sqlite_sequence WHERE name=%Q",
- pDb->zName, pTab->zName
+ pDb->zDbSName, pTab->zName
);
}
#endif
@@ -99841,7 +100757,7 @@ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, in
*/
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'",
- pDb->zName, SCHEMA_TABLE(iDb), pTab->zName);
+ pDb->zDbSName, SCHEMA_TABLE(iDb), pTab->zName);
if( !isView && !IsVirtual(pTab) ){
destroyTable(pParse, pTab);
}
@@ -99895,7 +100811,7 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView,
{
int code;
const char *zTab = SCHEMA_TABLE(iDb);
- const char *zDb = db->aDb[iDb].zName;
+ const char *zDb = db->aDb[iDb].zDbSName;
const char *zArg2 = 0;
if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb)){
goto exit_drop_table;
@@ -100136,7 +101052,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
#ifndef SQLITE_OMIT_AUTHORIZATION
if( sqlite3AuthCheck(pParse, SQLITE_REINDEX, pIndex->zName, 0,
- db->aDb[iDb].zName ) ){
+ db->aDb[iDb].zDbSName ) ){
return;
}
#endif
@@ -100388,7 +101304,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
goto exit_create_index;
}
}
- if( sqlite3FindIndex(db, zName, pDb->zName)!=0 ){
+ if( sqlite3FindIndex(db, zName, pDb->zDbSName)!=0 ){
if( !ifNotExist ){
sqlite3ErrorMsg(pParse, "index %s already exists", zName);
}else{
@@ -100418,7 +101334,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
*/
#ifndef SQLITE_OMIT_AUTHORIZATION
{
- const char *zDb = pDb->zName;
+ const char *zDb = pDb->zDbSName;
if( sqlite3AuthCheck(pParse, SQLITE_INSERT, SCHEMA_TABLE(iDb), 0, zDb) ){
goto exit_create_index;
}
@@ -100733,7 +101649,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
*/
sqlite3NestedParse(pParse,
"INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
pIndex->zName,
pTab->zName,
iMem,
@@ -100867,7 +101783,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists
{
int code = SQLITE_DROP_INDEX;
Table *pTab = pIndex->pTable;
- const char *zDb = db->aDb[iDb].zName;
+ const char *zDb = db->aDb[iDb].zDbSName;
const char *zTab = SCHEMA_TABLE(iDb);
if( sqlite3AuthCheck(pParse, SQLITE_DELETE, zTab, 0, zDb) ){
goto exit_drop_index;
@@ -100885,7 +101801,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists
sqlite3BeginWriteOperation(pParse, 1, iDb);
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE name=%Q AND type='index'",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pIndex->zName
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pIndex->zName
);
sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName);
sqlite3ChangeCookie(pParse, iDb);
@@ -101406,15 +102322,13 @@ SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *pParse){
*/
SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse *pParse, int iDb){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
- sqlite3 *db = pToplevel->db;
- assert( iDb>=0 && iDb<db->nDb );
- assert( db->aDb[iDb].pBt!=0 || iDb==1 );
+ assert( iDb>=0 && iDb<pParse->db->nDb );
+ assert( pParse->db->aDb[iDb].pBt!=0 || iDb==1 );
assert( iDb<SQLITE_MAX_ATTACHED+2 );
- assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
+ assert( sqlite3SchemaMutexHeld(pParse->db, iDb, 0) );
if( DbMaskTest(pToplevel->cookieMask, iDb)==0 ){
DbMaskSet(pToplevel->cookieMask, iDb);
- pToplevel->cookieValue[iDb] = db->aDb[iDb].pSchema->schema_cookie;
if( !OMIT_TEMPDB && iDb==1 ){
sqlite3OpenTempDatabase(pToplevel);
}
@@ -101430,7 +102344,7 @@ SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse *pParse, const char *zDb)
int i;
for(i=0; i<db->nDb; i++){
Db *pDb = &db->aDb[i];
- if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zName)) ){
+ if( pDb->pBt && (!zDb || 0==sqlite3StrICmp(zDb, pDb->zDbSName)) ){
sqlite3CodeVerifySchema(pParse, i);
}
}
@@ -101677,7 +102591,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
if( iDb<0 ) return;
z = sqlite3NameFromToken(db, pObjName);
if( z==0 ) return;
- zDb = db->aDb[iDb].zName;
+ zDb = db->aDb[iDb].zDbSName;
pTab = sqlite3FindTable(db, z, zDb);
if( pTab ){
reindexTable(pParse, pTab, 0);
@@ -102391,7 +103305,7 @@ SQLITE_PRIVATE void sqlite3MaterializeView(
if( pFrom ){
assert( pFrom->nSrc==1 );
pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName);
- pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName);
+ pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName);
assert( pFrom->a[0].pOn==0 );
assert( pFrom->a[0].pUsing==0 );
}
@@ -102501,7 +103415,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
){
Vdbe *v; /* The virtual database engine */
Table *pTab; /* The table from which records will be deleted */
- const char *zDb; /* Name of database holding pTab */
int i; /* Loop counter */
WhereInfo *pWInfo; /* Information about the WHERE clause */
Index *pIdx; /* For looping over indices of the table */
@@ -102578,8 +103491,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
assert( iDb<db->nDb );
- zDb = db->aDb[iDb].zName;
- rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0, zDb);
+ rcauth = sqlite3AuthCheck(pParse, SQLITE_DELETE, pTab->zName, 0,
+ db->aDb[iDb].zDbSName);
assert( rcauth==SQLITE_OK || rcauth==SQLITE_DENY || rcauth==SQLITE_IGNORE );
if( rcauth==SQLITE_DENY ){
goto delete_from_cleanup;
@@ -102763,7 +103676,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
if( !isView ){
int iAddrOnce = 0;
if( eOnePass==ONEPASS_MULTI ){
- iAddrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ iAddrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
testcase( IsVirtual(pTab) );
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, OPFLAG_FORDELETE,
@@ -103941,14 +104854,14 @@ static int patternCompare(
/*
** The sqlite3_strglob() interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlobPattern, const char *zString){
+SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[')==0;
}
/*
** The sqlite3_strlike() interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){
+SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){
return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc)==0;
}
@@ -105910,7 +106823,7 @@ SQLITE_PRIVATE void sqlite3FkCheck(
if( (db->flags&SQLITE_ForeignKeys)==0 ) return;
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
- zDb = db->aDb[iDb].zName;
+ zDb = db->aDb[iDb].zDbSName;
/* Loop through all the foreign key constraints for which pTab is the
** child table (the table that the foreign key definition is part of). */
@@ -106281,10 +107194,10 @@ static Trigger *fkActionTrigger(
if( pDflt ){
pNew = sqlite3ExprDup(db, pDflt, 0);
}else{
- pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0);
+ pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0);
}
}else{
- pNew = sqlite3PExpr(pParse, TK_NULL, 0, 0, 0);
+ pNew = sqlite3ExprAlloc(db, TK_NULL, 0, 0);
}
pList = sqlite3ExprListAppend(pParse, pList, pNew);
sqlite3ExprListSetName(pParse, pList, &tFromCol, 0);
@@ -106650,7 +107563,9 @@ static int readsTable(Parse *p, int iDb, Table *pTab){
/*
** Locate or create an AutoincInfo structure associated with table pTab
** which is in database iDb. Return the register number for the register
-** that holds the maximum rowid.
+** that holds the maximum rowid. Return zero if pTab is not an AUTOINCREMENT
+** table. (Also return zero when doing a VACUUM since we do not want to
+** update the AUTOINCREMENT counters during a VACUUM.)
**
** There is at most one AutoincInfo structure per table even if the
** same table is autoincremented multiple times due to inserts within
@@ -106673,7 +107588,9 @@ static int autoIncBegin(
Table *pTab /* The table we are writing to */
){
int memId = 0; /* Register holding maximum rowid */
- if( pTab->tabFlags & TF_Autoincrement ){
+ if( (pTab->tabFlags & TF_Autoincrement)!=0
+ && (pParse->db->flags & SQLITE_Vacuum)==0
+ ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
AutoincInfo *pInfo;
@@ -106931,7 +107848,6 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3 *db; /* The main database structure */
Table *pTab; /* The table to insert into. aka TABLE */
char *zTab; /* Name of the table into which we are inserting */
- const char *zDb; /* Name of the database holding this table */
int i, j, idx; /* Loop counters */
Vdbe *v; /* Generate code into this virtual machine */
Index *pIdx; /* For looping over indices of the table */
@@ -106946,7 +107862,6 @@ SQLITE_PRIVATE void sqlite3Insert(
int addrCont = 0; /* Top of insert loop. Label "C" in templates 3 and 4 */
SelectDest dest; /* Destination for SELECT on rhs of INSERT */
int iDb; /* Index of database holding TABLE */
- Db *pDb; /* The database containing table being inserted into */
u8 useTempTable = 0; /* Store SELECT results in intermediate table */
u8 appendFlag = 0; /* True if the insert is likely to be an append */
u8 withoutRowid; /* 0 for normal table. 1 for WITHOUT ROWID table */
@@ -106996,9 +107911,8 @@ SQLITE_PRIVATE void sqlite3Insert(
}
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
assert( iDb<db->nDb );
- pDb = &db->aDb[iDb];
- zDb = pDb->zName;
- if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0, zDb) ){
+ if( sqlite3AuthCheck(pParse, SQLITE_INSERT, pTab->zName, 0,
+ db->aDb[iDb].zDbSName) ){
goto insert_cleanup;
}
withoutRowid = !HasRowid(pTab);
@@ -108227,15 +109141,15 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices(
for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
int iIdxCur = iBase++;
assert( pIdx->pSchema==pTab->pSchema );
+ if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
+ if( piDataCur ) *piDataCur = iIdxCur;
+ p5 = 0;
+ }
if( aToOpen==0 || aToOpen[i+1] ){
sqlite3VdbeAddOp3(v, op, iIdxCur, pIdx->tnum, iDb);
sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
- VdbeComment((v, "%s", pIdx->zName));
- }
- if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
- if( piDataCur ) *piDataCur = iIdxCur;
- }else{
sqlite3VdbeChangeP5(v, p5);
+ VdbeComment((v, "%s", pIdx->zName));
}
}
if( iBase>pParse->nTab ) pParse->nTab = iBase;
@@ -108626,6 +109540,7 @@ static int xferOptimization(
sqlite3ReleaseTempReg(pParse, regRowid);
sqlite3ReleaseTempReg(pParse, regData);
if( emptyDestTest ){
+ sqlite3AutoincrementEnd(pParse);
sqlite3VdbeAddOp2(v, OP_Halt, SQLITE_OK, 0);
sqlite3VdbeJumpHere(v, emptyDestTest);
sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
@@ -108667,7 +109582,7 @@ static int xferOptimization(
** argument to xCallback(). If xCallback=NULL then no callback
** is invoked, even for queries.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3 *db, /* The database on which the SQL executes */
const char *zSql, /* The SQL to be executed */
sqlite3_callback xCallback, /* Invoke this callback routine */
@@ -109929,7 +110844,7 @@ static int sqlite3LoadExtension(
db->aExtension[db->nExtension++] = handle;
return SQLITE_OK;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Use "sqlite3_extension_init" if 0 */
@@ -109960,7 +110875,7 @@ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){
** Enable or disable extension loading. Extension loading is disabled by
** default so as not to open security holes in older applications.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff){
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff){
sqlite3_mutex_enter(db->mutex);
if( onoff ){
db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc;
@@ -109971,18 +110886,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
return SQLITE_OK;
}
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-
-/*
-** The auto-extension code added regardless of whether or not extension
-** loading is supported. We need a dummy sqlite3Apis pointer for that
-** code if regular extension loading is not available. This is that
-** dummy pointer.
-*/
-#ifdef SQLITE_OMIT_LOAD_EXTENSION
-static const sqlite3_api_routines sqlite3Apis = { 0 };
-#endif
-
+#endif /* !defined(SQLITE_OMIT_LOAD_EXTENSION) */
/*
** The following object holds the list of automatically loaded
@@ -110017,7 +110921,7 @@ static SQLITE_WSD struct sqlite3AutoExtList {
** Register a statically linked extension that is automatically
** loaded by every new database connection.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(
+SQLITE_API int sqlite3_auto_extension(
void (*xInit)(void)
){
int rc = SQLITE_OK;
@@ -110064,7 +110968,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(
** Return 1 if xInit was found on the list and removed. Return 0 if xInit
** was not on the list.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(
+SQLITE_API int sqlite3_cancel_auto_extension(
void (*xInit)(void)
){
#if SQLITE_THREADSAFE
@@ -110089,7 +110993,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(
/*
** Reset the automatic extension loading mechanism.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void){
+SQLITE_API void sqlite3_reset_auto_extension(void){
#ifndef SQLITE_OMIT_AUTOINIT
if( sqlite3_initialize()==SQLITE_OK )
#endif
@@ -110127,6 +111031,11 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
#if SQLITE_THREADSAFE
sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
#endif
+#ifdef SQLITE_OMIT_LOAD_EXTENSION
+ const sqlite3_api_routines *pThunk = 0;
+#else
+ const sqlite3_api_routines *pThunk = &sqlite3Apis;
+#endif
sqlite3_mutex_enter(mutex);
if( i>=wsdAutoext.nExt ){
xInit = 0;
@@ -110136,7 +111045,7 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
}
sqlite3_mutex_leave(mutex);
zErrmsg = 0;
- if( xInit && (rc = xInit(db, &zErrmsg, &sqlite3Apis))!=0 ){
+ if( xInit && (rc = xInit(db, &zErrmsg, pThunk))!=0 ){
sqlite3ErrorWithMsg(db, rc,
"automatic extension loading failed: %s", zErrmsg);
go = 0;
@@ -110955,7 +111864,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
assert( pId2 );
- zDb = pId2->n>0 ? pDb->zName : 0;
+ zDb = pId2->n>0 ? pDb->zDbSName : 0;
if( sqlite3AuthCheck(pParse, SQLITE_PRAGMA, zLeft, zRight, zDb) ){
goto pragma_out;
}
@@ -111808,10 +112717,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) );
for(i=0; i<db->nDb; i++){
if( db->aDb[i].pBt==0 ) continue;
- assert( db->aDb[i].zName!=0 );
+ assert( db->aDb[i].zDbSName!=0 );
sqlite3VdbeMultiLoad(v, 1, "iss",
i,
- db->aDb[i].zName,
+ db->aDb[i].zDbSName,
sqlite3BtreeGetFilename(db->aDb[i].pBt));
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
}
@@ -112100,7 +113009,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3VdbeChangeP5(v, (u8)i);
addr = sqlite3VdbeAddOp1(v, OP_IsNull, 2); VdbeCoverage(v);
sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0,
- sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zName),
+ sqlite3MPrintf(db, "*** in database %s ***\n", db->aDb[i].zDbSName),
P4_DYNAMIC);
sqlite3VdbeAddOp3(v, OP_Move, 2, 4, 1);
sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 2);
@@ -112539,15 +113448,15 @@ SQLITE_PRIVATE void sqlite3Pragma(
Btree *pBt;
const char *zState = "unknown";
int j;
- if( db->aDb[i].zName==0 ) continue;
+ if( db->aDb[i].zDbSName==0 ) continue;
pBt = db->aDb[i].pBt;
if( pBt==0 || sqlite3BtreePager(pBt)==0 ){
zState = "closed";
- }else if( sqlite3_file_control(db, i ? db->aDb[i].zName : 0,
+ }else if( sqlite3_file_control(db, i ? db->aDb[i].zDbSName : 0,
SQLITE_FCNTL_LOCKSTATE, &j)==SQLITE_OK ){
zState = azLockName[j];
}
- sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zName, zState);
+ sqlite3VdbeMultiLoad(v, 1, "ss", db->aDb[i].zDbSName, zState);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 2);
}
break;
@@ -112683,6 +113592,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
** structures that describe the table, index, or view.
*/
int rc;
+ u8 saved_iDb = db->init.iDb;
sqlite3_stmt *pStmt;
TESTONLY(int rcp); /* Return code from sqlite3_prepare() */
@@ -112693,7 +113603,8 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
TESTONLY(rcp = ) sqlite3_prepare(db, argv[2], -1, &pStmt, 0);
rc = db->errCode;
assert( (rc&0xFF)==(rcp&0xFF) );
- db->init.iDb = 0;
+ db->init.iDb = saved_iDb;
+ assert( saved_iDb==0 || (db->flags & SQLITE_Vacuum)!=0 );
if( SQLITE_OK!=rc ){
if( db->init.orphanTrigger ){
assert( iDb==1 );
@@ -112717,7 +113628,7 @@ SQLITE_PRIVATE int sqlite3InitCallback(void *pInit, int argc, char **argv, char
** to do here is record the root page number for that index.
*/
Index *pIndex;
- pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zName);
+ pIndex = sqlite3FindIndex(db, argv[0], db->aDb[iDb].zDbSName);
if( pIndex==0 ){
/* This can occur if there exists an index on a TEMP table which
** has the same name as another index on a permanent index. Since
@@ -112896,7 +113807,7 @@ static int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg){
char *zSql;
zSql = sqlite3MPrintf(db,
"SELECT name, rootpage, sql FROM \"%w\".%s ORDER BY rowid",
- db->aDb[iDb].zName, zMasterName);
+ db->aDb[iDb].zDbSName, zMasterName);
#ifndef SQLITE_OMIT_AUTHORIZATION
{
sqlite3_xauth xAuth;
@@ -113126,18 +114037,14 @@ static int sqlite3Prepare(
sqlite3_stmt **ppStmt, /* OUT: A pointer to the prepared statement */
const char **pzTail /* OUT: End of parsed string */
){
- Parse *pParse; /* Parsing context */
char *zErrMsg = 0; /* Error message */
int rc = SQLITE_OK; /* Result code */
int i; /* Loop counter */
+ Parse sParse; /* Parsing context */
- /* Allocate the parsing context */
- pParse = sqlite3StackAllocZero(db, sizeof(*pParse));
- if( pParse==0 ){
- rc = SQLITE_NOMEM_BKPT;
- goto end_prepare;
- }
- pParse->pReprepare = pReprepare;
+ memset(&sParse, 0, PARSE_HDR_SZ);
+ memset(PARSE_TAIL(&sParse), 0, PARSE_TAIL_SZ);
+ sParse.pReprepare = pReprepare;
assert( ppStmt && *ppStmt==0 );
/* assert( !db->mallocFailed ); // not true with SQLITE_USE_ALLOCA */
assert( sqlite3_mutex_held(db->mutex) );
@@ -113171,7 +114078,7 @@ static int sqlite3Prepare(
assert( sqlite3BtreeHoldsMutex(pBt) );
rc = sqlite3BtreeSchemaLocked(pBt);
if( rc ){
- const char *zDb = db->aDb[i].zName;
+ const char *zDb = db->aDb[i].zDbSName;
sqlite3ErrorWithMsg(db, rc, "database schema is locked: %s", zDb);
testcase( db->flags & SQLITE_ReadUncommitted );
goto end_prepare;
@@ -113181,8 +114088,7 @@ static int sqlite3Prepare(
sqlite3VtabUnlockList(db);
- pParse->db = db;
- pParse->nQueryLoop = 0; /* Logarithmic, so 0 really means 1 */
+ sParse.db = db;
if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){
char *zSqlCopy;
int mxLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
@@ -113195,61 +114101,61 @@ static int sqlite3Prepare(
}
zSqlCopy = sqlite3DbStrNDup(db, zSql, nBytes);
if( zSqlCopy ){
- sqlite3RunParser(pParse, zSqlCopy, &zErrMsg);
- pParse->zTail = &zSql[pParse->zTail-zSqlCopy];
+ sqlite3RunParser(&sParse, zSqlCopy, &zErrMsg);
+ sParse.zTail = &zSql[sParse.zTail-zSqlCopy];
sqlite3DbFree(db, zSqlCopy);
}else{
- pParse->zTail = &zSql[nBytes];
+ sParse.zTail = &zSql[nBytes];
}
}else{
- sqlite3RunParser(pParse, zSql, &zErrMsg);
+ sqlite3RunParser(&sParse, zSql, &zErrMsg);
}
- assert( 0==pParse->nQueryLoop );
+ assert( 0==sParse.nQueryLoop );
- if( pParse->rc==SQLITE_DONE ) pParse->rc = SQLITE_OK;
- if( pParse->checkSchema ){
- schemaIsValid(pParse);
+ if( sParse.rc==SQLITE_DONE ) sParse.rc = SQLITE_OK;
+ if( sParse.checkSchema ){
+ schemaIsValid(&sParse);
}
if( db->mallocFailed ){
- pParse->rc = SQLITE_NOMEM_BKPT;
+ sParse.rc = SQLITE_NOMEM_BKPT;
}
if( pzTail ){
- *pzTail = pParse->zTail;
+ *pzTail = sParse.zTail;
}
- rc = pParse->rc;
+ rc = sParse.rc;
#ifndef SQLITE_OMIT_EXPLAIN
- if( rc==SQLITE_OK && pParse->pVdbe && pParse->explain ){
+ if( rc==SQLITE_OK && sParse.pVdbe && sParse.explain ){
static const char * const azColName[] = {
"addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment",
"selectid", "order", "from", "detail"
};
int iFirst, mx;
- if( pParse->explain==2 ){
- sqlite3VdbeSetNumCols(pParse->pVdbe, 4);
+ if( sParse.explain==2 ){
+ sqlite3VdbeSetNumCols(sParse.pVdbe, 4);
iFirst = 8;
mx = 12;
}else{
- sqlite3VdbeSetNumCols(pParse->pVdbe, 8);
+ sqlite3VdbeSetNumCols(sParse.pVdbe, 8);
iFirst = 0;
mx = 8;
}
for(i=iFirst; i<mx; i++){
- sqlite3VdbeSetColName(pParse->pVdbe, i-iFirst, COLNAME_NAME,
+ sqlite3VdbeSetColName(sParse.pVdbe, i-iFirst, COLNAME_NAME,
azColName[i], SQLITE_STATIC);
}
}
#endif
if( db->init.busy==0 ){
- Vdbe *pVdbe = pParse->pVdbe;
- sqlite3VdbeSetSql(pVdbe, zSql, (int)(pParse->zTail-zSql), saveSqlFlag);
+ Vdbe *pVdbe = sParse.pVdbe;
+ sqlite3VdbeSetSql(pVdbe, zSql, (int)(sParse.zTail-zSql), saveSqlFlag);
}
- if( pParse->pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){
- sqlite3VdbeFinalize(pParse->pVdbe);
+ if( sParse.pVdbe && (rc!=SQLITE_OK || db->mallocFailed) ){
+ sqlite3VdbeFinalize(sParse.pVdbe);
assert(!(*ppStmt));
}else{
- *ppStmt = (sqlite3_stmt*)pParse->pVdbe;
+ *ppStmt = (sqlite3_stmt*)sParse.pVdbe;
}
if( zErrMsg ){
@@ -113260,16 +114166,15 @@ static int sqlite3Prepare(
}
/* Delete any TriggerPrg structures allocated while parsing this statement. */
- while( pParse->pTriggerPrg ){
- TriggerPrg *pT = pParse->pTriggerPrg;
- pParse->pTriggerPrg = pT->pNext;
+ while( sParse.pTriggerPrg ){
+ TriggerPrg *pT = sParse.pTriggerPrg;
+ sParse.pTriggerPrg = pT->pNext;
sqlite3DbFree(db, pT);
}
end_prepare:
- sqlite3ParserReset(pParse);
- sqlite3StackFree(db, pParse);
+ sqlite3ParserReset(&sParse);
rc = sqlite3ApiExit(db, rc);
assert( (rc&db->errMask)==rc );
return rc;
@@ -113350,7 +114255,7 @@ SQLITE_PRIVATE int sqlite3Reprepare(Vdbe *p){
** and the statement is automatically recompiled if an schema change
** occurs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle. */
const char *zSql, /* UTF-8 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -113362,7 +114267,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle. */
const char *zSql, /* UTF-8 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -113438,7 +114343,7 @@ static int sqlite3Prepare16(
** and the statement is automatically recompiled if an schema change
** occurs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle. */
const void *zSql, /* UTF-16 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -113450,7 +114355,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
assert( rc==SQLITE_OK || ppStmt==0 || *ppStmt==0 ); /* VERIFY: F13021 */
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle. */
const void *zSql, /* UTF-16 encoded SQL statement. */
int nBytes, /* Length of zSql in bytes. */
@@ -113557,7 +114462,7 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){
SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest *pDest, int eDest, int iParm){
pDest->eDest = (u8)eDest;
pDest->iSDParm = iParm;
- pDest->affSdst = 0;
+ pDest->zAffSdst = 0;
pDest->iSdst = 0;
pDest->nSdst = 0;
}
@@ -114128,30 +115033,6 @@ static void codeDistinct(
sqlite3ReleaseTempReg(pParse, r1);
}
-#ifndef SQLITE_OMIT_SUBQUERY
-/*
-** Generate an error message when a SELECT is used within a subexpression
-** (example: "a IN (SELECT * FROM table)") but it has more than 1 result
-** column. We do this in a subroutine because the error used to occur
-** in multiple places. (The error only occurs in one place now, but we
-** retain the subroutine to minimize code disruption.)
-*/
-static int checkForMultiColumnSelectError(
- Parse *pParse, /* Parse context. */
- SelectDest *pDest, /* Destination of SELECT results */
- int nExpr /* Number of result columns returned by SELECT */
-){
- int eDest = pDest->eDest;
- if( nExpr>1 && (eDest==SRT_Mem || eDest==SRT_Set) ){
- sqlite3ErrorMsg(pParse, "only a single result allowed for "
- "a SELECT that is part of an expression");
- return 1;
- }else{
- return 0;
- }
-}
-#endif
-
/*
** This routine generates the code for the inside of the inner loop
** of a SELECT.
@@ -114361,19 +115242,19 @@ static void selectInnerLoop(
** item into the set table with bogus data.
*/
case SRT_Set: {
- assert( nResultCol==1 );
- pDest->affSdst =
- sqlite3CompareAffinity(pEList->a[0].pExpr, pDest->affSdst);
if( pSort ){
/* At first glance you would think we could optimize out the
** ORDER BY in this case since the order of entries in the set
** does not matter. But there might be a LIMIT clause, in which
** case the order does matter */
- pushOntoSorter(pParse, pSort, p, regResult, regResult, 1, nPrefixReg);
+ pushOntoSorter(
+ pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg);
}else{
int r1 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult,1,r1, &pDest->affSdst, 1);
- sqlite3ExprCacheAffinityChange(pParse, regResult, 1);
+ assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol,
+ r1, pDest->zAffSdst, nResultCol);
+ sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol);
sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
sqlite3ReleaseTempReg(pParse, r1);
}
@@ -114389,13 +115270,14 @@ static void selectInnerLoop(
}
/* If this is a scalar select that is part of an expression, then
- ** store the results in the appropriate memory cell and break out
- ** of the scan loop.
+ ** store the results in the appropriate memory cell or array of
+ ** memory cells and break out of the scan loop.
*/
case SRT_Mem: {
- assert( nResultCol==1 );
+ assert( nResultCol==pDest->nSdst );
if( pSort ){
- pushOntoSorter(pParse, pSort, p, regResult, regResult, 1, nPrefixReg);
+ pushOntoSorter(
+ pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg);
}else{
assert( regResult==iParm );
/* The LIMIT clause will jump out of the loop for us */
@@ -114497,7 +115379,7 @@ static void selectInnerLoop(
*/
SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){
int nExtra = (N+X)*(sizeof(CollSeq*)+1);
- KeyInfo *p = sqlite3DbMallocRaw(db, sizeof(KeyInfo) + nExtra);
+ KeyInfo *p = sqlite3DbMallocRawNN(db, sizeof(KeyInfo) + nExtra);
if( p ){
p->aSortOrder = (u8*)&p->aColl[N+X];
p->nField = (u16)N;
@@ -114710,21 +115592,21 @@ static void generateSortTail(
sqlite3VdbeResolveLabel(v, pSort->labelBkOut);
}
iTab = pSort->iECursor;
- if( eDest==SRT_Output || eDest==SRT_Coroutine ){
+ if( eDest==SRT_Output || eDest==SRT_Coroutine || eDest==SRT_Mem ){
regRowid = 0;
regRow = pDest->iSdst;
nSortData = nColumn;
}else{
regRowid = sqlite3GetTempReg(pParse);
- regRow = sqlite3GetTempReg(pParse);
- nSortData = 1;
+ regRow = sqlite3GetTempRange(pParse, nColumn);
+ nSortData = nColumn;
}
nKey = pOrderBy->nExpr - pSort->nOBSat;
if( pSort->sortFlags & SORTFLAG_UseSorter ){
int regSortOut = ++pParse->nMem;
iSortTab = pParse->nTab++;
if( pSort->labelBkOut ){
- addrOnce = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
sqlite3VdbeAddOp3(v, OP_OpenPseudo, iSortTab, regSortOut, nKey+1+nSortData);
if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
@@ -114752,16 +115634,14 @@ static void generateSortTail(
}
#ifndef SQLITE_OMIT_SUBQUERY
case SRT_Set: {
- assert( nColumn==1 );
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, 1, regRowid,
- &pDest->affSdst, 1);
- sqlite3ExprCacheAffinityChange(pParse, regRow, 1);
+ assert( nColumn==sqlite3Strlen30(pDest->zAffSdst) );
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid,
+ pDest->zAffSdst, nColumn);
+ sqlite3ExprCacheAffinityChange(pParse, regRow, nColumn);
sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid);
break;
}
case SRT_Mem: {
- assert( nColumn==1 );
- sqlite3ExprCodeMove(pParse, regRow, iParm, 1);
/* The LIMIT clause will terminate the loop for us */
break;
}
@@ -114780,7 +115660,11 @@ static void generateSortTail(
}
}
if( regRowid ){
- sqlite3ReleaseTempReg(pParse, regRow);
+ if( eDest==SRT_Set ){
+ sqlite3ReleaseTempRange(pParse, regRow, nColumn);
+ }else{
+ sqlite3ReleaseTempReg(pParse, regRow);
+ }
sqlite3ReleaseTempReg(pParse, regRowid);
}
/* The bottom of the loop
@@ -114927,7 +115811,7 @@ static const char *columnTypeImpl(
zOrigTab = pTab->zName;
if( pNC->pParse ){
int iDb = sqlite3SchemaToIndex(pNC->pParse->db, pTab->pSchema);
- zOrigDb = pNC->pParse->db->aDb[iDb].zName;
+ zOrigDb = pNC->pParse->db->aDb[iDb].zDbSName;
}
#else
if( iCol<0 ){
@@ -115282,7 +116166,7 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){
*/
static SQLITE_NOINLINE Vdbe *allocVdbe(Parse *pParse){
Vdbe *v = pParse->pVdbe = sqlite3VdbeCreate(pParse);
- if( v ) sqlite3VdbeAddOp0(v, OP_Init);
+ if( v ) sqlite3VdbeAddOp2(v, OP_Init, 0, 1);
if( pParse->pToplevel==0
&& OptimizationEnabled(pParse->db,SQLITE_FactorOutConst)
){
@@ -116121,18 +117005,15 @@ static int generateOutputSubroutine(
}
#ifndef SQLITE_OMIT_SUBQUERY
- /* If we are creating a set for an "expr IN (SELECT ...)" construct,
- ** then there should be a single item on the stack. Write this
- ** item into the set table with bogus data.
+ /* If we are creating a set for an "expr IN (SELECT ...)".
*/
case SRT_Set: {
int r1;
- assert( pIn->nSdst==1 || pParse->nErr>0 );
- pDest->affSdst =
- sqlite3CompareAffinity(p->pEList->a[0].pExpr, pDest->affSdst);
+ testcase( pIn->nSdst>1 );
r1 = sqlite3GetTempReg(pParse);
- sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, 1, r1, &pDest->affSdst,1);
- sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, 1);
+ sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst,
+ r1, pDest->zAffSdst, pIn->nSdst);
+ sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst);
sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iSDParm, r1);
sqlite3ReleaseTempReg(pParse, r1);
break;
@@ -117188,12 +118069,13 @@ static int flattenSubquery(
assert( pParent->pHaving==0 );
pParent->pHaving = pParent->pWhere;
pParent->pWhere = pWhere;
- pParent->pHaving = sqlite3ExprAnd(db, pParent->pHaving,
- sqlite3ExprDup(db, pSub->pHaving, 0));
+ pParent->pHaving = sqlite3ExprAnd(db,
+ sqlite3ExprDup(db, pSub->pHaving, 0), pParent->pHaving
+ );
assert( pParent->pGroupBy==0 );
pParent->pGroupBy = sqlite3ExprListDup(db, pSub->pGroupBy, 0);
}else{
- pParent->pWhere = sqlite3ExprAnd(db, pParent->pWhere, pWhere);
+ pParent->pWhere = sqlite3ExprAnd(db, pWhere, pParent->pWhere);
}
substSelect(db, pParent, iParent, pSub->pEList, 0);
@@ -117883,7 +118765,7 @@ static int selectExpander(Walker *pWalker, Select *p){
continue;
}
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
- zSchemaName = iDb>=0 ? db->aDb[iDb].zName : "*";
+ zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*";
}
for(j=0; j<pTab->nCol; j++){
char *zName = pTab->aCol[j].zName;
@@ -118366,16 +119248,6 @@ SQLITE_PRIVATE int sqlite3Select(
}
#endif
-
- /* If writing to memory or generating a set
- ** only a single column may be output.
- */
-#ifndef SQLITE_OMIT_SUBQUERY
- if( checkForMultiColumnSelectError(pParse, pDest, p->pEList->nExpr) ){
- goto select_end;
- }
-#endif
-
/* Try to flatten subqueries in the FROM clause up into the main query
*/
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
@@ -118530,7 +119402,7 @@ SQLITE_PRIVATE int sqlite3Select(
/* If the subquery is not correlated and if we are not inside of
** a trigger, then we only need to compute the value of the subquery
** once. */
- onceAddr = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ onceAddr = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
VdbeComment((v, "materialize \"%s\"", pItem->pTab->zName));
}else{
VdbeNoopComment((v, "materialize \"%s\"", pItem->pTab->zName));
@@ -119293,7 +120165,7 @@ malloc_failed:
** Instead, the entire table should be passed to sqlite3_free_table() when
** the calling procedure is finished using it.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* The database on which the SQL executes */
const char *zSql, /* The SQL to be executed */
char ***pazResult, /* Write the result table here */
@@ -119362,7 +120234,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
/*
** This routine frees the space the sqlite3_get_table() malloced.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(
+SQLITE_API void sqlite3_free_table(
char **azResult /* Result returned from sqlite3_get_table() */
){
if( azResult ){
@@ -119477,7 +120349,6 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
int iDb; /* The database to store the trigger in */
Token *pName; /* The unqualified db name */
DbFixer sFix; /* State vector for the DB fixer */
- int iTabDb; /* Index of the database holding pTab */
assert( pName1!=0 ); /* pName1->z might be NULL, but not pName1 itself */
assert( pName2!=0 );
@@ -119590,13 +120461,13 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
" trigger on table: %S", pTableName, 0);
goto trigger_cleanup;
}
- iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
#ifndef SQLITE_OMIT_AUTHORIZATION
{
+ int iTabDb = sqlite3SchemaToIndex(db, pTab->pSchema);
int code = SQLITE_CREATE_TRIGGER;
- const char *zDb = db->aDb[iTabDb].zName;
- const char *zDbTrig = isTemp ? db->aDb[1].zName : zDb;
+ const char *zDb = db->aDb[iTabDb].zDbSName;
+ const char *zDbTrig = isTemp ? db->aDb[1].zDbSName : zDb;
if( iTabDb==1 || isTemp ) code = SQLITE_CREATE_TEMP_TRIGGER;
if( sqlite3AuthCheck(pParse, code, zName, pTab->zName, zDbTrig) ){
goto trigger_cleanup;
@@ -119690,7 +120561,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n);
sqlite3NestedParse(pParse,
"INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb), zName,
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), zName,
pTrig->table, z);
sqlite3DbFree(db, z);
sqlite3ChangeCookie(pParse, iDb);
@@ -119879,7 +120750,7 @@ SQLITE_PRIVATE void sqlite3DropTrigger(Parse *pParse, SrcList *pName, int noErr)
assert( zDb!=0 || sqlite3BtreeHoldsAllMutexes(db) );
for(i=OMIT_TEMPDB; i<db->nDb; i++){
int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
- if( zDb && sqlite3StrICmp(db->aDb[j].zName, zDb) ) continue;
+ if( zDb && sqlite3StrICmp(db->aDb[j].zDbSName, zDb) ) continue;
assert( sqlite3SchemaMutexHeld(db, j, 0) );
pTrigger = sqlite3HashFind(&(db->aDb[j].pSchema->trigHash), zName);
if( pTrigger ) break;
@@ -119925,7 +120796,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
#ifndef SQLITE_OMIT_AUTHORIZATION
{
int code = SQLITE_DROP_TRIGGER;
- const char *zDb = db->aDb[iDb].zName;
+ const char *zDb = db->aDb[iDb].zDbSName;
const char *zTab = SCHEMA_TABLE(iDb);
if( iDb==1 ) code = SQLITE_DROP_TEMP_TRIGGER;
if( sqlite3AuthCheck(pParse, code, pTrigger->zName, pTable->zName, zDb) ||
@@ -119941,7 +120812,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
if( (v = sqlite3GetVdbe(pParse))!=0 ){
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE name=%Q AND type='trigger'",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb), pTrigger->zName
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pTrigger->zName
);
sqlite3ChangeCookie(pParse, iDb);
sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0);
@@ -120044,8 +120915,10 @@ static SrcList *targetSrcList(
pSrc->a[pSrc->nSrc-1].zName = sqlite3DbStrDup(db, pStep->zTarget);
iDb = sqlite3SchemaToIndex(db, pStep->pTrig->pSchema);
if( iDb==0 || iDb>=2 ){
+ const char *zDb;
assert( iDb<db->nDb );
- pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zName);
+ zDb = db->aDb[iDb].zDbSName;
+ pSrc->a[pSrc->nSrc-1].zDatabase = sqlite3DbStrDup(db, zDb);
}
}
return pSrc;
@@ -120259,7 +121132,6 @@ static TriggerPrg *codeRowTrigger(
}
pProgram->nMem = pSubParse->nMem;
pProgram->nCsr = pSubParse->nTab;
- pProgram->nOnce = pSubParse->nOnce;
pProgram->token = (void *)pTrigger;
pPrg->aColmask[0] = pSubParse->oldmask;
pPrg->aColmask[1] = pSubParse->newmask;
@@ -120732,7 +121604,7 @@ SQLITE_PRIVATE void sqlite3Update(
int rc;
rc = sqlite3AuthCheck(pParse, SQLITE_UPDATE, pTab->zName,
j<0 ? "ROWID" : pTab->aCol[j].zName,
- db->aDb[iDb].zName);
+ db->aDb[iDb].zDbSName);
if( rc==SQLITE_DENY ){
goto update_cleanup;
}else if( rc==SQLITE_IGNORE ){
@@ -121334,57 +122206,52 @@ static void updateVirtualTable(
/* #include "vdbeInt.h" */
#if !defined(SQLITE_OMIT_VACUUM) && !defined(SQLITE_OMIT_ATTACH)
-/*
-** Finalize a prepared statement. If there was an error, store the
-** text of the error message in *pzErrMsg. Return the result code.
-*/
-static int vacuumFinalize(sqlite3 *db, sqlite3_stmt *pStmt, char **pzErrMsg){
- int rc;
- rc = sqlite3VdbeFinalize((Vdbe*)pStmt);
- if( rc ){
- sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db));
- }
- return rc;
-}
/*
-** Execute zSql on database db. Return an error code.
+** Execute zSql on database db.
+**
+** If zSql returns rows, then each row will have exactly one
+** column. (This will only happen if zSql begins with "SELECT".)
+** Take each row of result and call execSql() again recursively.
+**
+** The execSqlF() routine does the same thing, except it accepts
+** a format string as its third argument
*/
static int execSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
sqlite3_stmt *pStmt;
- VVA_ONLY( int rc; )
- if( !zSql ){
- return SQLITE_NOMEM_BKPT;
- }
- if( SQLITE_OK!=sqlite3_prepare(db, zSql, -1, &pStmt, 0) ){
- sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db));
- return sqlite3_errcode(db);
- }
- VVA_ONLY( rc = ) sqlite3_step(pStmt);
- assert( rc!=SQLITE_ROW || (db->flags&SQLITE_CountRows) );
- return vacuumFinalize(db, pStmt, pzErrMsg);
-}
-
-/*
-** Execute zSql on database db. The statement returns exactly
-** one column. Execute this as SQL on the same database.
-*/
-static int execExecSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
- sqlite3_stmt *pStmt;
int rc;
- rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0);
+ /* printf("SQL: [%s]\n", zSql); fflush(stdout); */
+ rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
if( rc!=SQLITE_OK ) return rc;
-
- while( SQLITE_ROW==sqlite3_step(pStmt) ){
- rc = execSql(db, pzErrMsg, (char*)sqlite3_column_text(pStmt, 0));
- if( rc!=SQLITE_OK ){
- vacuumFinalize(db, pStmt, pzErrMsg);
- return rc;
+ while( SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){
+ const char *zSubSql = (const char*)sqlite3_column_text(pStmt,0);
+ assert( sqlite3_strnicmp(zSql,"SELECT",6)==0 );
+ if( zSubSql ){
+ assert( zSubSql[0]!='S' );
+ rc = execSql(db, pzErrMsg, zSubSql);
+ if( rc!=SQLITE_OK ) break;
}
}
-
- return vacuumFinalize(db, pStmt, pzErrMsg);
+ assert( rc!=SQLITE_ROW );
+ if( rc==SQLITE_DONE ) rc = SQLITE_OK;
+ if( rc ){
+ sqlite3SetString(pzErrMsg, db, sqlite3_errmsg(db));
+ }
+ (void)sqlite3_finalize(pStmt);
+ return rc;
+}
+static int execSqlF(sqlite3 *db, char **pzErrMsg, const char *zSql, ...){
+ char *z;
+ va_list ap;
+ int rc;
+ va_start(ap, zSql);
+ z = sqlite3VMPrintf(db, zSql, ap);
+ va_end(ap);
+ if( z==0 ) return SQLITE_NOMEM;
+ rc = execSql(db, pzErrMsg, z);
+ sqlite3DbFree(db, z);
+ return rc;
}
/*
@@ -121417,11 +122284,12 @@ static int execExecSql(sqlite3 *db, char **pzErrMsg, const char *zSql){
** transient would cause the database file to appear to be deleted
** following reboot.
*/
-SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse){
+SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse, Token *pNm){
Vdbe *v = sqlite3GetVdbe(pParse);
- if( v ){
- sqlite3VdbeAddOp2(v, OP_Vacuum, 0, 0);
- sqlite3VdbeUsesBtree(v, 0);
+ int iDb = pNm ? sqlite3TwoPartName(pParse, pNm, pNm, &pNm) : 0;
+ if( v && (iDb>=2 || iDb==0) ){
+ sqlite3VdbeAddOp1(v, OP_Vacuum, iDb);
+ sqlite3VdbeUsesBtree(v, iDb);
}
return;
}
@@ -121429,11 +122297,10 @@ SQLITE_PRIVATE void sqlite3Vacuum(Parse *pParse){
/*
** This routine implements the OP_Vacuum opcode of the VDBE.
*/
-SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
+SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db, int iDb){
int rc = SQLITE_OK; /* Return code from service routines */
Btree *pMain; /* The database being vacuumed */
Btree *pTemp; /* The temporary database we vacuum into */
- char *zSql = 0; /* SQL statements */
int saved_flags; /* Saved value of the db->flags */
int saved_nChange; /* Saved value of db->nChange */
int saved_nTotalChange; /* Saved value of db->nTotalChange */
@@ -121442,6 +122309,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
int isMemDb; /* True if vacuuming a :memory: database */
int nRes; /* Bytes of reserved space at the end of each page */
int nDb; /* Number of attached databases */
+ const char *zDbMain; /* Schema name of database to vacuum */
if( !db->autoCommit ){
sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
@@ -121459,11 +122327,13 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
saved_nChange = db->nChange;
saved_nTotalChange = db->nTotalChange;
saved_mTrace = db->mTrace;
- db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin;
- db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder);
+ db->flags |= (SQLITE_WriteSchema | SQLITE_IgnoreChecks
+ | SQLITE_PreferBuiltin | SQLITE_Vacuum);
+ db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder | SQLITE_CountRows);
db->mTrace = 0;
- pMain = db->aDb[0].pBt;
+ zDbMain = db->aDb[iDb].zDbSName;
+ pMain = db->aDb[iDb].pBt;
isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain));
/* Attach the temporary database as 'vacuum_db'. The synchronous pragma
@@ -121481,18 +122351,12 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
** to write the journal header file.
*/
nDb = db->nDb;
- if( sqlite3TempInMemory(db) ){
- zSql = "ATTACH ':memory:' AS vacuum_db;";
- }else{
- zSql = "ATTACH '' AS vacuum_db;";
- }
- rc = execSql(db, pzErrMsg, zSql);
- if( db->nDb>nDb ){
- pDb = &db->aDb[db->nDb-1];
- assert( strcmp(pDb->zName,"vacuum_db")==0 );
- }
+ rc = execSql(db, pzErrMsg, "ATTACH''AS vacuum_db");
if( rc!=SQLITE_OK ) goto end_of_vacuum;
- pTemp = db->aDb[db->nDb-1].pBt;
+ assert( (db->nDb-1)==nDb );
+ pDb = &db->aDb[nDb];
+ assert( strcmp(pDb->zDbSName,"vacuum_db")==0 );
+ pTemp = pDb->pBt;
/* The call to execSql() to attach the temp database has left the file
** locked (as there was more than one active statement when the transaction
@@ -121513,16 +122377,15 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
}
#endif
- sqlite3BtreeSetCacheSize(pTemp, db->aDb[0].pSchema->cache_size);
+ sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0));
- rc = execSql(db, pzErrMsg, "PRAGMA vacuum_db.synchronous=OFF");
- if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL);
/* Begin a transaction and take an exclusive lock on the main database
** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
** to ensure that we do not try to change the page-size on a WAL database.
*/
- rc = execSql(db, pzErrMsg, "BEGIN;");
+ rc = execSql(db, pzErrMsg, "BEGIN");
if( rc!=SQLITE_OK ) goto end_of_vacuum;
rc = sqlite3BtreeBeginTrans(pMain, 2);
if( rc!=SQLITE_OK ) goto end_of_vacuum;
@@ -121549,64 +122412,48 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
/* Query the schema of the main database. Create a mirror schema
** in the temporary database.
*/
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'CREATE TABLE vacuum_db.' || substr(sql,14) "
- " FROM sqlite_master WHERE type='table' AND name!='sqlite_sequence'"
- " AND coalesce(rootpage,1)>0"
+ db->init.iDb = nDb; /* force new CREATE statements into vacuum_db */
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT sql FROM \"%w\".sqlite_master"
+ " WHERE type='table'AND name<>'sqlite_sequence'"
+ " AND coalesce(rootpage,1)>0",
+ zDbMain
);
if( rc!=SQLITE_OK ) goto end_of_vacuum;
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'CREATE INDEX vacuum_db.' || substr(sql,14)"
- " FROM sqlite_master WHERE sql LIKE 'CREATE INDEX %' ");
- if( rc!=SQLITE_OK ) goto end_of_vacuum;
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'CREATE UNIQUE INDEX vacuum_db.' || substr(sql,21) "
- " FROM sqlite_master WHERE sql LIKE 'CREATE UNIQUE INDEX %'");
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT sql FROM \"%w\".sqlite_master"
+ " WHERE type='index' AND length(sql)>10",
+ zDbMain
+ );
if( rc!=SQLITE_OK ) goto end_of_vacuum;
+ db->init.iDb = 0;
/* Loop through the tables in the main database. For each, do
** an "INSERT INTO vacuum_db.xxx SELECT * FROM main.xxx;" to copy
** the contents to the temporary database.
*/
- assert( (db->flags & SQLITE_Vacuum)==0 );
- db->flags |= SQLITE_Vacuum;
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
- "|| ' SELECT * FROM main.' || quote(name) || ';'"
- "FROM main.sqlite_master "
- "WHERE type = 'table' AND name!='sqlite_sequence' "
- " AND coalesce(rootpage,1)>0"
+ rc = execSqlF(db, pzErrMsg,
+ "SELECT'INSERT INTO vacuum_db.'||quote(name)"
+ "||' SELECT*FROM\"%w\".'||quote(name)"
+ "FROM vacuum_db.sqlite_master "
+ "WHERE type='table'AND coalesce(rootpage,1)>0",
+ zDbMain
);
assert( (db->flags & SQLITE_Vacuum)!=0 );
db->flags &= ~SQLITE_Vacuum;
if( rc!=SQLITE_OK ) goto end_of_vacuum;
- /* Copy over the sequence table
- */
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'DELETE FROM vacuum_db.' || quote(name) || ';' "
- "FROM vacuum_db.sqlite_master WHERE name='sqlite_sequence' "
- );
- if( rc!=SQLITE_OK ) goto end_of_vacuum;
- rc = execExecSql(db, pzErrMsg,
- "SELECT 'INSERT INTO vacuum_db.' || quote(name) "
- "|| ' SELECT * FROM main.' || quote(name) || ';' "
- "FROM vacuum_db.sqlite_master WHERE name=='sqlite_sequence';"
- );
- if( rc!=SQLITE_OK ) goto end_of_vacuum;
-
-
/* Copy the triggers, views, and virtual tables from the main database
** over to the temporary database. None of these objects has any
** associated storage, so all we have to do is copy their entries
** from the SQLITE_MASTER table.
*/
- rc = execSql(db, pzErrMsg,
- "INSERT INTO vacuum_db.sqlite_master "
- " SELECT type, name, tbl_name, rootpage, sql"
- " FROM main.sqlite_master"
- " WHERE type='view' OR type='trigger'"
- " OR (type='table' AND rootpage=0)"
+ rc = execSqlF(db, pzErrMsg,
+ "INSERT INTO vacuum_db.sqlite_master"
+ " SELECT*FROM \"%w\".sqlite_master"
+ " WHERE type IN('view','trigger')"
+ " OR(type='table'AND rootpage=0)",
+ zDbMain
);
if( rc ) goto end_of_vacuum;
@@ -121660,6 +122507,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){
end_of_vacuum:
/* Restore the original value of db->flags */
+ db->init.iDb = 0;
db->flags = saved_flags;
db->nChange = saved_nChange;
db->nTotalChange = saved_nTotalChange;
@@ -121772,7 +122620,7 @@ static int createModule(
/*
** External API function used to create a new virtual-table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* Database in which module is registered */
const char *zName, /* Name assigned to this module */
const sqlite3_module *pModule, /* The definition of the module */
@@ -121787,7 +122635,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
/*
** External API function used to create a new virtual-table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* Database in which module is registered */
const char *zName, /* Name assigned to this module */
const sqlite3_module *pModule, /* The definition of the module */
@@ -122038,7 +122886,7 @@ SQLITE_PRIVATE void sqlite3VtabBeginParse(
*/
if( pTable->azModuleArg ){
sqlite3AuthCheck(pParse, SQLITE_CREATE_VTABLE, pTable->zName,
- pTable->azModuleArg[0], pParse->db->aDb[iDb].zName);
+ pTable->azModuleArg[0], pParse->db->aDb[iDb].zDbSName);
}
#endif
}
@@ -122102,7 +122950,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
"UPDATE %Q.%s "
"SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q "
"WHERE rowid=#%d",
- db->aDb[iDb].zName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
pTab->zName,
pTab->zName,
zStmt,
@@ -122212,7 +123060,7 @@ static int vtabCallConstructor(
pVTable->pMod = pMod;
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
- pTab->azModuleArg[1] = db->aDb[iDb].zName;
+ pTab->azModuleArg[1] = db->aDb[iDb].zDbSName;
/* Invoke the virtual table constructor */
assert( &db->pVtabCtx );
@@ -122366,7 +123214,7 @@ static void addToVTrans(sqlite3 *db, VTable *pVTab){
** This function is invoked by the vdbe to call the xCreate method
** of the virtual table named zTab in database iDb.
**
-** If an error occurs, *pzErr is set to point an an English language
+** If an error occurs, *pzErr is set to point to an English language
** description of the error and an SQLITE_XXX error code is returned.
** In this case the caller must call sqlite3DbFree(db, ) on *pzErr.
*/
@@ -122376,7 +123224,7 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab,
Module *pMod;
const char *zMod;
- pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName);
+ pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName);
assert( pTab && (pTab->tabFlags & TF_Virtual)!=0 && !pTab->pVTable );
/* Locate the required virtual table module */
@@ -122411,7 +123259,7 @@ SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3 *db, int iDb, const char *zTab,
** valid to call this function from within the xCreate() or xConnect() of a
** virtual table module.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
+SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){
VtabCtx *pCtx;
Parse *pParse;
int rc = SQLITE_OK;
@@ -122500,7 +123348,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab
int rc = SQLITE_OK;
Table *pTab;
- pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName);
+ pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zDbSName);
if( pTab!=0 && ALWAYS(pTab->pVTable!=0) ){
VTable *p;
int (*xDestroy)(sqlite3_vtab *);
@@ -122868,7 +123716,7 @@ SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3 *db, Module *pMod){
** The results of this routine are undefined unless it is called from
** within an xUpdate method.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *db){
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *db){
static const unsigned char aMap[] = {
SQLITE_ROLLBACK, SQLITE_ABORT, SQLITE_FAIL, SQLITE_IGNORE, SQLITE_REPLACE
};
@@ -122886,7 +123734,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *db){
** the SQLite core with additional information about the behavior
** of the virtual table being implemented.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3 *db, int op, ...){
+SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){
va_list ap;
int rc = SQLITE_OK;
@@ -123067,6 +123915,8 @@ struct WhereLoop {
union {
struct { /* Information for internal btree tables */
u16 nEq; /* Number of equality constraints */
+ u16 nBtm; /* Size of BTM vector */
+ u16 nTop; /* Size of TOP vector */
Index *pIndex; /* Index used, or NULL */
} btree;
struct { /* Information for virtual tables */
@@ -123189,19 +124039,20 @@ struct WherePath {
*/
struct WhereTerm {
Expr *pExpr; /* Pointer to the subexpression that is this term */
+ WhereClause *pWC; /* The clause this term is part of */
+ LogEst truthProb; /* Probability of truth for this expression */
+ u16 wtFlags; /* TERM_xxx bit flags. See below */
+ u16 eOperator; /* A WO_xx value describing <op> */
+ u8 nChild; /* Number of children that must disable us */
+ u8 eMatchOp; /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */
int iParent; /* Disable pWC->a[iParent] when this term disabled */
int leftCursor; /* Cursor number of X in "X <op> <expr>" */
+ int iField; /* Field in (?,?,?) IN (SELECT...) vector */
union {
int leftColumn; /* Column number of X in "X <op> <expr>" */
WhereOrInfo *pOrInfo; /* Extra information if (eOperator & WO_OR)!=0 */
WhereAndInfo *pAndInfo; /* Extra information if (eOperator& WO_AND)!=0 */
} u;
- LogEst truthProb; /* Probability of truth for this expression */
- u16 eOperator; /* A WO_xx value describing <op> */
- u16 wtFlags; /* TERM_xxx bit flags. See below */
- u8 nChild; /* Number of children that must disable us */
- u8 eMatchOp; /* Op for vtab MATCH/LIKE/GLOB/REGEXP terms */
- WhereClause *pWC; /* The clause this term is part of */
Bitmask prereqRight; /* Bitmask of tables used by pExpr->pRight */
Bitmask prereqAll; /* Bitmask of tables referenced by pExpr */
};
@@ -123354,25 +124205,25 @@ struct WhereInfo {
SrcList *pTabList; /* List of tables in the join */
ExprList *pOrderBy; /* The ORDER BY clause or NULL */
ExprList *pDistinctSet; /* DISTINCT over all these values */
- WhereLoop *pLoops; /* List of all WhereLoop objects */
- Bitmask revMask; /* Mask of ORDER BY terms that need reversing */
- LogEst nRowOut; /* Estimated number of output rows */
LogEst iLimit; /* LIMIT if wctrlFlags has WHERE_USE_LIMIT */
+ int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */
+ int iContinue; /* Jump here to continue with next record */
+ int iBreak; /* Jump here to break out of the loop */
+ int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */
u16 wctrlFlags; /* Flags originally passed to sqlite3WhereBegin() */
+ u8 nLevel; /* Number of nested loop */
i8 nOBSat; /* Number of ORDER BY terms satisfied by indices */
u8 sorted; /* True if really sorted (not just grouped) */
u8 eOnePass; /* ONEPASS_OFF, or _SINGLE, or _MULTI */
u8 untestedTerms; /* Not all WHERE terms resolved by outer loop */
u8 eDistinct; /* One of the WHERE_DISTINCT_* values */
- u8 nLevel; /* Number of nested loop */
u8 bOrderedInnerLoop; /* True if only the inner-most loop is ordered */
int iTop; /* The very beginning of the WHERE loop */
- int iContinue; /* Jump here to continue with next record */
- int iBreak; /* Jump here to break out of the loop */
- int savedNQueryLoop; /* pParse->nQueryLoop outside the WHERE loop */
- int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */
- WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */
+ WhereLoop *pLoops; /* List of all WhereLoop objects */
+ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */
+ LogEst nRowOut; /* Estimated number of output rows */
WhereClause sWC; /* Decomposition of the WHERE clause */
+ WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */
WhereLevel a[1]; /* Information about each nest loop in WHERE */
};
@@ -123496,6 +124347,17 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC
/************** Continuing where we left off in wherecode.c ******************/
#ifndef SQLITE_OMIT_EXPLAIN
+
+/*
+** Return the name of the i-th column of the pIdx index.
+*/
+static const char *explainIndexColumnName(Index *pIdx, int i){
+ i = pIdx->aiColumn[i];
+ if( i==XN_EXPR ) return "<expr>";
+ if( i==XN_ROWID ) return "rowid";
+ return pIdx->pTable->aCol[i].zName;
+}
+
/*
** This routine is a helper for explainIndexRange() below
**
@@ -123506,24 +124368,32 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC
*/
static void explainAppendTerm(
StrAccum *pStr, /* The text expression being built */
- int iTerm, /* Index of this term. First is zero */
- const char *zColumn, /* Name of the column */
+ Index *pIdx, /* Index to read column names from */
+ int nTerm, /* Number of terms */
+ int iTerm, /* Zero-based index of first term. */
+ int bAnd, /* Non-zero to append " AND " */
const char *zOp /* Name of the operator */
){
- if( iTerm ) sqlite3StrAccumAppend(pStr, " AND ", 5);
- sqlite3StrAccumAppendAll(pStr, zColumn);
+ int i;
+
+ assert( nTerm>=1 );
+ if( bAnd ) sqlite3StrAccumAppend(pStr, " AND ", 5);
+
+ if( nTerm>1 ) sqlite3StrAccumAppend(pStr, "(", 1);
+ for(i=0; i<nTerm; i++){
+ if( i ) sqlite3StrAccumAppend(pStr, ",", 1);
+ sqlite3StrAccumAppendAll(pStr, explainIndexColumnName(pIdx, iTerm+i));
+ }
+ if( nTerm>1 ) sqlite3StrAccumAppend(pStr, ")", 1);
+
sqlite3StrAccumAppend(pStr, zOp, 1);
- sqlite3StrAccumAppend(pStr, "?", 1);
-}
-/*
-** Return the name of the i-th column of the pIdx index.
-*/
-static const char *explainIndexColumnName(Index *pIdx, int i){
- i = pIdx->aiColumn[i];
- if( i==XN_EXPR ) return "<expr>";
- if( i==XN_ROWID ) return "rowid";
- return pIdx->pTable->aCol[i].zName;
+ if( nTerm>1 ) sqlite3StrAccumAppend(pStr, "(", 1);
+ for(i=0; i<nTerm; i++){
+ if( i ) sqlite3StrAccumAppend(pStr, ",", 1);
+ sqlite3StrAccumAppend(pStr, "?", 1);
+ }
+ if( nTerm>1 ) sqlite3StrAccumAppend(pStr, ")", 1);
}
/*
@@ -123556,12 +124426,11 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){
j = i;
if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
- const char *z = explainIndexColumnName(pIndex, i);
- explainAppendTerm(pStr, i++, z, ">");
+ explainAppendTerm(pStr, pIndex, pLoop->u.btree.nBtm, j, i, ">");
+ i = 1;
}
if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
- const char *z = explainIndexColumnName(pIndex, j);
- explainAppendTerm(pStr, i, z, "<");
+ explainAppendTerm(pStr, pIndex, pLoop->u.btree.nTop, j, i, "<");
}
sqlite3StrAccumAppend(pStr, ")", 1);
}
@@ -123751,7 +124620,7 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
*/
static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
int nLoop = 0;
- while( pTerm
+ while( ALWAYS(pTerm!=0)
&& (pTerm->wtFlags & TERM_CODED)==0
&& (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
&& (pLevel->notReady & pTerm->prereqAll)==0
@@ -123807,16 +124676,45 @@ static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
}
}
+/*
+** Expression pRight, which is the RHS of a comparison operation, is
+** either a vector of n elements or, if n==1, a scalar expression.
+** Before the comparison operation, affinity zAff is to be applied
+** to the pRight values. This function modifies characters within the
+** affinity string to SQLITE_AFF_BLOB if either:
+**
+** * the comparison will be performed with no affinity, or
+** * the affinity change in zAff is guaranteed not to change the value.
+*/
+static void updateRangeAffinityStr(
+ Expr *pRight, /* RHS of comparison */
+ int n, /* Number of vector elements in comparison */
+ char *zAff /* Affinity string to modify */
+){
+ int i;
+ for(i=0; i<n; i++){
+ Expr *p = sqlite3VectorFieldSubexpr(pRight, i);
+ if( sqlite3CompareAffinity(p, zAff[i])==SQLITE_AFF_BLOB
+ || sqlite3ExprNeedsNoAffinityChange(p, zAff[i])
+ ){
+ zAff[i] = SQLITE_AFF_BLOB;
+ }
+ }
+}
/*
** Generate code for a single equality term of the WHERE clause. An equality
** term can be either X=expr or X IN (...). pTerm is the term to be
** coded.
**
-** The current value for the constraint is left in register iReg.
+** The current value for the constraint is left in a register, the index
+** of which is returned. An attempt is made store the result in iTarget but
+** this is only guaranteed for TK_ISNULL and TK_IN constraints. If the
+** constraint is a TK_EQ or TK_IS, then the current value might be left in
+** some other register and it is the caller's responsibility to compensate.
**
-** For a constraint of the form X=expr, the expression is evaluated and its
-** result is left on the stack. For constraints of the form X IN (...)
+** For a constraint of the form X=expr, the expression is evaluated in
+** straight-line code. For constraints of the form X IN (...)
** this routine sets up a loop that will iterate over all values of X.
*/
static int codeEqualityTerm(
@@ -123831,6 +124729,7 @@ static int codeEqualityTerm(
Vdbe *v = pParse->pVdbe;
int iReg; /* Register holding results */
+ assert( pLevel->pWLoop->aLTerm[iEq]==pTerm );
assert( iTarget>0 );
if( pX->op==TK_EQ || pX->op==TK_IS ){
iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget);
@@ -123839,10 +124738,13 @@ static int codeEqualityTerm(
sqlite3VdbeAddOp2(v, OP_Null, 0, iReg);
#ifndef SQLITE_OMIT_SUBQUERY
}else{
- int eType;
+ int eType = IN_INDEX_NOOP;
int iTab;
struct InLoop *pIn;
WhereLoop *pLoop = pLevel->pWLoop;
+ int i;
+ int nEq = 0;
+ int *aiMap = 0;
if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
&& pLoop->u.btree.pIndex!=0
@@ -123854,7 +124756,75 @@ static int codeEqualityTerm(
}
assert( pX->op==TK_IN );
iReg = iTarget;
- eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0);
+
+ for(i=0; i<iEq; i++){
+ if( pLoop->aLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){
+ disableTerm(pLevel, pTerm);
+ return iTarget;
+ }
+ }
+ for(i=iEq;i<pLoop->nLTerm; i++){
+ if( ALWAYS(pLoop->aLTerm[i]) && pLoop->aLTerm[i]->pExpr==pX ) nEq++;
+ }
+
+ if( (pX->flags & EP_xIsSelect)==0 || pX->x.pSelect->pEList->nExpr==1 ){
+ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0);
+ }else{
+ Select *pSelect = pX->x.pSelect;
+ sqlite3 *db = pParse->db;
+ ExprList *pOrigRhs = pSelect->pEList;
+ ExprList *pOrigLhs = pX->pLeft->x.pList;
+ ExprList *pRhs = 0; /* New Select.pEList for RHS */
+ ExprList *pLhs = 0; /* New pX->pLeft vector */
+
+ for(i=iEq;i<pLoop->nLTerm; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ int iField = pLoop->aLTerm[i]->iField - 1;
+ Expr *pNewRhs = sqlite3ExprDup(db, pOrigRhs->a[iField].pExpr, 0);
+ Expr *pNewLhs = sqlite3ExprDup(db, pOrigLhs->a[iField].pExpr, 0);
+
+ pRhs = sqlite3ExprListAppend(pParse, pRhs, pNewRhs);
+ pLhs = sqlite3ExprListAppend(pParse, pLhs, pNewLhs);
+ }
+ }
+ if( !db->mallocFailed ){
+ Expr *pLeft = pX->pLeft;
+
+ if( pSelect->pOrderBy ){
+ /* If the SELECT statement has an ORDER BY clause, zero the
+ ** iOrderByCol variables. These are set to non-zero when an
+ ** ORDER BY term exactly matches one of the terms of the
+ ** result-set. Since the result-set of the SELECT statement may
+ ** have been modified or reordered, these variables are no longer
+ ** set correctly. Since setting them is just an optimization,
+ ** it's easiest just to zero them here. */
+ ExprList *pOrderBy = pSelect->pOrderBy;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].u.x.iOrderByCol = 0;
+ }
+ }
+
+ /* Take care here not to generate a TK_VECTOR containing only a
+ ** single value. Since the parser never creates such a vector, some
+ ** of the subroutines do not handle this case. */
+ if( pLhs->nExpr==1 ){
+ pX->pLeft = pLhs->a[0].pExpr;
+ }else{
+ pLeft->x.pList = pLhs;
+ aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int) * nEq);
+ testcase( aiMap==0 );
+ }
+ pSelect->pEList = pRhs;
+ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap);
+ testcase( aiMap!=0 && aiMap[0]!=0 );
+ pSelect->pEList = pOrigRhs;
+ pLeft->x.pList = pOrigLhs;
+ pX->pLeft = pLeft;
+ }
+ sqlite3ExprListDelete(pParse->db, pLhs);
+ sqlite3ExprListDelete(pParse->db, pRhs);
+ }
+
if( eType==IN_INDEX_INDEX_DESC ){
testcase( bRev );
bRev = !bRev;
@@ -123864,28 +124834,45 @@ static int codeEqualityTerm(
VdbeCoverageIf(v, bRev);
VdbeCoverageIf(v, !bRev);
assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
+
pLoop->wsFlags |= WHERE_IN_ABLE;
if( pLevel->u.in.nIn==0 ){
pLevel->addrNxt = sqlite3VdbeMakeLabel(v);
}
- pLevel->u.in.nIn++;
+
+ i = pLevel->u.in.nIn;
+ pLevel->u.in.nIn += nEq;
pLevel->u.in.aInLoop =
sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
pIn = pLevel->u.in.aInLoop;
if( pIn ){
- pIn += pLevel->u.in.nIn - 1;
- pIn->iCur = iTab;
- if( eType==IN_INDEX_ROWID ){
- pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iReg);
- }else{
- pIn->addrInTop = sqlite3VdbeAddOp3(v, OP_Column, iTab, 0, iReg);
+ int iMap = 0; /* Index in aiMap[] */
+ pIn += i;
+ for(i=iEq;i<pLoop->nLTerm; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ int iOut = iReg + i - iEq;
+ if( eType==IN_INDEX_ROWID ){
+ testcase( nEq>1 ); /* Happens with a UNIQUE index on ROWID */
+ pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut);
+ }else{
+ int iCol = aiMap ? aiMap[iMap++] : 0;
+ pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut);
+ }
+ sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
+ if( i==iEq ){
+ pIn->iCur = iTab;
+ pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
+ }else{
+ pIn->eEndLoopOp = OP_Noop;
+ }
+ pIn++;
+ }
}
- pIn->eEndLoopOp = bRev ? OP_PrevIfOpen : OP_NextIfOpen;
- sqlite3VdbeAddOp1(v, OP_IsNull, iReg); VdbeCoverage(v);
}else{
pLevel->u.in.nIn = 0;
}
+ sqlite3DbFree(pParse->db, aiMap);
#endif
}
disableTerm(pLevel, pTerm);
@@ -124011,9 +124998,15 @@ static int codeAllEqualityTerms(
sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
}
}
- testcase( pTerm->eOperator & WO_ISNULL );
- testcase( pTerm->eOperator & WO_IN );
- if( (pTerm->eOperator & (WO_ISNULL|WO_IN))==0 ){
+ if( pTerm->eOperator & WO_IN ){
+ if( pTerm->pExpr->flags & EP_xIsSelect ){
+ /* No affinity ever needs to be (or should be) applied to a value
+ ** from the RHS of an "? IN (SELECT ...)" expression. The
+ ** sqlite3FindInIndex() routine has already ensured that the
+ ** affinity of the comparison has been applied to the value. */
+ if( zAff ) zAff[j] = SQLITE_AFF_BLOB;
+ }
+ }else if( (pTerm->eOperator & WO_ISNULL)==0 ){
Expr *pRight = pTerm->pExpr->pRight;
if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){
sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk);
@@ -124337,6 +125330,39 @@ static void codeDeferredSeek(
}
/*
+** If the expression passed as the second argument is a vector, generate
+** code to write the first nReg elements of the vector into an array
+** of registers starting with iReg.
+**
+** If the expression is not a vector, then nReg must be passed 1. In
+** this case, generate code to evaluate the expression and leave the
+** result in register iReg.
+*/
+static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){
+ assert( nReg>0 );
+ if( sqlite3ExprIsVector(p) ){
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( (p->flags & EP_xIsSelect) ){
+ Vdbe *v = pParse->pVdbe;
+ int iSelect = sqlite3CodeSubselect(pParse, p, 0, 0);
+ sqlite3VdbeAddOp3(v, OP_Copy, iSelect, iReg, nReg-1);
+ }else
+#endif
+ {
+ int i;
+ ExprList *pList = p->x.pList;
+ assert( nReg<=pList->nExpr );
+ for(i=0; i<nReg; i++){
+ sqlite3ExprCode(pParse, pList->a[i].pExpr, iReg+i);
+ }
+ }
+ }else{
+ assert( nReg==1 );
+ sqlite3ExprCode(pParse, p, iReg);
+ }
+}
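As a rough illustration of the two RHS shapes that codeExprOrVector() has to handle, the following minimal sketch against the public SQLite API prepares one query whose vector RHS is a parameter list and one whose RHS is a subquery. The tables t1 and t2 and their columns are invented for the example, it assumes a library built from a source tree that includes this row-value support, and error checking is kept to a minimum.

#include <stdio.h>
#include <sqlite3.h>

/* Prepare a statement and report whether this SQLite build accepts it. */
static void try_sql(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  printf("%-55s -> %s\n", zSql, rc==SQLITE_OK ? "ok" : sqlite3_errmsg(db));
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a,b,c); CREATE TABLE t2(x,y);", 0, 0, 0);
  try_sql(db, "SELECT * FROM t1 WHERE (b,c) > (?,?)");               /* list RHS */
  try_sql(db, "SELECT * FROM t1 WHERE (b,c) > (SELECT x,y FROM t2)"); /* subquery RHS */
  sqlite3_close(db);
  return 0;
}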
+
+/*
** Generate code for the start of the iLevel-th loop in the WHERE clause
** implementation described by pWInfo.
*/
@@ -124431,7 +125457,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
addrNotFound = pLevel->addrNxt;
}else{
- sqlite3ExprCode(pParse, pTerm->pExpr->pRight, iTarget);
+ Expr *pRight = pTerm->pExpr->pRight;
+ codeExprOrVector(pParse, pRight, iTarget, 1);
}
}
sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
@@ -124545,6 +125572,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( pStart ){
Expr *pX; /* The expression that defines the start bound */
int r1, rTemp; /* Registers for holding the start boundary */
+ int op; /* Cursor seek operation */
/* The following constant maps TK_xx codes into corresponding
** seek opcodes. It depends on a particular ordering of TK_xx
@@ -124564,8 +125592,16 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pX = pStart->pExpr;
assert( pX!=0 );
testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
- r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
- sqlite3VdbeAddOp3(v, aMoveOp[pX->op-TK_GT], iCur, addrBrk, r1);
+ if( sqlite3ExprIsVector(pX->pRight) ){
+ r1 = rTemp = sqlite3GetTempReg(pParse);
+ codeExprOrVector(pParse, pX->pRight, r1, 1);
+ op = aMoveOp[(pX->op - TK_GT) | 0x0001];
+ }else{
+ r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
+ disableTerm(pLevel, pStart);
+ op = aMoveOp[(pX->op - TK_GT)];
+ }
+ sqlite3VdbeAddOp3(v, op, iCur, addrBrk, r1);
VdbeComment((v, "pk"));
VdbeCoverageIf(v, pX->op==TK_GT);
VdbeCoverageIf(v, pX->op==TK_LE);
@@ -124573,7 +125609,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
VdbeCoverageIf(v, pX->op==TK_GE);
sqlite3ExprCacheAffinityChange(pParse, r1, 1);
sqlite3ReleaseTempReg(pParse, rTemp);
- disableTerm(pLevel, pStart);
}else{
sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrBrk);
VdbeCoverageIf(v, bRev==0);
@@ -124587,13 +125622,17 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
testcase( pEnd->wtFlags & TERM_VIRTUAL );
memEndValue = ++pParse->nMem;
- sqlite3ExprCode(pParse, pX->pRight, memEndValue);
- if( pX->op==TK_LT || pX->op==TK_GT ){
+ codeExprOrVector(pParse, pX->pRight, memEndValue, 1);
+ if( 0==sqlite3ExprIsVector(pX->pRight)
+ && (pX->op==TK_LT || pX->op==TK_GT)
+ ){
testOp = bRev ? OP_Le : OP_Ge;
}else{
testOp = bRev ? OP_Lt : OP_Gt;
}
- disableTerm(pLevel, pEnd);
+ if( 0==sqlite3ExprIsVector(pX->pRight) ){
+ disableTerm(pLevel, pEnd);
+ }
}
start = sqlite3VdbeCurrentAddr(v);
pLevel->op = bRev ? OP_Prev : OP_Next;
@@ -124660,6 +125699,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */
};
u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */
+ u16 nBtm = pLoop->u.btree.nBtm; /* Length of BTM vector */
+ u16 nTop = pLoop->u.btree.nTop; /* Length of TOP vector */
int regBase; /* Base register holding constraint values */
WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */
WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */
@@ -124672,7 +125713,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
int nExtraReg = 0; /* Number of extra registers needed */
int op; /* Instruction opcode */
char *zStartAff; /* Affinity for start of range constraint */
- char cEndAff = 0; /* Affinity for end of range constraint */
+ char *zEndAff = 0; /* Affinity for end of range constraint */
u8 bSeekPastNull = 0; /* True to seek past initial nulls */
u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
@@ -124706,14 +125747,14 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
j = nEq;
if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
pRangeStart = pLoop->aLTerm[j++];
- nExtraReg = 1;
+ nExtraReg = MAX(nExtraReg, pLoop->u.btree.nBtm);
/* Like optimization range constraints always occur in pairs */
assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 ||
(pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 );
}
if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
pRangeEnd = pLoop->aLTerm[j++];
- nExtraReg = 1;
+ nExtraReg = MAX(nExtraReg, pLoop->u.btree.nTop);
#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){
assert( pRangeStart!=0 ); /* LIKE opt constraints */
@@ -124731,11 +125772,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pLevel->iLikeRepCntr |= bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC);
}
#endif
- if( pRangeStart==0
- && (j = pIdx->aiColumn[nEq])>=0
- && pIdx->pTable->aCol[j].notNull==0
- ){
- bSeekPastNull = 1;
+ if( pRangeStart==0 ){
+ j = pIdx->aiColumn[nEq];
+ if( (j>=0 && pIdx->pTable->aCol[j].notNull==0) || j==XN_EXPR ){
+ bSeekPastNull = 1;
+ }
}
}
assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 );
@@ -124749,6 +125790,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
){
SWAP(WhereTerm *, pRangeEnd, pRangeStart);
SWAP(u8, bSeekPastNull, bStopAtNull);
+ SWAP(u8, nBtm, nTop);
}
/* Generate code to evaluate all constraint terms using == or IN
@@ -124758,7 +125800,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
codeCursorHint(pTabItem, pWInfo, pLevel, pRangeEnd);
regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
- if( zStartAff ) cEndAff = zStartAff[nEq];
+ if( zStartAff && nTop ){
+ zEndAff = sqlite3DbStrDup(db, &zStartAff[nEq]);
+ }
addrNxt = pLevel->addrNxt;
testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
@@ -124773,7 +125817,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
nConstraint = nEq;
if( pRangeStart ){
Expr *pRight = pRangeStart->pExpr->pRight;
- sqlite3ExprCode(pParse, pRight, regBase+nEq);
+ codeExprOrVector(pParse, pRight, regBase+nEq, nBtm);
whereLikeOptimizationStringFixup(v, pLevel, pRangeStart);
if( (pRangeStart->wtFlags & TERM_VNULL)==0
&& sqlite3ExprCanBeNull(pRight)
@@ -124782,18 +125826,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
VdbeCoverage(v);
}
if( zStartAff ){
- if( sqlite3CompareAffinity(pRight, zStartAff[nEq])==SQLITE_AFF_BLOB){
- /* Since the comparison is to be performed with no conversions
- ** applied to the operands, set the affinity to apply to pRight to
- ** SQLITE_AFF_BLOB. */
- zStartAff[nEq] = SQLITE_AFF_BLOB;
- }
- if( sqlite3ExprNeedsNoAffinityChange(pRight, zStartAff[nEq]) ){
- zStartAff[nEq] = SQLITE_AFF_BLOB;
- }
+ updateRangeAffinityStr(pRight, nBtm, &zStartAff[nEq]);
}
- nConstraint++;
+ nConstraint += nBtm;
testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
+ if( sqlite3ExprIsVector(pRight)==0 ){
+ disableTerm(pLevel, pRangeStart);
+ }else{
+ startEq = 1;
+ }
bSeekPastNull = 0;
}else if( bSeekPastNull ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
@@ -124826,7 +125867,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( pRangeEnd ){
Expr *pRight = pRangeEnd->pExpr->pRight;
sqlite3ExprCacheRemove(pParse, regBase+nEq, 1);
- sqlite3ExprCode(pParse, pRight, regBase+nEq);
+ codeExprOrVector(pParse, pRight, regBase+nEq, nTop);
whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
if( (pRangeEnd->wtFlags & TERM_VNULL)==0
&& sqlite3ExprCanBeNull(pRight)
@@ -124834,19 +125875,27 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
VdbeCoverage(v);
}
- if( sqlite3CompareAffinity(pRight, cEndAff)!=SQLITE_AFF_BLOB
- && !sqlite3ExprNeedsNoAffinityChange(pRight, cEndAff)
- ){
- codeApplyAffinity(pParse, regBase+nEq, 1, &cEndAff);
+ if( zEndAff ){
+ updateRangeAffinityStr(pRight, nTop, zEndAff);
+ codeApplyAffinity(pParse, regBase+nEq, nTop, zEndAff);
+ }else{
+ assert( pParse->db->mallocFailed );
}
- nConstraint++;
+ nConstraint += nTop;
testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
+
+ if( sqlite3ExprIsVector(pRight)==0 ){
+ disableTerm(pLevel, pRangeEnd);
+ }else{
+ endEq = 1;
+ }
}else if( bStopAtNull ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
endEq = 0;
nConstraint++;
}
sqlite3DbFree(db, zStartAff);
+ sqlite3DbFree(db, zEndAff);
/* Top of the loop body */
pLevel->p2 = sqlite3VdbeCurrentAddr(v);
@@ -124862,8 +125911,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
/* Seek the table cursor, if required */
- disableTerm(pLevel, pRangeStart);
- disableTerm(pLevel, pRangeEnd);
if( omitTable ){
/* pIdx is a covering index. No need to access the main table. */
}else if( HasRowid(pIdx->pTable) ){
@@ -124887,9 +125934,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
iRowidReg, pPk->nKeyCol); VdbeCoverage(v);
}
- /* Record the instruction used to terminate the loop. Disable
- ** WHERE clause terms made redundant by the index range scan.
- */
+ /* Record the instruction used to terminate the loop. */
if( pLoop->wsFlags & WHERE_ONEROW ){
pLevel->op = OP_Noop;
}else if( bRev ){
@@ -124966,7 +126011,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
u16 wctrlFlags; /* Flags for sub-WHERE clause */
Expr *pAndExpr = 0; /* An ".. AND (...)" expression */
Table *pTab = pTabItem->pTab;
-
+
pTerm = pLoop->aLTerm[0];
assert( pTerm!=0 );
assert( pTerm->eOperator & WO_OR );
@@ -125267,7 +126312,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
** the implied "t1.a=123" constraint.
*/
for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
- Expr *pE, *pEAlt;
+ Expr *pE, sEAlt;
WhereTerm *pAlt;
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue;
@@ -125285,13 +126330,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
testcase( pAlt->eOperator & WO_IS );
testcase( pAlt->eOperator & WO_IN );
VdbeModuleComment((v, "begin transitive constraint"));
- pEAlt = sqlite3StackAllocRaw(db, sizeof(*pEAlt));
- if( pEAlt ){
- *pEAlt = *pAlt->pExpr;
- pEAlt->pLeft = pE->pLeft;
- sqlite3ExprIfFalse(pParse, pEAlt, addrCont, SQLITE_JUMPIFNULL);
- sqlite3StackFree(db, pEAlt);
- }
+ sEAlt = *pAlt->pExpr;
+ sEAlt.pLeft = pE->pLeft;
+ sqlite3ExprIfFalse(pParse, &sEAlt, addrCont, SQLITE_JUMPIFNULL);
}
/* For a LEFT OUTER JOIN, generate code that will record the fact that
@@ -125400,7 +126441,6 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
sqlite3DbFree(db, pOld);
}
pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]);
- memset(&pWC->a[pWC->nTerm], 0, sizeof(pWC->a[0])*(pWC->nSlot-pWC->nTerm));
}
pTerm = &pWC->a[idx = pWC->nTerm++];
if( p && ExprHasProperty(p, EP_Unlikely) ){
@@ -125412,13 +126452,15 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
pTerm->wtFlags = wtFlags;
pTerm->pWC = pWC;
pTerm->iParent = -1;
+ memset(&pTerm->eOperator, 0,
+ sizeof(WhereTerm) - offsetof(WhereTerm,eOperator));
return idx;
}
/*
** Return TRUE if the given operator is one of the operators that is
** allowed for an indexable WHERE clause term. The allowed operators are
-** "=", "<", ">", "<=", ">=", "IN", and "IS NULL"
+** "=", "<", ">", "<=", ">=", "IN", "IS", and "IS NULL"
*/
static int allowedOp(int op){
assert( TK_GT>TK_EQ && TK_GT<TK_GE );
@@ -125613,7 +126655,7 @@ static int isMatchOfColumn(
Expr *pExpr, /* Test this expression */
unsigned char *peOp2 /* OUT: 0 for MATCH, or else an op2 value */
){
- struct Op2 {
+ static const struct Op2 {
const char *zOp;
unsigned char eOp2;
} aOp[] = {
@@ -126146,7 +127188,8 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
** in any index. Return TRUE (1) if pExpr is an indexed term and return
** FALSE (0) if not. If TRUE is returned, also set *piCur to the cursor
** number of the table that is indexed and *piColumn to the column number
-** of the column that is indexed, or -2 if an expression is being indexed.
+** of the column that is indexed, or XN_EXPR (-2) if an expression is being
+** indexed.
**
** If pExpr is a TK_COLUMN column reference, then this routine always returns
** true even if that particular column is not indexed, because the column
@@ -126154,6 +127197,7 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
*/
static int exprMightBeIndexed(
SrcList *pFrom, /* The FROM clause */
+ int op, /* The specific comparison operator */
Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
Expr *pExpr, /* An operand of a comparison operator */
int *piCur, /* Write the referenced table cursor number here */
@@ -126162,6 +127206,17 @@ static int exprMightBeIndexed(
Index *pIdx;
int i;
int iCur;
+
+  /* If this expression is a vector to the left or right of an
+ ** inequality constraint (>, <, >= or <=), perform the processing
+ ** on the first element of the vector. */
+ assert( TK_GT+1==TK_LE && TK_GT+2==TK_LT && TK_GT+3==TK_GE );
+ assert( TK_IS<TK_GE && TK_ISNULL<TK_GE && TK_IN<TK_GE );
+ assert( op<=TK_GE );
+ if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){
+ pExpr = pExpr->x.pList->a[0].pExpr;
+ }
+
if( pExpr->op==TK_COLUMN ){
*piCur = pExpr->iTable;
*piColumn = pExpr->iColumn;
@@ -126174,10 +127229,10 @@ static int exprMightBeIndexed(
for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
if( pIdx->aColExpr==0 ) continue;
for(i=0; i<pIdx->nKeyCol; i++){
- if( pIdx->aiColumn[i]!=(-2) ) continue;
+ if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
if( sqlite3ExprCompare(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
*piCur = iCur;
- *piColumn = -2;
+ *piColumn = XN_EXPR;
return 1;
}
}
@@ -126234,6 +127289,7 @@ static void exprAnalyze(
op = pExpr->op;
if( op==TK_IN ){
assert( pExpr->pRight==0 );
+ if( sqlite3ExprCheckIN(pParse, pExpr) ) return;
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
pTerm->prereqRight = exprSelectUsage(pMaskSet, pExpr->x.pSelect);
}else{
@@ -126260,18 +127316,26 @@ static void exprAnalyze(
Expr *pLeft = sqlite3ExprSkipCollate(pExpr->pLeft);
Expr *pRight = sqlite3ExprSkipCollate(pExpr->pRight);
u16 opMask = (pTerm->prereqRight & prereqLeft)==0 ? WO_ALL : WO_EQUIV;
- if( exprMightBeIndexed(pSrc, prereqLeft, pLeft, &iCur, &iColumn) ){
+
+ if( pTerm->iField>0 ){
+ assert( op==TK_IN );
+ assert( pLeft->op==TK_VECTOR );
+ pLeft = pLeft->x.pList->a[pTerm->iField-1].pExpr;
+ }
+
+ if( exprMightBeIndexed(pSrc, op, prereqLeft, pLeft, &iCur, &iColumn) ){
pTerm->leftCursor = iCur;
pTerm->u.leftColumn = iColumn;
pTerm->eOperator = operatorMask(op) & opMask;
}
if( op==TK_IS ) pTerm->wtFlags |= TERM_IS;
if( pRight
- && exprMightBeIndexed(pSrc, pTerm->prereqRight, pRight, &iCur, &iColumn)
+ && exprMightBeIndexed(pSrc, op, pTerm->prereqRight, pRight, &iCur,&iColumn)
){
WhereTerm *pNew;
Expr *pDup;
u16 eExtraOp = 0; /* Extra bits for pNew->eOperator */
+ assert( pTerm->iField==0 );
if( pTerm->leftCursor>=0 ){
int idxNew;
pDup = sqlite3ExprDup(db, pExpr, 0);
@@ -126475,6 +127539,60 @@ static void exprAnalyze(
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
+ /* If there is a vector == or IS term - e.g. "(a, b) == (?, ?)" - create
+ ** new terms for each component comparison - "a = ?" and "b = ?". The
+ ** new terms completely replace the original vector comparison, which is
+ ** no longer used.
+ **
+ ** This is only required if at least one side of the comparison operation
+ ** is not a sub-select. */
+ if( pWC->op==TK_AND
+ && (pExpr->op==TK_EQ || pExpr->op==TK_IS)
+ && sqlite3ExprIsVector(pExpr->pLeft)
+ && ( (pExpr->pLeft->flags & EP_xIsSelect)==0
+ || (pExpr->pRight->flags & EP_xIsSelect)==0
+ )){
+ int nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
+ int i;
+ assert( nLeft==sqlite3ExprVectorSize(pExpr->pRight) );
+ for(i=0; i<nLeft; i++){
+ int idxNew;
+ Expr *pNew;
+ Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i);
+ Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i);
+
+ pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight, 0);
+ transferJoinMarkings(pNew, pExpr);
+ idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC);
+ exprAnalyze(pSrc, pWC, idxNew);
+ }
+ pTerm = &pWC->a[idxTerm];
+ pTerm->wtFlags = TERM_CODED|TERM_VIRTUAL; /* Disable the original */
+ pTerm->eOperator = 0;
+ }
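A minimal sketch of the effect of this rewrite, using the public API with an invented table t1 and index i1: because the vector equality is broken into "a=? AND b=?" terms, EXPLAIN QUERY PLAN should report a search of the index rather than a full table scan (assuming a build that includes this change; error checking omitted).

#include <stdio.h>
#include <sqlite3.h>

/* sqlite3_exec() callback: print the last ("detail") column of each row. */
static int print_detail(void *pUnused, int nCol, char **azVal, char **azCol){
  (void)pUnused; (void)azCol;
  printf("%s\n", azVal[nCol-1]);
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a,b,c); CREATE INDEX i1 ON t1(a,b);", 0, 0, 0);
  /* (a,b)=(?,?) is analyzed as "a=? AND b=?", so the plan should use i1. */
  sqlite3_exec(db, "EXPLAIN QUERY PLAN SELECT c FROM t1 WHERE (a,b)=(?,?)",
               print_detail, 0, 0);
  sqlite3_close(db);
  return 0;
}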
+
+ /* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create
+ ** a virtual term for each vector component. The expression object
+ ** used by each such virtual term is pExpr (the full vector IN(...)
+ ** expression). The WhereTerm.iField variable identifies the index within
+ ** the vector on the LHS that the virtual term represents.
+ **
+  ** This only works if the RHS is a simple SELECT, not a compound SELECT.
+ */
+ if( pWC->op==TK_AND && pExpr->op==TK_IN && pTerm->iField==0
+ && pExpr->pLeft->op==TK_VECTOR
+ && pExpr->x.pSelect->pPrior==0
+ ){
+ int i;
+ for(i=0; i<sqlite3ExprVectorSize(pExpr->pLeft); i++){
+ int idxNew;
+ idxNew = whereClauseInsert(pWC, pExpr, TERM_VIRTUAL);
+ pWC->a[idxNew].iField = i+1;
+ exprAnalyze(pSrc, pWC, idxNew);
+ markTermAsChild(pWC, idxNew, idxTerm);
+ }
+ }
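A small sketch of the query shape these virtual terms cover, again with an invented schema and minimal error handling; it only demonstrates the observable result of the query, not the WhereTerm bookkeeping itself.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a,b,c);"
    "CREATE INDEX i1 ON t1(a,b);"
    "INSERT INTO t1 VALUES(1,1,'one'),(1,2,'two'),(2,2,'three');"
    "CREATE TABLE t2(x,y);"
    "INSERT INTO t2 VALUES(1,2),(2,2);", 0, 0, 0);
  /* Each field of the (a,b) vector gets its own virtual WHERE term, so
  ** index i1 can drive the lookup of the rows matching the subquery.
  ** This prints the c values of the two matching rows: "two" and "three". */
  sqlite3_prepare_v2(db,
    "SELECT c FROM t1 WHERE (a,b) IN (SELECT x,y FROM t2)", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}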
+
#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
/* When sqlite_stat3 histogram data is available an operator of the
** form "x IS NOT NULL" can sometimes be evaluated more efficiently
@@ -126495,7 +127613,7 @@ static void exprAnalyze(
pNewExpr = sqlite3PExpr(pParse, TK_GT,
sqlite3ExprDup(db, pLeft, 0),
- sqlite3PExpr(pParse, TK_NULL, 0, 0, 0), 0);
+ sqlite3ExprAlloc(db, TK_NULL, 0, 0), 0);
idxNew = whereClauseInsert(pWC, pNewExpr,
TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL);
@@ -126598,13 +127716,14 @@ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
** tree.
*/
SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){
- Bitmask mask = 0;
+ Bitmask mask;
if( p==0 ) return 0;
if( p->op==TK_COLUMN ){
mask = sqlite3WhereGetMask(pMaskSet, p->iTable);
return mask;
}
- mask = sqlite3WhereExprUsage(pMaskSet, p->pRight);
+ assert( !ExprHasProperty(p, EP_TokenOnly) );
+ mask = p->pRight ? sqlite3WhereExprUsage(pMaskSet, p->pRight) : 0;
if( p->pLeft ) mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft);
if( ExprHasProperty(p, EP_xIsSelect) ){
mask |= exprSelectUsage(pMaskSet, p->x.pSelect);
@@ -126672,7 +127791,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
pTab->zName, j);
return;
}
- pColRef = sqlite3PExpr(pParse, TK_COLUMN, 0, 0, 0);
+ pColRef = sqlite3ExprAlloc(pParse->db, TK_COLUMN, 0, 0);
if( pColRef==0 ) return;
pColRef->iTable = pItem->iCursor;
pColRef->iColumn = k++;
@@ -127338,7 +128457,7 @@ static void constructAutomaticIndex(
** transient index on 2nd and subsequent iterations of the loop. */
v = pParse->pVdbe;
assert( v!=0 );
- addrInit = sqlite3CodeOnce(pParse); VdbeCoverage(v);
+ addrInit = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
/* Count the number of columns that will be added to the index
** and used to match WHERE clause constraints */
@@ -127513,7 +128632,8 @@ static sqlite3_index_info *allocateIndexInfo(
WhereClause *pWC,
Bitmask mUnusable, /* Ignore terms with these prereqs */
struct SrcList_item *pSrc,
- ExprList *pOrderBy
+ ExprList *pOrderBy,
+ u16 *pmNoOmit /* Mask of terms not to omit */
){
int i, j;
int nTerm;
@@ -127523,6 +128643,7 @@ static sqlite3_index_info *allocateIndexInfo(
WhereTerm *pTerm;
int nOrderBy;
sqlite3_index_info *pIdxInfo;
+ u16 mNoOmit = 0;
/* Count the number of possible WHERE clause constraints referring
** to this virtual table */
@@ -127611,6 +128732,15 @@ static sqlite3_index_info *allocateIndexInfo(
assert( WO_GE==SQLITE_INDEX_CONSTRAINT_GE );
assert( WO_MATCH==SQLITE_INDEX_CONSTRAINT_MATCH );
assert( pTerm->eOperator & (WO_IN|WO_EQ|WO_LT|WO_LE|WO_GT|WO_GE|WO_MATCH) );
+
+ if( op & (WO_LT|WO_LE|WO_GT|WO_GE)
+ && sqlite3ExprIsVector(pTerm->pExpr->pRight)
+ ){
+ if( i<16 ) mNoOmit |= (1 << i);
+ if( op==WO_LT ) pIdxCons[j].op = WO_LE;
+ if( op==WO_GT ) pIdxCons[j].op = WO_GE;
+ }
+
j++;
}
for(i=0; i<nOrderBy; i++){
@@ -127619,6 +128749,7 @@ static sqlite3_index_info *allocateIndexInfo(
pIdxOrderBy[i].desc = pOrderBy->a[i].sortOrder;
}
+ *pmNoOmit = mNoOmit;
return pIdxInfo;
}
@@ -127894,7 +129025,7 @@ static LogEst whereRangeAdjust(WhereTerm *pTerm, LogEst nNew){
/*
** Return the affinity for a single column of an index.
*/
-static char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCol){
+SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCol){
assert( iCol>=0 && iCol<pIdx->nColumn );
if( !pIdx->zColAff ){
if( sqlite3IndexAffinityStr(db, pIdx)==0 ) return SQLITE_AFF_BLOB;
@@ -128071,7 +129202,8 @@ static int whereRangeScanEst(
if( nEq==pBuilder->nRecValid ){
UnpackedRecord *pRec = pBuilder->pRec;
tRowcnt a[2];
- u8 aff;
+ int nBtm = pLoop->u.btree.nBtm;
+ int nTop = pLoop->u.btree.nTop;
/* Variable iLower will be set to the estimate of the number of rows in
** the index that are less than the lower bound of the range query. The
@@ -128101,8 +129233,6 @@ static int whereRangeScanEst(
testcase( pRec->nField!=pBuilder->nRecValid );
pRec->nField = pBuilder->nRecValid;
}
- aff = sqlite3IndexColumnAffinity(pParse->db, p, nEq);
- assert( nEq!=p->nKeyCol || aff==SQLITE_AFF_INTEGER );
/* Determine iLower and iUpper using ($P) only. */
if( nEq==0 ){
iLower = 0;
@@ -128121,17 +129251,20 @@ static int whereRangeScanEst(
if( p->aSortOrder[nEq] ){
/* The roles of pLower and pUpper are swapped for a DESC index */
SWAP(WhereTerm*, pLower, pUpper);
+ SWAP(int, nBtm, nTop);
}
/* If possible, improve on the iLower estimate using ($P:$L). */
if( pLower ){
- int bOk; /* True if value is extracted from pExpr */
+ int n; /* Values extracted from pExpr */
Expr *pExpr = pLower->pExpr->pRight;
- rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
- if( rc==SQLITE_OK && bOk ){
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nBtm, nEq, &n);
+ if( rc==SQLITE_OK && n ){
tRowcnt iNew;
+ u16 mask = WO_GT|WO_LE;
+ if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
iLwrIdx = whereKeyStats(pParse, p, pRec, 0, a);
- iNew = a[0] + ((pLower->eOperator & (WO_GT|WO_LE)) ? a[1] : 0);
+ iNew = a[0] + ((pLower->eOperator & mask) ? a[1] : 0);
if( iNew>iLower ) iLower = iNew;
nOut--;
pLower = 0;
@@ -128140,13 +129273,15 @@ static int whereRangeScanEst(
/* If possible, improve on the iUpper estimate using ($P:$U). */
if( pUpper ){
- int bOk; /* True if value is extracted from pExpr */
+ int n; /* Values extracted from pExpr */
Expr *pExpr = pUpper->pExpr->pRight;
- rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq, &bOk);
- if( rc==SQLITE_OK && bOk ){
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, nTop, nEq, &n);
+ if( rc==SQLITE_OK && n ){
tRowcnt iNew;
+ u16 mask = WO_GT|WO_LE;
+ if( sqlite3ExprVectorSize(pExpr)>n ) mask = (WO_LE|WO_LT);
iUprIdx = whereKeyStats(pParse, p, pRec, 1, a);
- iNew = a[0] + ((pUpper->eOperator & (WO_GT|WO_LE)) ? a[1] : 0);
+ iNew = a[0] + ((pUpper->eOperator & mask) ? a[1] : 0);
if( iNew<iUpper ) iUpper = iNew;
nOut--;
pUpper = 0;
@@ -128236,7 +129371,6 @@ static int whereEqualScanEst(
Index *p = pBuilder->pNew->u.btree.pIndex;
int nEq = pBuilder->pNew->u.btree.nEq;
UnpackedRecord *pRec = pBuilder->pRec;
- u8 aff; /* Column affinity */
int rc; /* Subfunction return code */
tRowcnt a[2]; /* Statistics */
int bOk;
@@ -128260,8 +129394,7 @@ static int whereEqualScanEst(
return SQLITE_OK;
}
- aff = sqlite3IndexColumnAffinity(pParse->db, p, nEq-1);
- rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, aff, nEq-1, &bOk);
+ rc = sqlite3Stat4ProbeSetValue(pParse, p, &pRec, pExpr, 1, nEq-1, &bOk);
pBuilder->pRec = pRec;
if( rc!=SQLITE_OK ) return rc;
if( bOk==0 ) return SQLITE_NOTFOUND;
@@ -128350,9 +129483,14 @@ static void whereTermPrint(WhereTerm *pTerm, int iTerm){
sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor);
}
sqlite3DebugPrintf(
- "TERM-%-3d %p %s %-12s prob=%-3d op=0x%03x wtFlags=0x%04x\n",
+ "TERM-%-3d %p %s %-12s prob=%-3d op=0x%03x wtFlags=0x%04x",
iTerm, pTerm, zType, zLeft, pTerm->truthProb,
pTerm->eOperator, pTerm->wtFlags);
+ if( pTerm->iField ){
+ sqlite3DebugPrintf(" iField=%d\n", pTerm->iField);
+ }else{
+ sqlite3DebugPrintf("\n");
+ }
sqlite3TreeViewExpr(0, pTerm->pExpr, 0);
}
}
@@ -128874,6 +130012,72 @@ static void whereLoopOutputAdjust(
if( pLoop->nOut > nRow-iReduce ) pLoop->nOut = nRow - iReduce;
}
+/*
+** Term pTerm is a vector range comparison operation. The first comparison
+** in the vector can be optimized using column nEq of the index. This
+** function returns the total number of vector elements that can be used
+** as part of the range comparison.
+**
+** For example, if the query is:
+**
+** WHERE a = ? AND (b, c, d) > (?, ?, ?)
+**
+** and the index:
+**
+** CREATE INDEX ... ON (a, b, c, d, e)
+**
+** then this function would be invoked with nEq=1. The value returned in
+** this case is 3.
+*/
+static int whereRangeVectorLen(
+ Parse *pParse, /* Parsing context */
+ int iCur, /* Cursor open on pIdx */
+  Index *pIdx,                    /* The index to be used for an inequality constraint */
+ int nEq, /* Number of prior equality constraints on same index */
+ WhereTerm *pTerm /* The vector inequality constraint */
+){
+ int nCmp = sqlite3ExprVectorSize(pTerm->pExpr->pLeft);
+ int i;
+
+ nCmp = MIN(nCmp, (pIdx->nColumn - nEq));
+ for(i=1; i<nCmp; i++){
+ /* Test if comparison i of pTerm is compatible with column (i+nEq)
+ ** of the index. If not, exit the loop. */
+ char aff; /* Comparison affinity */
+ char idxaff = 0; /* Indexed columns affinity */
+ CollSeq *pColl; /* Comparison collation sequence */
+ Expr *pLhs = pTerm->pExpr->pLeft->x.pList->a[i].pExpr;
+ Expr *pRhs = pTerm->pExpr->pRight;
+ if( pRhs->flags & EP_xIsSelect ){
+ pRhs = pRhs->x.pSelect->pEList->a[i].pExpr;
+ }else{
+ pRhs = pRhs->x.pList->a[i].pExpr;
+ }
+
+    /* Check that the LHS of the comparison is a column reference to
+    ** the expected column of the expected source table, and that the
+    ** sort order of the index column is the same as the sort order of
+    ** the leftmost index column.  */
+ if( pLhs->op!=TK_COLUMN
+ || pLhs->iTable!=iCur
+ || pLhs->iColumn!=pIdx->aiColumn[i+nEq]
+ || pIdx->aSortOrder[i+nEq]!=pIdx->aSortOrder[nEq]
+ ){
+ break;
+ }
+
+ testcase( pLhs->iColumn==XN_ROWID );
+ aff = sqlite3CompareAffinity(pRhs, sqlite3ExprAffinity(pLhs));
+ idxaff = sqlite3TableColumnAffinity(pIdx->pTable, pLhs->iColumn);
+ if( aff!=idxaff ) break;
+
+ pColl = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs);
+ if( pColl==0 ) break;
+ if( sqlite3StrICmp(pColl->zName, pIdx->azColl[i+nEq]) ) break;
+ }
+ return i;
+}
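Using the example from the comment above, a minimal sketch against the public API (schema names invented, error checking omitted) shows the plan chosen when all three elements of the (b,c,d) vector can extend the range constraint on index i1:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a,b,c,d,e);"
    "CREATE INDEX i1 ON t1(a,b,c,d,e);", 0, 0, 0);
  /* With nEq==1 (the a=? term), all three elements of (b,c,d) line up
  ** with the following index columns, so the whole vector can form the
  ** lower bound of the range scan on i1. */
  sqlite3_prepare_v2(db,
    "EXPLAIN QUERY PLAN SELECT e FROM t1 WHERE a=? AND (b,c,d)>(?,?,?)",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    int nCol = sqlite3_column_count(pStmt);
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, nCol-1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}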
+
/*
** Adjust the cost C by the costMult factor T.  This only occurs if
** compiled with -DSQLITE_ENABLE_COSTMULT
@@ -128912,6 +130116,8 @@ static int whereLoopAddBtreeIndex(
Bitmask saved_prereq; /* Original value of pNew->prereq */
u16 saved_nLTerm; /* Original value of pNew->nLTerm */
u16 saved_nEq; /* Original value of pNew->u.btree.nEq */
+ u16 saved_nBtm; /* Original value of pNew->u.btree.nBtm */
+ u16 saved_nTop; /* Original value of pNew->u.btree.nTop */
u16 saved_nSkip; /* Original value of pNew->nSkip */
u32 saved_wsFlags; /* Original value of pNew->wsFlags */
LogEst saved_nOut; /* Original value of pNew->nOut */
@@ -128922,12 +130128,15 @@ static int whereLoopAddBtreeIndex(
pNew = pBuilder->pNew;
if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
+ WHERETRACE(0x800, ("BEGIN addBtreeIdx(%s), nEq=%d\n",
+ pProbe->zName, pNew->u.btree.nEq));
assert( (pNew->wsFlags & WHERE_VIRTUALTABLE)==0 );
assert( (pNew->wsFlags & WHERE_TOP_LIMIT)==0 );
if( pNew->wsFlags & WHERE_BTM_LIMIT ){
opMask = WO_LT|WO_LE;
}else{
+ assert( pNew->u.btree.nBtm==0 );
opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS;
}
if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE);
@@ -128935,6 +130144,8 @@ static int whereLoopAddBtreeIndex(
assert( pNew->u.btree.nEq<pProbe->nColumn );
saved_nEq = pNew->u.btree.nEq;
+ saved_nBtm = pNew->u.btree.nBtm;
+ saved_nTop = pNew->u.btree.nTop;
saved_nSkip = pNew->nSkip;
saved_nLTerm = pNew->nLTerm;
saved_wsFlags = pNew->wsFlags;
@@ -128978,6 +130189,8 @@ static int whereLoopAddBtreeIndex(
pNew->wsFlags = saved_wsFlags;
pNew->u.btree.nEq = saved_nEq;
+ pNew->u.btree.nBtm = saved_nBtm;
+ pNew->u.btree.nTop = saved_nTop;
pNew->nLTerm = saved_nLTerm;
if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
pNew->aLTerm[pNew->nLTerm++] = pTerm;
@@ -128994,14 +130207,23 @@ static int whereLoopAddBtreeIndex(
pNew->wsFlags |= WHERE_COLUMN_IN;
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
/* "x IN (SELECT ...)": TUNING: the SELECT returns 25 rows */
+ int i;
nIn = 46; assert( 46==sqlite3LogEst(25) );
+
+ /* The expression may actually be of the form (x, y) IN (SELECT...).
+ ** In this case there is a separate term for each of (x) and (y).
+ ** However, the nIn multiplier should only be applied once, not once
+ ** for each such term. The following loop checks that pTerm is the
+ ** first such term in use, and sets nIn back to 0 if it is not. */
+ for(i=0; i<pNew->nLTerm-1; i++){
+ if( pNew->aLTerm[i] && pNew->aLTerm[i]->pExpr==pExpr ) nIn = 0;
+ }
}else if( ALWAYS(pExpr->x.pList && pExpr->x.pList->nExpr) ){
/* "x IN (value, value, ...)" */
nIn = sqlite3LogEst(pExpr->x.pList->nExpr);
+ assert( nIn>0 ); /* RHS always has 2 or more terms... The parser
+ ** changes "x IN (?)" into "x=?". */
}
- assert( nIn>0 ); /* RHS always has 2 or more terms... The parser
- ** changes "x IN (?)" into "x=?". */
-
}else if( eOp & (WO_EQ|WO_IS) ){
int iCol = pProbe->aiColumn[saved_nEq];
pNew->wsFlags |= WHERE_COLUMN_EQ;
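For reference on the tuning constant used a few lines up: LogEst values are stored as roughly 10*log2(N), so the 25-row guess for an "x IN (SELECT ...)" RHS maps to 46. A tiny standalone check of that arithmetic (not tied to any SQLite API):

#include <math.h>
#include <stdio.h>

/* 10*log2(25) is about 46.4, matching the assert that
** sqlite3LogEst(25)==46 in the code above. */
int main(void){
  printf("10*log2(25) = %.1f\n", 10.0*log2(25.0));
  return 0;
}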
@@ -129021,6 +130243,9 @@ static int whereLoopAddBtreeIndex(
testcase( eOp & WO_GT );
testcase( eOp & WO_GE );
pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT;
+ pNew->u.btree.nBtm = whereRangeVectorLen(
+ pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
+ );
pBtm = pTerm;
pTop = 0;
if( pTerm->wtFlags & TERM_LIKEOPT ){
@@ -129033,12 +130258,16 @@ static int whereLoopAddBtreeIndex(
if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
pNew->aLTerm[pNew->nLTerm++] = pTop;
pNew->wsFlags |= WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = 1;
}
}else{
assert( eOp & (WO_LT|WO_LE) );
testcase( eOp & WO_LT );
testcase( eOp & WO_LE );
pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = whereRangeVectorLen(
+ pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
+ );
pTop = pTerm;
pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ?
pNew->aLTerm[pNew->nLTerm-2] : 0;
@@ -129138,6 +130367,8 @@ static int whereLoopAddBtreeIndex(
}
pNew->prereq = saved_prereq;
pNew->u.btree.nEq = saved_nEq;
+ pNew->u.btree.nBtm = saved_nBtm;
+ pNew->u.btree.nTop = saved_nTop;
pNew->nSkip = saved_nSkip;
pNew->wsFlags = saved_wsFlags;
pNew->nOut = saved_nOut;
@@ -129177,6 +130408,8 @@ static int whereLoopAddBtreeIndex(
pNew->wsFlags = saved_wsFlags;
}
+ WHERETRACE(0x800, ("END addBtreeIdx(%s), nEq=%d, rc=%d\n",
+ pProbe->zName, saved_nEq, rc));
return rc;
}
@@ -129259,7 +130492,7 @@ static int whereUsablePartialIndex(int iTab, WhereClause *pWC, Expr *pWhere){
/*
** Add all WhereLoop objects for a single table of the join where the table
-** is idenfied by pBuilder->pNew->iTab. That table is guaranteed to be
+** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
** a b-tree table, not a virtual table.
**
** The costs (WhereLoop.rRun) of the b-tree loops added by this function
@@ -129413,6 +130646,8 @@ static int whereLoopAddBtree(
}
rSize = pProbe->aiRowLogEst[0];
pNew->u.btree.nEq = 0;
+ pNew->u.btree.nBtm = 0;
+ pNew->u.btree.nTop = 0;
pNew->nSkip = 0;
pNew->nLTerm = 0;
pNew->iSortIdx = 0;
@@ -129541,6 +130776,7 @@ static int whereLoopAddVirtualOne(
Bitmask mUsable, /* Mask of usable tables */
u16 mExclude, /* Exclude terms using these operators */
sqlite3_index_info *pIdxInfo, /* Populated object for xBestIndex */
+ u16 mNoOmit, /* Do not omit these constraints */
int *pbIn /* OUT: True if plan uses an IN(...) op */
){
WhereClause *pWC = pBuilder->pWC;
@@ -129629,6 +130865,7 @@ static int whereLoopAddVirtualOne(
}
}
}
+ pNew->u.vtab.omitMask &= ~mNoOmit;
pNew->nLTerm = mxTerm+1;
assert( pNew->nLTerm<=pNew->nLSlot );
@@ -129702,6 +130939,7 @@ static int whereLoopAddVirtual(
int bIn; /* True if plan uses IN(...) operator */
WhereLoop *pNew;
Bitmask mBest; /* Tables used by best possible plan */
+ u16 mNoOmit;
assert( (mPrereq & mUnusable)==0 );
pWInfo = pBuilder->pWInfo;
@@ -129710,7 +130948,8 @@ static int whereLoopAddVirtual(
pNew = pBuilder->pNew;
pSrc = &pWInfo->pTabList->a[pNew->iTab];
assert( IsVirtual(pSrc->pTab) );
- p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy);
+ p = allocateIndexInfo(pParse, pWC, mUnusable, pSrc, pBuilder->pOrderBy,
+ &mNoOmit);
if( p==0 ) return SQLITE_NOMEM_BKPT;
pNew->rSetup = 0;
pNew->wsFlags = WHERE_VIRTUALTABLE;
@@ -129724,7 +130963,7 @@ static int whereLoopAddVirtual(
/* First call xBestIndex() with all constraints usable. */
WHERETRACE(0x40, (" VirtualOne: all usable\n"));
- rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, &bIn);
+ rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn);
/* If the call to xBestIndex() with all terms enabled produced a plan
** that does not require any source tables (IOW: a plan with mBest==0),
@@ -129741,7 +130980,8 @@ static int whereLoopAddVirtual(
** xBestIndex again, this time with IN(...) terms disabled. */
if( bIn ){
WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n"));
- rc = whereLoopAddVirtualOne(pBuilder, mPrereq, ALLBITS, WO_IN, p, &bIn);
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn);
assert( bIn==0 );
mBestNoIn = pNew->prereq & ~mPrereq;
if( mBestNoIn==0 ){
@@ -129767,7 +131007,8 @@ static int whereLoopAddVirtual(
if( mNext==mBest || mNext==mBestNoIn ) continue;
WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n",
(sqlite3_uint64)mPrev, (sqlite3_uint64)mNext));
- rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mNext|mPrereq, 0, p, &bIn);
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn);
if( pNew->prereq==mPrereq ){
seenZero = 1;
if( bIn==0 ) seenZeroNoIN = 1;
@@ -129779,7 +131020,8 @@ static int whereLoopAddVirtual(
** usable), make a call here with all source tables disabled */
if( rc==SQLITE_OK && seenZero==0 ){
WHERETRACE(0x40, (" VirtualOne: all disabled\n"));
- rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, 0, p, &bIn);
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn);
if( bIn==0 ) seenZeroNoIN = 1;
}
@@ -129788,7 +131030,8 @@ static int whereLoopAddVirtual(
** operator, make a final call to obtain one here. */
if( rc==SQLITE_OK && seenZeroNoIN==0 ){
WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n"));
- rc = whereLoopAddVirtualOne(pBuilder, mPrereq, mPrereq, WO_IN, p, &bIn);
+ rc = whereLoopAddVirtualOne(
+ pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn);
}
}
@@ -130088,6 +131331,14 @@ static i8 wherePathSatisfiesOrderBy(
pTerm = sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn,
~ready, eqOpMask, 0);
if( pTerm==0 ) continue;
+ if( pTerm->eOperator==WO_IN ){
+ /* IN terms are only valid for sorting in the ORDER BY LIMIT
+ ** optimization, and then only if they are actually used
+ ** by the query plan */
+ assert( wctrlFlags & WHERE_ORDERBY_LIMIT );
+ for(j=0; j<pLoop->nLTerm && pTerm!=pLoop->aLTerm[j]; j++){}
+ if( j>=pLoop->nLTerm ) continue;
+ }
if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){
const char *z1, *z2;
pColl = sqlite3ExprCollSeq(pWInfo->pParse, pOrderBy->a[i].pExpr);
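The new WO_IN branch above only matters for the ORDER BY LIMIT optimization. A small sketch of the query shape involved, with an invented schema and no error checking; it only shows that the rows come back in the requested order, not which internal plan was chosen:

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a,b);"
    "CREATE INDEX i1 ON t1(a,b);"
    "INSERT INTO t1 VALUES(1,30),(2,10),(3,20),(2,5),(1,1);", 0, 0, 0);
  /* An IN term on the left-most index column combined with ORDER BY on
  ** the next index column and a LIMIT is the shape targeted by the
  ** ORDER BY LIMIT optimization.  Prints b in ascending order: 1, 5, 10. */
  sqlite3_prepare_v2(db,
    "SELECT a, b FROM t1 WHERE a IN (1,2,3) ORDER BY b LIMIT 3",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a=%d b=%d\n", sqlite3_column_int(pStmt,0), sqlite3_column_int(pStmt,1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}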
@@ -130124,20 +131375,42 @@ static i8 wherePathSatisfiesOrderBy(
rev = revSet = 0;
distinctColumns = 0;
for(j=0; j<nColumn; j++){
- u8 bOnce; /* True to run the ORDER BY search loop */
+ u8 bOnce = 1; /* True to run the ORDER BY search loop */
- /* Skip over == and IS and ISNULL terms.
- ** (Also skip IN terms when doing WHERE_ORDERBY_LIMIT processing)
- */
- if( j<pLoop->u.btree.nEq
- && pLoop->nSkip==0
- && ((i = pLoop->aLTerm[j]->eOperator) & eqOpMask)!=0
- ){
- if( i & WO_ISNULL ){
- testcase( isOrderDistinct );
- isOrderDistinct = 0;
+ assert( j>=pLoop->u.btree.nEq
+ || (pLoop->aLTerm[j]==0)==(j<pLoop->nSkip)
+ );
+ if( j<pLoop->u.btree.nEq && j>=pLoop->nSkip ){
+ u16 eOp = pLoop->aLTerm[j]->eOperator;
+
+ /* Skip over == and IS and ISNULL terms. (Also skip IN terms when
+ ** doing WHERE_ORDERBY_LIMIT processing).
+ **
+ ** If the current term is a column of an ((?,?) IN (SELECT...))
+ ** expression for which the SELECT returns more than one column,
+ ** check that it is the only column used by this loop. Otherwise,
+ ** if it is one of two or more, none of the columns can be
+ ** considered to match an ORDER BY term. */
+ if( (eOp & eqOpMask)!=0 ){
+ if( eOp & WO_ISNULL ){
+ testcase( isOrderDistinct );
+ isOrderDistinct = 0;
+ }
+ continue;
+ }else if( ALWAYS(eOp & WO_IN) ){
+ /* ALWAYS() justification: eOp is an equality operator due to the
+ ** j<pLoop->u.btree.nEq constraint above. Any equality other
+ ** than WO_IN is captured by the previous "if". So this one
+ ** always has to be WO_IN. */
+ Expr *pX = pLoop->aLTerm[j]->pExpr;
+ for(i=j+1; i<pLoop->u.btree.nEq; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ assert( (pLoop->aLTerm[i]->eOperator & WO_IN) );
+ bOnce = 0;
+ break;
+ }
+ }
}
- continue;
}
/* Get the column number in the table (iColumn) and sort order
@@ -130166,7 +131439,6 @@ static i8 wherePathSatisfiesOrderBy(
/* Find the ORDER BY term that corresponds to the j-th column
** of the index and mark that ORDER BY term off
*/
- bOnce = 1;
isMatch = 0;
for(i=0; bOnce && i<nOrderBy; i++){
if( MASKBIT(i) & obSat ) continue;
@@ -130203,7 +131475,7 @@ static i8 wherePathSatisfiesOrderBy(
}
}
if( isMatch ){
- if( iColumn<0 ){
+ if( iColumn==XN_ROWID ){
testcase( distinctColumns==0 );
distinctColumns = 1;
}
@@ -130658,13 +131930,20 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pWInfo->revMask = pFrom->revLoop;
if( pWInfo->nOBSat<=0 ){
pWInfo->nOBSat = 0;
- if( nLoop>0 && (pFrom->aLoop[nLoop-1]->wsFlags & WHERE_ONEROW)==0 ){
- Bitmask m = 0;
- int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom,
+ if( nLoop>0 ){
+ u32 wsFlags = pFrom->aLoop[nLoop-1]->wsFlags;
+ if( (wsFlags & WHERE_ONEROW)==0
+ && (wsFlags&(WHERE_IPK|WHERE_COLUMN_IN))!=(WHERE_IPK|WHERE_COLUMN_IN)
+ ){
+ Bitmask m = 0;
+ int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom,
WHERE_ORDERBY_LIMIT, nLoop-1, pFrom->aLoop[nLoop-1], &m);
- if( rc==pWInfo->pOrderBy->nExpr ){
- pWInfo->bOrderedInnerLoop = 1;
- pWInfo->revMask = m;
+ testcase( wsFlags & WHERE_IPK );
+ testcase( wsFlags & WHERE_COLUMN_IN );
+ if( rc==pWInfo->pOrderBy->nExpr ){
+ pWInfo->bOrderedInnerLoop = 1;
+ pWInfo->revMask = m;
+ }
}
}
}
@@ -130941,22 +132220,25 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** some architectures. Hence the ROUND8() below.
*/
nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
- pWInfo = sqlite3DbMallocZero(db, nByteWInfo + sizeof(WhereLoop));
+ pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
if( db->mallocFailed ){
sqlite3DbFree(db, pWInfo);
pWInfo = 0;
goto whereBeginError;
}
- pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1;
- pWInfo->nLevel = nTabList;
pWInfo->pParse = pParse;
pWInfo->pTabList = pTabList;
pWInfo->pOrderBy = pOrderBy;
pWInfo->pDistinctSet = pDistinctSet;
+ pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1;
+ pWInfo->nLevel = nTabList;
pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v);
pWInfo->wctrlFlags = wctrlFlags;
pWInfo->iLimit = iAuxArg;
pWInfo->savedNQueryLoop = pParse->nQueryLoop;
+ memset(&pWInfo->nOBSat, 0,
+ offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat));
+ memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel));
assert( pWInfo->eOnePass==ONEPASS_OFF ); /* ONEPASS defaults to OFF */
pMaskSet = &pWInfo->sMaskSet;
sWLB.pWInfo = pWInfo;
@@ -131360,10 +132642,12 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
sqlite3VdbeResolveLabel(v, pLevel->addrNxt);
for(j=pLevel->u.in.nIn, pIn=&pLevel->u.in.aInLoop[j-1]; j>0; j--, pIn--){
sqlite3VdbeJumpHere(v, pIn->addrInTop+1);
- sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
- VdbeCoverage(v);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen);
- VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen);
+ if( pIn->eEndLoopOp!=OP_Noop ){
+ sqlite3VdbeAddOp2(v, pIn->eEndLoopOp, pIn->iCur, pIn->addrInTop);
+ VdbeCoverage(v);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_PrevIfOpen);
+ VdbeCoverageIf(v, pIn->eEndLoopOp==OP_NextIfOpen);
+ }
sqlite3VdbeJumpHere(v, pIn->addrInTop-1);
}
}
@@ -131382,13 +132666,15 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}
#endif
if( pLevel->iLeftJoin ){
+ int ws = pLoop->wsFlags;
addr = sqlite3VdbeAddOp1(v, OP_IfPos, pLevel->iLeftJoin); VdbeCoverage(v);
- assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
- || (pLoop->wsFlags & WHERE_INDEXED)!=0 );
- if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ){
+ assert( (ws & WHERE_IDX_ONLY)==0 || (ws & WHERE_INDEXED)!=0 );
+ if( (ws & WHERE_IDX_ONLY)==0 ){
sqlite3VdbeAddOp1(v, OP_NullRow, pTabList->a[i].iCursor);
}
- if( pLoop->wsFlags & WHERE_INDEXED ){
+ if( (ws & WHERE_INDEXED)
+ || ((ws & WHERE_MULTI_OR) && pLevel->u.pCovidx)
+ ){
sqlite3VdbeAddOp1(v, OP_NullRow, pLevel->iIdxCur);
}
if( pLevel->op==OP_Return ){
@@ -131566,15 +132852,6 @@ struct LimitVal {
};
/*
-** An instance of this structure is used to store the LIKE,
-** GLOB, NOT LIKE, and NOT GLOB operators.
-*/
-struct LikeOp {
- Token eOperator; /* "like" or "glob" or "regexp" */
- int bNot; /* True if the NOT keyword is present */
-};
-
-/*
** An instance of the following structure describes the event of a
** TRIGGER. "a" is the event type, one of TK_UPDATE, TK_INSERT,
** TK_DELETE, or TK_INSTEAD. If the event is of the form
@@ -131586,11 +132863,6 @@ struct LikeOp {
struct TrigEvent { int a; IdList * b; };
/*
-** An instance of this structure holds the ATTACH key and the key type.
-*/
-struct AttachKey { int type; Token key; };
-
-/*
** Disable lookaside memory allocation for objects that might be
** shared across database connections.
*/
@@ -131636,7 +132908,24 @@ static void disableLookaside(Parse *pParse){
** that created the expression.
*/
static void spanExpr(ExprSpan *pOut, Parse *pParse, int op, Token t){
- pOut->pExpr = sqlite3PExpr(pParse, op, 0, 0, &t);
+ Expr *p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr)+t.n+1);
+ if( p ){
+ memset(p, 0, sizeof(Expr));
+ p->op = (u8)op;
+ p->flags = EP_Leaf;
+ p->iAgg = -1;
+ p->u.zToken = (char*)&p[1];
+ memcpy(p->u.zToken, t.z, t.n);
+ p->u.zToken[t.n] = 0;
+ if( sqlite3Isquote(p->u.zToken[0]) ){
+ if( p->u.zToken[0]=='"' ) p->flags |= EP_DblQuoted;
+ sqlite3Dequote(p->u.zToken);
+ }
+#if SQLITE_MAX_EXPR_DEPTH>0
+ p->nHeight = 1;
+#endif
+ }
+ pOut->pExpr = p;
pOut->zStart = t.z;
pOut->zEnd = &t.z[t.n];
}
@@ -131799,7 +133088,6 @@ typedef union {
With* yy285;
struct TrigEvent yy332;
struct LimitVal yy354;
- struct LikeOp yy392;
struct {int value; int mask;} yy497;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
@@ -131810,16 +133098,16 @@ typedef union {
#define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse
#define sqlite3ParserARG_STORE yypParser->pParse = pParse
#define YYFALLBACK 1
-#define YYNSTATE 443
-#define YYNRULE 328
-#define YY_MAX_SHIFT 442
-#define YY_MIN_SHIFTREDUCE 653
-#define YY_MAX_SHIFTREDUCE 980
-#define YY_MIN_REDUCE 981
-#define YY_MAX_REDUCE 1308
-#define YY_ERROR_ACTION 1309
-#define YY_ACCEPT_ACTION 1310
-#define YY_NO_ACTION 1311
+#define YYNSTATE 456
+#define YYNRULE 332
+#define YY_MAX_SHIFT 455
+#define YY_MIN_SHIFTREDUCE 668
+#define YY_MAX_SHIFTREDUCE 999
+#define YY_MIN_REDUCE 1000
+#define YY_MAX_REDUCE 1331
+#define YY_ERROR_ACTION 1332
+#define YY_ACCEPT_ACTION 1333
+#define YY_NO_ACTION 1334
/************* End control #defines *******************************************/
/* Define the yytestcase() macro to be a no-op if is not already defined
@@ -131851,7 +133139,7 @@ typedef union {
**
** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
** and YY_MAX_REDUCE
-
+**
** N == YY_ERROR_ACTION A syntax error has occurred.
**
** N == YY_ACCEPT_ACTION The parser accepts its input.
@@ -131860,16 +133148,20 @@ typedef union {
** slots in the yy_action[] table.
**
** The action table is constructed as a single large table named yy_action[].
-** Given state S and lookahead X, the action is computed as
+** Given state S and lookahead X, the action is computed as either:
**
-** yy_action[ yy_shift_ofst[S] + X ]
+** (A) N = yy_action[ yy_shift_ofst[S] + X ]
+** (B) N = yy_default[S]
**
-** If the index value yy_shift_ofst[S]+X is out of range or if the value
-** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X or if yy_shift_ofst[S]
-** is equal to YY_SHIFT_USE_DFLT, it means that the action is not in the table
-** and that yy_default[S] should be used instead.
+** The (A) formula is preferred. The B formula is used instead if:
+** (1) The yy_shift_ofst[S]+X value is out of range, or
+** (2) yy_lookahead[yy_shift_ofst[S]+X] is not equal to X, or
+** (3) yy_shift_ofst[S] equal YY_SHIFT_USE_DFLT.
+** (Implementation note: YY_SHIFT_USE_DFLT is chosen so that
+** YY_SHIFT_USE_DFLT+X will be out of range for all possible lookaheads X.
+** Hence only tests (1) and (2) need to be evaluated.)
**
-** The formula above is for computing the action when the lookahead is
+** The formulas above are for computing the action when the lookahead is
** a terminal symbol. If the lookahead is a non-terminal (as occurs after
** a reduce action) then the yy_reduce_ofst[] array is used in place of
** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
@@ -131887,159 +133179,165 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (1507)
+#define YY_ACTTAB_COUNT (1567)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 317, 814, 341, 808, 5, 195, 195, 802, 93, 94,
- /* 10 */ 84, 823, 823, 835, 838, 827, 827, 91, 91, 92,
- /* 20 */ 92, 92, 92, 293, 90, 90, 90, 90, 89, 89,
- /* 30 */ 88, 88, 88, 87, 341, 317, 958, 958, 807, 807,
- /* 40 */ 807, 928, 344, 93, 94, 84, 823, 823, 835, 838,
- /* 50 */ 827, 827, 91, 91, 92, 92, 92, 92, 328, 90,
- /* 60 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 341,
- /* 70 */ 89, 89, 88, 88, 88, 87, 341, 776, 958, 958,
- /* 80 */ 317, 88, 88, 88, 87, 341, 777, 69, 93, 94,
- /* 90 */ 84, 823, 823, 835, 838, 827, 827, 91, 91, 92,
- /* 100 */ 92, 92, 92, 437, 90, 90, 90, 90, 89, 89,
- /* 110 */ 88, 88, 88, 87, 341, 1310, 147, 147, 2, 317,
- /* 120 */ 76, 25, 74, 49, 49, 87, 341, 93, 94, 84,
- /* 130 */ 823, 823, 835, 838, 827, 827, 91, 91, 92, 92,
- /* 140 */ 92, 92, 95, 90, 90, 90, 90, 89, 89, 88,
- /* 150 */ 88, 88, 87, 341, 939, 939, 317, 260, 415, 400,
- /* 160 */ 398, 58, 737, 737, 93, 94, 84, 823, 823, 835,
- /* 170 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 57,
- /* 180 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87,
- /* 190 */ 341, 317, 1253, 928, 344, 269, 940, 941, 242, 93,
- /* 200 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91,
- /* 210 */ 92, 92, 92, 92, 293, 90, 90, 90, 90, 89,
- /* 220 */ 89, 88, 88, 88, 87, 341, 317, 919, 1303, 793,
- /* 230 */ 691, 1303, 724, 724, 93, 94, 84, 823, 823, 835,
- /* 240 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 337,
- /* 250 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87,
- /* 260 */ 341, 317, 114, 919, 1304, 684, 395, 1304, 124, 93,
- /* 270 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91,
- /* 280 */ 92, 92, 92, 92, 683, 90, 90, 90, 90, 89,
- /* 290 */ 89, 88, 88, 88, 87, 341, 317, 86, 83, 169,
- /* 300 */ 801, 917, 234, 399, 93, 94, 84, 823, 823, 835,
- /* 310 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 686,
- /* 320 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87,
- /* 330 */ 341, 317, 436, 742, 86, 83, 169, 917, 741, 93,
- /* 340 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91,
- /* 350 */ 92, 92, 92, 92, 902, 90, 90, 90, 90, 89,
- /* 360 */ 89, 88, 88, 88, 87, 341, 317, 321, 434, 434,
- /* 370 */ 434, 1, 722, 722, 93, 94, 84, 823, 823, 835,
- /* 380 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 190,
- /* 390 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87,
- /* 400 */ 341, 317, 685, 292, 939, 939, 150, 977, 310, 93,
- /* 410 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91,
- /* 420 */ 92, 92, 92, 92, 437, 90, 90, 90, 90, 89,
- /* 430 */ 89, 88, 88, 88, 87, 341, 926, 2, 372, 719,
- /* 440 */ 698, 369, 950, 317, 49, 49, 940, 941, 719, 177,
- /* 450 */ 72, 93, 94, 84, 823, 823, 835, 838, 827, 827,
- /* 460 */ 91, 91, 92, 92, 92, 92, 322, 90, 90, 90,
- /* 470 */ 90, 89, 89, 88, 88, 88, 87, 341, 317, 415,
- /* 480 */ 405, 824, 824, 836, 839, 75, 93, 82, 84, 823,
- /* 490 */ 823, 835, 838, 827, 827, 91, 91, 92, 92, 92,
- /* 500 */ 92, 430, 90, 90, 90, 90, 89, 89, 88, 88,
- /* 510 */ 88, 87, 341, 317, 340, 340, 340, 658, 659, 660,
- /* 520 */ 333, 288, 94, 84, 823, 823, 835, 838, 827, 827,
- /* 530 */ 91, 91, 92, 92, 92, 92, 437, 90, 90, 90,
- /* 540 */ 90, 89, 89, 88, 88, 88, 87, 341, 317, 882,
- /* 550 */ 882, 375, 828, 66, 330, 409, 49, 49, 84, 823,
- /* 560 */ 823, 835, 838, 827, 827, 91, 91, 92, 92, 92,
- /* 570 */ 92, 351, 90, 90, 90, 90, 89, 89, 88, 88,
- /* 580 */ 88, 87, 341, 80, 432, 742, 3, 1180, 351, 350,
- /* 590 */ 741, 334, 796, 939, 939, 761, 80, 432, 278, 3,
- /* 600 */ 204, 161, 279, 393, 274, 392, 191, 362, 437, 277,
- /* 610 */ 745, 77, 78, 272, 800, 254, 355, 243, 79, 342,
- /* 620 */ 342, 86, 83, 169, 77, 78, 234, 399, 49, 49,
- /* 630 */ 435, 79, 342, 342, 437, 940, 941, 186, 442, 655,
- /* 640 */ 390, 387, 386, 435, 235, 213, 108, 421, 761, 351,
- /* 650 */ 437, 385, 167, 732, 10, 10, 124, 124, 671, 814,
- /* 660 */ 421, 439, 438, 415, 414, 802, 362, 168, 327, 124,
- /* 670 */ 49, 49, 814, 219, 439, 438, 800, 186, 802, 326,
- /* 680 */ 390, 387, 386, 437, 1248, 1248, 23, 939, 939, 80,
- /* 690 */ 432, 385, 3, 761, 416, 876, 807, 807, 807, 809,
- /* 700 */ 19, 290, 149, 49, 49, 415, 396, 260, 910, 807,
- /* 710 */ 807, 807, 809, 19, 312, 237, 145, 77, 78, 746,
- /* 720 */ 168, 702, 437, 149, 79, 342, 342, 114, 358, 940,
- /* 730 */ 941, 302, 223, 397, 345, 313, 435, 260, 415, 417,
- /* 740 */ 858, 374, 31, 31, 80, 432, 761, 3, 348, 92,
- /* 750 */ 92, 92, 92, 421, 90, 90, 90, 90, 89, 89,
- /* 760 */ 88, 88, 88, 87, 341, 814, 114, 439, 438, 796,
- /* 770 */ 367, 802, 77, 78, 701, 796, 124, 1187, 220, 79,
- /* 780 */ 342, 342, 124, 747, 734, 939, 939, 775, 404, 939,
- /* 790 */ 939, 435, 254, 360, 253, 402, 895, 346, 254, 360,
- /* 800 */ 253, 774, 807, 807, 807, 809, 19, 800, 421, 90,
- /* 810 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 341,
- /* 820 */ 814, 114, 439, 438, 939, 939, 802, 940, 941, 114,
- /* 830 */ 437, 940, 941, 86, 83, 169, 192, 166, 309, 979,
- /* 840 */ 70, 432, 700, 3, 382, 870, 238, 86, 83, 169,
- /* 850 */ 10, 10, 361, 406, 763, 190, 222, 807, 807, 807,
- /* 860 */ 809, 19, 870, 872, 329, 24, 940, 941, 77, 78,
- /* 870 */ 359, 437, 335, 260, 218, 79, 342, 342, 437, 307,
- /* 880 */ 306, 305, 207, 303, 339, 338, 668, 435, 339, 338,
- /* 890 */ 407, 10, 10, 762, 216, 216, 939, 939, 49, 49,
- /* 900 */ 437, 260, 97, 241, 421, 225, 402, 189, 188, 187,
- /* 910 */ 309, 918, 980, 149, 221, 898, 814, 868, 439, 438,
- /* 920 */ 10, 10, 802, 870, 915, 316, 898, 163, 162, 171,
- /* 930 */ 249, 240, 322, 410, 412, 687, 687, 272, 940, 941,
- /* 940 */ 239, 965, 901, 437, 226, 403, 226, 437, 963, 367,
- /* 950 */ 964, 173, 248, 807, 807, 807, 809, 19, 174, 367,
- /* 960 */ 899, 124, 172, 48, 48, 9, 9, 35, 35, 966,
- /* 970 */ 966, 899, 363, 966, 966, 814, 900, 808, 725, 939,
- /* 980 */ 939, 802, 895, 318, 980, 324, 125, 900, 726, 420,
- /* 990 */ 92, 92, 92, 92, 85, 90, 90, 90, 90, 89,
- /* 1000 */ 89, 88, 88, 88, 87, 341, 216, 216, 437, 946,
- /* 1010 */ 349, 292, 807, 807, 807, 114, 291, 693, 402, 705,
- /* 1020 */ 890, 940, 941, 437, 245, 889, 247, 437, 36, 36,
- /* 1030 */ 437, 353, 391, 437, 260, 252, 260, 437, 361, 437,
- /* 1040 */ 706, 437, 370, 12, 12, 224, 437, 27, 27, 437,
- /* 1050 */ 37, 37, 437, 38, 38, 752, 368, 39, 39, 28,
- /* 1060 */ 28, 29, 29, 215, 166, 331, 40, 40, 437, 41,
- /* 1070 */ 41, 437, 42, 42, 437, 866, 246, 731, 437, 879,
- /* 1080 */ 437, 256, 437, 878, 437, 267, 437, 261, 11, 11,
- /* 1090 */ 437, 43, 43, 437, 99, 99, 437, 373, 44, 44,
- /* 1100 */ 45, 45, 32, 32, 46, 46, 47, 47, 437, 426,
- /* 1110 */ 33, 33, 776, 116, 116, 437, 117, 117, 437, 124,
- /* 1120 */ 437, 777, 437, 260, 437, 957, 437, 352, 118, 118,
- /* 1130 */ 437, 195, 437, 111, 437, 53, 53, 264, 34, 34,
- /* 1140 */ 100, 100, 50, 50, 101, 101, 102, 102, 437, 260,
- /* 1150 */ 98, 98, 115, 115, 113, 113, 437, 262, 437, 265,
- /* 1160 */ 437, 943, 958, 437, 727, 437, 681, 437, 106, 106,
- /* 1170 */ 68, 437, 893, 730, 437, 365, 105, 105, 103, 103,
- /* 1180 */ 104, 104, 217, 52, 52, 54, 54, 51, 51, 694,
- /* 1190 */ 259, 26, 26, 266, 30, 30, 677, 323, 433, 323,
- /* 1200 */ 674, 423, 427, 943, 958, 114, 114, 431, 681, 865,
- /* 1210 */ 1277, 233, 366, 714, 112, 20, 154, 704, 703, 810,
- /* 1220 */ 914, 55, 159, 311, 798, 255, 383, 194, 68, 200,
- /* 1230 */ 21, 694, 268, 114, 114, 114, 270, 711, 712, 68,
- /* 1240 */ 114, 739, 770, 715, 71, 194, 861, 875, 875, 200,
- /* 1250 */ 696, 865, 874, 874, 679, 699, 273, 110, 229, 419,
- /* 1260 */ 768, 810, 799, 378, 748, 759, 418, 210, 294, 281,
- /* 1270 */ 295, 806, 283, 682, 676, 665, 664, 666, 933, 151,
- /* 1280 */ 285, 7, 1267, 308, 251, 790, 354, 244, 892, 364,
- /* 1290 */ 287, 422, 300, 164, 160, 936, 974, 127, 197, 137,
- /* 1300 */ 909, 907, 971, 388, 276, 863, 862, 56, 698, 325,
- /* 1310 */ 148, 59, 122, 66, 356, 381, 357, 176, 152, 62,
- /* 1320 */ 371, 130, 877, 181, 377, 760, 211, 182, 132, 133,
- /* 1330 */ 134, 135, 258, 146, 140, 795, 787, 263, 183, 379,
- /* 1340 */ 667, 394, 184, 332, 894, 314, 718, 717, 857, 716,
- /* 1350 */ 696, 315, 709, 690, 65, 196, 6, 408, 289, 708,
- /* 1360 */ 275, 689, 688, 948, 756, 757, 280, 282, 425, 755,
- /* 1370 */ 284, 336, 73, 67, 754, 429, 411, 96, 286, 413,
- /* 1380 */ 205, 934, 673, 22, 209, 440, 119, 120, 109, 206,
- /* 1390 */ 208, 441, 662, 661, 656, 843, 654, 343, 158, 236,
- /* 1400 */ 170, 347, 107, 227, 121, 738, 873, 298, 296, 297,
- /* 1410 */ 299, 871, 794, 128, 129, 728, 230, 131, 175, 250,
- /* 1420 */ 888, 136, 138, 231, 232, 139, 60, 61, 891, 178,
- /* 1430 */ 179, 887, 8, 13, 180, 257, 880, 968, 194, 141,
- /* 1440 */ 142, 376, 153, 670, 380, 185, 143, 277, 63, 384,
- /* 1450 */ 14, 707, 271, 15, 389, 64, 319, 320, 126, 228,
- /* 1460 */ 813, 812, 841, 736, 123, 16, 401, 740, 4, 769,
- /* 1470 */ 165, 212, 214, 193, 144, 764, 71, 68, 17, 18,
- /* 1480 */ 856, 842, 840, 897, 845, 896, 199, 198, 923, 155,
- /* 1490 */ 424, 929, 924, 156, 201, 202, 428, 844, 157, 203,
- /* 1500 */ 811, 680, 81, 1269, 1268, 301, 304,
+ /* 0 */ 325, 832, 351, 825, 5, 203, 203, 819, 99, 100,
+ /* 10 */ 90, 842, 842, 854, 857, 846, 846, 97, 97, 98,
+ /* 20 */ 98, 98, 98, 301, 96, 96, 96, 96, 95, 95,
+ /* 30 */ 94, 94, 94, 93, 351, 325, 977, 977, 824, 824,
+ /* 40 */ 826, 947, 354, 99, 100, 90, 842, 842, 854, 857,
+ /* 50 */ 846, 846, 97, 97, 98, 98, 98, 98, 338, 96,
+ /* 60 */ 96, 96, 96, 95, 95, 94, 94, 94, 93, 351,
+ /* 70 */ 95, 95, 94, 94, 94, 93, 351, 791, 977, 977,
+ /* 80 */ 325, 94, 94, 94, 93, 351, 792, 75, 99, 100,
+ /* 90 */ 90, 842, 842, 854, 857, 846, 846, 97, 97, 98,
+ /* 100 */ 98, 98, 98, 450, 96, 96, 96, 96, 95, 95,
+ /* 110 */ 94, 94, 94, 93, 351, 1333, 155, 155, 2, 325,
+ /* 120 */ 275, 146, 132, 52, 52, 93, 351, 99, 100, 90,
+ /* 130 */ 842, 842, 854, 857, 846, 846, 97, 97, 98, 98,
+ /* 140 */ 98, 98, 101, 96, 96, 96, 96, 95, 95, 94,
+ /* 150 */ 94, 94, 93, 351, 958, 958, 325, 268, 428, 413,
+ /* 160 */ 411, 61, 752, 752, 99, 100, 90, 842, 842, 854,
+ /* 170 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 60,
+ /* 180 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 190 */ 351, 325, 270, 329, 273, 277, 959, 960, 250, 99,
+ /* 200 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97,
+ /* 210 */ 98, 98, 98, 98, 301, 96, 96, 96, 96, 95,
+ /* 220 */ 95, 94, 94, 94, 93, 351, 325, 938, 1326, 698,
+ /* 230 */ 706, 1326, 242, 412, 99, 100, 90, 842, 842, 854,
+ /* 240 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 347,
+ /* 250 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 260 */ 351, 325, 938, 1327, 384, 699, 1327, 381, 379, 99,
+ /* 270 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97,
+ /* 280 */ 98, 98, 98, 98, 701, 96, 96, 96, 96, 95,
+ /* 290 */ 95, 94, 94, 94, 93, 351, 325, 92, 89, 178,
+ /* 300 */ 833, 936, 373, 700, 99, 100, 90, 842, 842, 854,
+ /* 310 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 375,
+ /* 320 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 330 */ 351, 325, 1276, 947, 354, 818, 936, 739, 739, 99,
+ /* 340 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97,
+ /* 350 */ 98, 98, 98, 98, 230, 96, 96, 96, 96, 95,
+ /* 360 */ 95, 94, 94, 94, 93, 351, 325, 969, 227, 92,
+ /* 370 */ 89, 178, 373, 300, 99, 100, 90, 842, 842, 854,
+ /* 380 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 921,
+ /* 390 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 400 */ 351, 325, 449, 447, 447, 447, 147, 737, 737, 99,
+ /* 410 */ 100, 90, 842, 842, 854, 857, 846, 846, 97, 97,
+ /* 420 */ 98, 98, 98, 98, 296, 96, 96, 96, 96, 95,
+ /* 430 */ 95, 94, 94, 94, 93, 351, 325, 419, 231, 958,
+ /* 440 */ 958, 158, 25, 422, 99, 100, 90, 842, 842, 854,
+ /* 450 */ 857, 846, 846, 97, 97, 98, 98, 98, 98, 450,
+ /* 460 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 470 */ 351, 443, 224, 224, 420, 958, 958, 962, 325, 52,
+ /* 480 */ 52, 959, 960, 176, 415, 78, 99, 100, 90, 842,
+ /* 490 */ 842, 854, 857, 846, 846, 97, 97, 98, 98, 98,
+ /* 500 */ 98, 379, 96, 96, 96, 96, 95, 95, 94, 94,
+ /* 510 */ 94, 93, 351, 325, 428, 418, 298, 959, 960, 962,
+ /* 520 */ 81, 99, 88, 90, 842, 842, 854, 857, 846, 846,
+ /* 530 */ 97, 97, 98, 98, 98, 98, 717, 96, 96, 96,
+ /* 540 */ 96, 95, 95, 94, 94, 94, 93, 351, 325, 843,
+ /* 550 */ 843, 855, 858, 996, 318, 343, 379, 100, 90, 842,
+ /* 560 */ 842, 854, 857, 846, 846, 97, 97, 98, 98, 98,
+ /* 570 */ 98, 450, 96, 96, 96, 96, 95, 95, 94, 94,
+ /* 580 */ 94, 93, 351, 325, 350, 350, 350, 260, 377, 340,
+ /* 590 */ 929, 52, 52, 90, 842, 842, 854, 857, 846, 846,
+ /* 600 */ 97, 97, 98, 98, 98, 98, 361, 96, 96, 96,
+ /* 610 */ 96, 95, 95, 94, 94, 94, 93, 351, 86, 445,
+ /* 620 */ 847, 3, 1203, 361, 360, 378, 344, 813, 958, 958,
+ /* 630 */ 1300, 86, 445, 729, 3, 212, 169, 287, 405, 282,
+ /* 640 */ 404, 199, 232, 450, 300, 760, 83, 84, 280, 245,
+ /* 650 */ 262, 365, 251, 85, 352, 352, 92, 89, 178, 83,
+ /* 660 */ 84, 242, 412, 52, 52, 448, 85, 352, 352, 246,
+ /* 670 */ 959, 960, 194, 455, 670, 402, 399, 398, 448, 243,
+ /* 680 */ 221, 114, 434, 776, 361, 450, 397, 268, 747, 224,
+ /* 690 */ 224, 132, 132, 198, 832, 434, 452, 451, 428, 427,
+ /* 700 */ 819, 415, 734, 713, 132, 52, 52, 832, 268, 452,
+ /* 710 */ 451, 734, 194, 819, 363, 402, 399, 398, 450, 1271,
+ /* 720 */ 1271, 23, 958, 958, 86, 445, 397, 3, 228, 429,
+ /* 730 */ 895, 824, 824, 826, 827, 19, 203, 720, 52, 52,
+ /* 740 */ 428, 408, 439, 249, 824, 824, 826, 827, 19, 229,
+ /* 750 */ 403, 153, 83, 84, 761, 177, 241, 450, 721, 85,
+ /* 760 */ 352, 352, 120, 157, 959, 960, 58, 977, 409, 355,
+ /* 770 */ 330, 448, 268, 428, 430, 320, 790, 32, 32, 86,
+ /* 780 */ 445, 776, 3, 341, 98, 98, 98, 98, 434, 96,
+ /* 790 */ 96, 96, 96, 95, 95, 94, 94, 94, 93, 351,
+ /* 800 */ 832, 120, 452, 451, 813, 887, 819, 83, 84, 977,
+ /* 810 */ 813, 132, 410, 920, 85, 352, 352, 132, 407, 789,
+ /* 820 */ 958, 958, 92, 89, 178, 917, 448, 262, 370, 261,
+ /* 830 */ 82, 914, 80, 262, 370, 261, 776, 824, 824, 826,
+ /* 840 */ 827, 19, 934, 434, 96, 96, 96, 96, 95, 95,
+ /* 850 */ 94, 94, 94, 93, 351, 832, 74, 452, 451, 958,
+ /* 860 */ 958, 819, 959, 960, 120, 92, 89, 178, 945, 2,
+ /* 870 */ 918, 965, 268, 1, 976, 76, 445, 762, 3, 708,
+ /* 880 */ 901, 901, 387, 958, 958, 757, 919, 371, 740, 778,
+ /* 890 */ 756, 257, 824, 824, 826, 827, 19, 417, 741, 450,
+ /* 900 */ 24, 959, 960, 83, 84, 369, 958, 958, 177, 226,
+ /* 910 */ 85, 352, 352, 885, 315, 314, 313, 215, 311, 10,
+ /* 920 */ 10, 683, 448, 349, 348, 959, 960, 909, 777, 157,
+ /* 930 */ 120, 958, 958, 337, 776, 416, 711, 310, 450, 434,
+ /* 940 */ 450, 321, 450, 791, 103, 200, 175, 450, 959, 960,
+ /* 950 */ 908, 832, 792, 452, 451, 9, 9, 819, 10, 10,
+ /* 960 */ 52, 52, 51, 51, 180, 716, 248, 10, 10, 171,
+ /* 970 */ 170, 167, 339, 959, 960, 247, 984, 702, 702, 450,
+ /* 980 */ 715, 233, 686, 982, 889, 983, 182, 914, 824, 824,
+ /* 990 */ 826, 827, 19, 183, 256, 423, 132, 181, 394, 10,
+ /* 1000 */ 10, 889, 891, 749, 958, 958, 917, 268, 985, 198,
+ /* 1010 */ 985, 349, 348, 425, 415, 299, 817, 832, 326, 825,
+ /* 1020 */ 120, 332, 133, 819, 268, 98, 98, 98, 98, 91,
+ /* 1030 */ 96, 96, 96, 96, 95, 95, 94, 94, 94, 93,
+ /* 1040 */ 351, 157, 810, 371, 382, 359, 959, 960, 358, 268,
+ /* 1050 */ 450, 918, 368, 324, 824, 824, 826, 450, 709, 450,
+ /* 1060 */ 264, 380, 889, 450, 877, 746, 253, 919, 255, 433,
+ /* 1070 */ 36, 36, 234, 450, 234, 120, 269, 37, 37, 12,
+ /* 1080 */ 12, 334, 272, 27, 27, 450, 330, 118, 450, 162,
+ /* 1090 */ 742, 280, 450, 38, 38, 450, 985, 356, 985, 450,
+ /* 1100 */ 709, 1210, 450, 132, 450, 39, 39, 450, 40, 40,
+ /* 1110 */ 450, 362, 41, 41, 450, 42, 42, 450, 254, 28,
+ /* 1120 */ 28, 450, 29, 29, 31, 31, 450, 43, 43, 450,
+ /* 1130 */ 44, 44, 450, 714, 45, 45, 450, 11, 11, 767,
+ /* 1140 */ 450, 46, 46, 450, 268, 450, 105, 105, 450, 47,
+ /* 1150 */ 47, 450, 48, 48, 450, 237, 33, 33, 450, 172,
+ /* 1160 */ 49, 49, 450, 50, 50, 34, 34, 274, 122, 122,
+ /* 1170 */ 450, 123, 123, 450, 124, 124, 450, 898, 56, 56,
+ /* 1180 */ 450, 897, 35, 35, 450, 267, 450, 817, 450, 817,
+ /* 1190 */ 106, 106, 450, 53, 53, 385, 107, 107, 450, 817,
+ /* 1200 */ 108, 108, 817, 450, 104, 104, 121, 121, 119, 119,
+ /* 1210 */ 450, 117, 112, 112, 450, 276, 450, 225, 111, 111,
+ /* 1220 */ 450, 730, 450, 109, 109, 450, 673, 674, 675, 912,
+ /* 1230 */ 110, 110, 317, 998, 55, 55, 57, 57, 692, 331,
+ /* 1240 */ 54, 54, 26, 26, 696, 30, 30, 317, 937, 197,
+ /* 1250 */ 196, 195, 335, 281, 336, 446, 331, 745, 689, 436,
+ /* 1260 */ 440, 444, 120, 72, 386, 223, 175, 345, 757, 933,
+ /* 1270 */ 20, 286, 319, 756, 815, 372, 374, 202, 202, 202,
+ /* 1280 */ 263, 395, 285, 74, 208, 21, 696, 719, 718, 884,
+ /* 1290 */ 120, 120, 120, 120, 120, 754, 278, 828, 77, 74,
+ /* 1300 */ 726, 727, 785, 783, 880, 202, 999, 208, 894, 893,
+ /* 1310 */ 894, 893, 694, 816, 763, 116, 774, 1290, 431, 432,
+ /* 1320 */ 302, 999, 390, 303, 823, 697, 691, 680, 159, 289,
+ /* 1330 */ 679, 884, 681, 952, 291, 218, 293, 7, 316, 828,
+ /* 1340 */ 173, 805, 259, 364, 252, 911, 376, 713, 295, 435,
+ /* 1350 */ 308, 168, 955, 993, 135, 400, 990, 284, 882, 881,
+ /* 1360 */ 205, 928, 926, 59, 333, 62, 144, 156, 130, 72,
+ /* 1370 */ 802, 366, 367, 393, 137, 185, 189, 160, 139, 383,
+ /* 1380 */ 67, 896, 140, 141, 142, 148, 389, 812, 775, 266,
+ /* 1390 */ 219, 190, 154, 391, 913, 876, 271, 406, 191, 322,
+ /* 1400 */ 682, 733, 192, 342, 732, 724, 731, 711, 723, 421,
+ /* 1410 */ 705, 71, 323, 6, 204, 771, 288, 79, 297, 346,
+ /* 1420 */ 772, 704, 290, 283, 703, 770, 292, 294, 967, 239,
+ /* 1430 */ 769, 102, 862, 438, 426, 240, 424, 442, 73, 213,
+ /* 1440 */ 688, 238, 22, 453, 953, 214, 217, 216, 454, 677,
+ /* 1450 */ 676, 671, 753, 125, 115, 235, 126, 669, 353, 166,
+ /* 1460 */ 127, 244, 179, 357, 306, 304, 305, 307, 113, 892,
+ /* 1470 */ 327, 890, 811, 328, 134, 128, 136, 138, 743, 258,
+ /* 1480 */ 907, 184, 143, 129, 910, 186, 63, 64, 145, 187,
+ /* 1490 */ 906, 65, 8, 66, 13, 188, 202, 899, 265, 149,
+ /* 1500 */ 987, 388, 150, 685, 161, 392, 285, 193, 279, 396,
+ /* 1510 */ 151, 401, 68, 14, 15, 722, 69, 236, 831, 131,
+ /* 1520 */ 830, 860, 70, 751, 16, 414, 755, 4, 174, 220,
+ /* 1530 */ 222, 784, 201, 152, 779, 77, 74, 17, 18, 875,
+ /* 1540 */ 861, 859, 916, 864, 915, 207, 206, 942, 163, 437,
+ /* 1550 */ 948, 943, 164, 209, 1002, 441, 863, 165, 210, 829,
+ /* 1560 */ 695, 87, 312, 211, 1292, 1291, 309,
};
static const YYCODETYPE yy_lookahead[] = {
/* 0 */ 19, 95, 53, 97, 22, 24, 24, 101, 27, 28,
@@ -132054,281 +133352,290 @@ static const YYCODETYPE yy_lookahead[] = {
/* 90 */ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
/* 100 */ 39, 40, 41, 152, 43, 44, 45, 46, 47, 48,
/* 110 */ 49, 50, 51, 52, 53, 144, 145, 146, 147, 19,
- /* 120 */ 137, 22, 139, 172, 173, 52, 53, 27, 28, 29,
+ /* 120 */ 16, 22, 92, 172, 173, 52, 53, 27, 28, 29,
/* 130 */ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
/* 140 */ 40, 41, 81, 43, 44, 45, 46, 47, 48, 49,
/* 150 */ 50, 51, 52, 53, 55, 56, 19, 152, 207, 208,
/* 160 */ 115, 24, 117, 118, 27, 28, 29, 30, 31, 32,
/* 170 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 79,
/* 180 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 190 */ 53, 19, 0, 1, 2, 23, 97, 98, 193, 27,
+ /* 190 */ 53, 19, 88, 157, 90, 23, 97, 98, 193, 27,
/* 200 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
/* 210 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47,
- /* 220 */ 48, 49, 50, 51, 52, 53, 19, 22, 23, 163,
- /* 230 */ 23, 26, 190, 191, 27, 28, 29, 30, 31, 32,
+ /* 220 */ 48, 49, 50, 51, 52, 53, 19, 22, 23, 172,
+ /* 230 */ 23, 26, 119, 120, 27, 28, 29, 30, 31, 32,
/* 240 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 187,
/* 250 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 260 */ 53, 19, 196, 22, 23, 23, 49, 26, 92, 27,
+ /* 260 */ 53, 19, 22, 23, 228, 23, 26, 231, 152, 27,
/* 270 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
/* 280 */ 38, 39, 40, 41, 172, 43, 44, 45, 46, 47,
/* 290 */ 48, 49, 50, 51, 52, 53, 19, 221, 222, 223,
- /* 300 */ 23, 96, 119, 120, 27, 28, 29, 30, 31, 32,
- /* 310 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 172,
+ /* 300 */ 23, 96, 152, 172, 27, 28, 29, 30, 31, 32,
+ /* 310 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 152,
/* 320 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 330 */ 53, 19, 152, 116, 221, 222, 223, 96, 121, 27,
+ /* 330 */ 53, 19, 0, 1, 2, 23, 96, 190, 191, 27,
/* 340 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- /* 350 */ 38, 39, 40, 41, 241, 43, 44, 45, 46, 47,
- /* 360 */ 48, 49, 50, 51, 52, 53, 19, 157, 168, 169,
- /* 370 */ 170, 22, 190, 191, 27, 28, 29, 30, 31, 32,
- /* 380 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 30,
+ /* 350 */ 38, 39, 40, 41, 238, 43, 44, 45, 46, 47,
+ /* 360 */ 48, 49, 50, 51, 52, 53, 19, 185, 218, 221,
+ /* 370 */ 222, 223, 152, 152, 27, 28, 29, 30, 31, 32,
+ /* 380 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 241,
/* 390 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
- /* 400 */ 53, 19, 172, 152, 55, 56, 24, 247, 248, 27,
+ /* 400 */ 53, 19, 152, 168, 169, 170, 22, 190, 191, 27,
/* 410 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
/* 420 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47,
- /* 430 */ 48, 49, 50, 51, 52, 53, 146, 147, 228, 179,
- /* 440 */ 180, 231, 185, 19, 172, 173, 97, 98, 188, 26,
- /* 450 */ 138, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- /* 460 */ 36, 37, 38, 39, 40, 41, 107, 43, 44, 45,
- /* 470 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 207,
- /* 480 */ 208, 30, 31, 32, 33, 138, 27, 28, 29, 30,
+ /* 430 */ 48, 49, 50, 51, 52, 53, 19, 19, 218, 55,
+ /* 440 */ 56, 24, 22, 152, 27, 28, 29, 30, 31, 32,
+ /* 450 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 152,
+ /* 460 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 470 */ 53, 250, 194, 195, 56, 55, 56, 55, 19, 172,
+ /* 480 */ 173, 97, 98, 152, 206, 138, 27, 28, 29, 30,
/* 490 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
- /* 500 */ 41, 250, 43, 44, 45, 46, 47, 48, 49, 50,
- /* 510 */ 51, 52, 53, 19, 168, 169, 170, 7, 8, 9,
- /* 520 */ 19, 152, 28, 29, 30, 31, 32, 33, 34, 35,
- /* 530 */ 36, 37, 38, 39, 40, 41, 152, 43, 44, 45,
- /* 540 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 108,
- /* 550 */ 109, 110, 101, 130, 53, 152, 172, 173, 29, 30,
+ /* 500 */ 41, 152, 43, 44, 45, 46, 47, 48, 49, 50,
+ /* 510 */ 51, 52, 53, 19, 207, 208, 152, 97, 98, 97,
+ /* 520 */ 138, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ /* 530 */ 36, 37, 38, 39, 40, 41, 181, 43, 44, 45,
+ /* 540 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 30,
+ /* 550 */ 31, 32, 33, 247, 248, 19, 152, 28, 29, 30,
/* 560 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
/* 570 */ 41, 152, 43, 44, 45, 46, 47, 48, 49, 50,
- /* 580 */ 51, 52, 53, 19, 20, 116, 22, 23, 169, 170,
- /* 590 */ 121, 207, 85, 55, 56, 26, 19, 20, 101, 22,
- /* 600 */ 99, 100, 101, 102, 103, 104, 105, 152, 152, 112,
- /* 610 */ 210, 47, 48, 112, 152, 108, 109, 110, 54, 55,
- /* 620 */ 56, 221, 222, 223, 47, 48, 119, 120, 172, 173,
- /* 630 */ 66, 54, 55, 56, 152, 97, 98, 99, 148, 149,
- /* 640 */ 102, 103, 104, 66, 154, 23, 156, 83, 26, 230,
- /* 650 */ 152, 113, 152, 163, 172, 173, 92, 92, 21, 95,
- /* 660 */ 83, 97, 98, 207, 208, 101, 152, 98, 186, 92,
- /* 670 */ 172, 173, 95, 218, 97, 98, 152, 99, 101, 217,
- /* 680 */ 102, 103, 104, 152, 119, 120, 196, 55, 56, 19,
- /* 690 */ 20, 113, 22, 124, 163, 11, 132, 133, 134, 135,
- /* 700 */ 136, 152, 152, 172, 173, 207, 208, 152, 152, 132,
- /* 710 */ 133, 134, 135, 136, 164, 152, 84, 47, 48, 49,
- /* 720 */ 98, 181, 152, 152, 54, 55, 56, 196, 91, 97,
- /* 730 */ 98, 160, 218, 163, 244, 164, 66, 152, 207, 208,
- /* 740 */ 103, 217, 172, 173, 19, 20, 124, 22, 193, 38,
- /* 750 */ 39, 40, 41, 83, 43, 44, 45, 46, 47, 48,
- /* 760 */ 49, 50, 51, 52, 53, 95, 196, 97, 98, 85,
- /* 770 */ 152, 101, 47, 48, 181, 85, 92, 140, 193, 54,
- /* 780 */ 55, 56, 92, 49, 195, 55, 56, 175, 163, 55,
- /* 790 */ 56, 66, 108, 109, 110, 206, 163, 242, 108, 109,
- /* 800 */ 110, 175, 132, 133, 134, 135, 136, 152, 83, 43,
- /* 810 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 820 */ 95, 196, 97, 98, 55, 56, 101, 97, 98, 196,
- /* 830 */ 152, 97, 98, 221, 222, 223, 211, 212, 22, 23,
- /* 840 */ 19, 20, 181, 22, 19, 152, 152, 221, 222, 223,
- /* 850 */ 172, 173, 219, 19, 124, 30, 238, 132, 133, 134,
- /* 860 */ 135, 136, 169, 170, 186, 232, 97, 98, 47, 48,
- /* 870 */ 237, 152, 217, 152, 5, 54, 55, 56, 152, 10,
- /* 880 */ 11, 12, 13, 14, 47, 48, 17, 66, 47, 48,
- /* 890 */ 56, 172, 173, 124, 194, 195, 55, 56, 172, 173,
- /* 900 */ 152, 152, 22, 152, 83, 186, 206, 108, 109, 110,
- /* 910 */ 22, 23, 96, 152, 193, 12, 95, 152, 97, 98,
- /* 920 */ 172, 173, 101, 230, 152, 164, 12, 47, 48, 60,
- /* 930 */ 152, 62, 107, 207, 186, 55, 56, 112, 97, 98,
- /* 940 */ 71, 100, 193, 152, 183, 152, 185, 152, 107, 152,
- /* 950 */ 109, 82, 16, 132, 133, 134, 135, 136, 89, 152,
- /* 960 */ 57, 92, 93, 172, 173, 172, 173, 172, 173, 132,
- /* 970 */ 133, 57, 152, 132, 133, 95, 73, 97, 75, 55,
- /* 980 */ 56, 101, 163, 114, 96, 245, 246, 73, 85, 75,
- /* 990 */ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- /* 1000 */ 48, 49, 50, 51, 52, 53, 194, 195, 152, 171,
- /* 1010 */ 141, 152, 132, 133, 134, 196, 225, 179, 206, 65,
- /* 1020 */ 152, 97, 98, 152, 88, 152, 90, 152, 172, 173,
- /* 1030 */ 152, 219, 78, 152, 152, 238, 152, 152, 219, 152,
- /* 1040 */ 86, 152, 152, 172, 173, 238, 152, 172, 173, 152,
- /* 1050 */ 172, 173, 152, 172, 173, 213, 237, 172, 173, 172,
- /* 1060 */ 173, 172, 173, 211, 212, 111, 172, 173, 152, 172,
- /* 1070 */ 173, 152, 172, 173, 152, 193, 140, 193, 152, 59,
- /* 1080 */ 152, 152, 152, 63, 152, 16, 152, 152, 172, 173,
- /* 1090 */ 152, 172, 173, 152, 172, 173, 152, 77, 172, 173,
- /* 1100 */ 172, 173, 172, 173, 172, 173, 172, 173, 152, 250,
- /* 1110 */ 172, 173, 61, 172, 173, 152, 172, 173, 152, 92,
- /* 1120 */ 152, 70, 152, 152, 152, 26, 152, 100, 172, 173,
- /* 1130 */ 152, 24, 152, 22, 152, 172, 173, 152, 172, 173,
- /* 1140 */ 172, 173, 172, 173, 172, 173, 172, 173, 152, 152,
- /* 1150 */ 172, 173, 172, 173, 172, 173, 152, 88, 152, 90,
- /* 1160 */ 152, 55, 55, 152, 193, 152, 55, 152, 172, 173,
- /* 1170 */ 26, 152, 163, 163, 152, 19, 172, 173, 172, 173,
- /* 1180 */ 172, 173, 22, 172, 173, 172, 173, 172, 173, 55,
- /* 1190 */ 193, 172, 173, 152, 172, 173, 166, 167, 166, 167,
- /* 1200 */ 163, 163, 163, 97, 97, 196, 196, 163, 97, 55,
- /* 1210 */ 23, 199, 56, 26, 22, 22, 24, 100, 101, 55,
- /* 1220 */ 23, 209, 123, 26, 23, 23, 23, 26, 26, 26,
- /* 1230 */ 37, 97, 152, 196, 196, 196, 23, 7, 8, 26,
- /* 1240 */ 196, 23, 23, 152, 26, 26, 23, 132, 133, 26,
- /* 1250 */ 106, 97, 132, 133, 23, 152, 152, 26, 210, 191,
- /* 1260 */ 152, 97, 152, 234, 152, 152, 152, 233, 152, 210,
- /* 1270 */ 152, 152, 210, 152, 152, 152, 152, 152, 152, 197,
- /* 1280 */ 210, 198, 122, 150, 239, 201, 214, 214, 201, 239,
- /* 1290 */ 214, 227, 200, 184, 198, 155, 67, 243, 122, 22,
- /* 1300 */ 159, 159, 69, 176, 175, 175, 175, 240, 180, 159,
- /* 1310 */ 220, 240, 27, 130, 18, 18, 159, 158, 220, 137,
- /* 1320 */ 159, 189, 236, 158, 74, 159, 159, 158, 192, 192,
- /* 1330 */ 192, 192, 235, 22, 189, 189, 201, 159, 158, 177,
- /* 1340 */ 159, 107, 158, 76, 201, 177, 174, 174, 201, 174,
- /* 1350 */ 106, 177, 182, 174, 107, 159, 22, 125, 159, 182,
- /* 1360 */ 174, 176, 174, 174, 216, 216, 215, 215, 177, 216,
- /* 1370 */ 215, 53, 137, 128, 216, 177, 127, 129, 215, 126,
- /* 1380 */ 25, 13, 162, 26, 6, 161, 165, 165, 178, 153,
- /* 1390 */ 153, 151, 151, 151, 151, 224, 4, 3, 22, 142,
- /* 1400 */ 15, 94, 16, 178, 165, 205, 23, 202, 204, 203,
- /* 1410 */ 201, 23, 120, 131, 111, 20, 226, 123, 125, 16,
- /* 1420 */ 1, 123, 131, 229, 229, 111, 37, 37, 56, 64,
- /* 1430 */ 122, 1, 5, 22, 107, 140, 80, 87, 26, 80,
- /* 1440 */ 107, 72, 24, 20, 19, 105, 22, 112, 22, 79,
- /* 1450 */ 22, 58, 23, 22, 79, 22, 249, 249, 246, 79,
- /* 1460 */ 23, 23, 23, 116, 68, 22, 26, 23, 22, 56,
- /* 1470 */ 122, 23, 23, 64, 22, 124, 26, 26, 64, 64,
- /* 1480 */ 23, 23, 23, 23, 11, 23, 22, 26, 23, 22,
- /* 1490 */ 24, 1, 23, 22, 26, 122, 24, 23, 22, 122,
- /* 1500 */ 23, 23, 22, 122, 122, 23, 15,
+ /* 580 */ 51, 52, 53, 19, 168, 169, 170, 238, 19, 53,
+ /* 590 */ 152, 172, 173, 29, 30, 31, 32, 33, 34, 35,
+ /* 600 */ 36, 37, 38, 39, 40, 41, 152, 43, 44, 45,
+ /* 610 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 20,
+ /* 620 */ 101, 22, 23, 169, 170, 56, 207, 85, 55, 56,
+ /* 630 */ 23, 19, 20, 26, 22, 99, 100, 101, 102, 103,
+ /* 640 */ 104, 105, 238, 152, 152, 210, 47, 48, 112, 152,
+ /* 650 */ 108, 109, 110, 54, 55, 56, 221, 222, 223, 47,
+ /* 660 */ 48, 119, 120, 172, 173, 66, 54, 55, 56, 152,
+ /* 670 */ 97, 98, 99, 148, 149, 102, 103, 104, 66, 154,
+ /* 680 */ 23, 156, 83, 26, 230, 152, 113, 152, 163, 194,
+ /* 690 */ 195, 92, 92, 30, 95, 83, 97, 98, 207, 208,
+ /* 700 */ 101, 206, 179, 180, 92, 172, 173, 95, 152, 97,
+ /* 710 */ 98, 188, 99, 101, 219, 102, 103, 104, 152, 119,
+ /* 720 */ 120, 196, 55, 56, 19, 20, 113, 22, 193, 163,
+ /* 730 */ 11, 132, 133, 134, 135, 136, 24, 65, 172, 173,
+ /* 740 */ 207, 208, 250, 152, 132, 133, 134, 135, 136, 193,
+ /* 750 */ 78, 84, 47, 48, 49, 98, 199, 152, 86, 54,
+ /* 760 */ 55, 56, 196, 152, 97, 98, 209, 55, 163, 244,
+ /* 770 */ 107, 66, 152, 207, 208, 164, 175, 172, 173, 19,
+ /* 780 */ 20, 124, 22, 111, 38, 39, 40, 41, 83, 43,
+ /* 790 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ /* 800 */ 95, 196, 97, 98, 85, 152, 101, 47, 48, 97,
+ /* 810 */ 85, 92, 207, 193, 54, 55, 56, 92, 49, 175,
+ /* 820 */ 55, 56, 221, 222, 223, 12, 66, 108, 109, 110,
+ /* 830 */ 137, 163, 139, 108, 109, 110, 26, 132, 133, 134,
+ /* 840 */ 135, 136, 152, 83, 43, 44, 45, 46, 47, 48,
+ /* 850 */ 49, 50, 51, 52, 53, 95, 26, 97, 98, 55,
+ /* 860 */ 56, 101, 97, 98, 196, 221, 222, 223, 146, 147,
+ /* 870 */ 57, 171, 152, 22, 26, 19, 20, 49, 22, 179,
+ /* 880 */ 108, 109, 110, 55, 56, 116, 73, 219, 75, 124,
+ /* 890 */ 121, 152, 132, 133, 134, 135, 136, 163, 85, 152,
+ /* 900 */ 232, 97, 98, 47, 48, 237, 55, 56, 98, 5,
+ /* 910 */ 54, 55, 56, 193, 10, 11, 12, 13, 14, 172,
+ /* 920 */ 173, 17, 66, 47, 48, 97, 98, 152, 124, 152,
+ /* 930 */ 196, 55, 56, 186, 124, 152, 106, 160, 152, 83,
+ /* 940 */ 152, 164, 152, 61, 22, 211, 212, 152, 97, 98,
+ /* 950 */ 152, 95, 70, 97, 98, 172, 173, 101, 172, 173,
+ /* 960 */ 172, 173, 172, 173, 60, 181, 62, 172, 173, 47,
+ /* 970 */ 48, 123, 186, 97, 98, 71, 100, 55, 56, 152,
+ /* 980 */ 181, 186, 21, 107, 152, 109, 82, 163, 132, 133,
+ /* 990 */ 134, 135, 136, 89, 16, 207, 92, 93, 19, 172,
+ /* 1000 */ 173, 169, 170, 195, 55, 56, 12, 152, 132, 30,
+ /* 1010 */ 134, 47, 48, 186, 206, 225, 152, 95, 114, 97,
+ /* 1020 */ 196, 245, 246, 101, 152, 38, 39, 40, 41, 42,
+ /* 1030 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+ /* 1040 */ 53, 152, 163, 219, 152, 141, 97, 98, 193, 152,
+ /* 1050 */ 152, 57, 91, 164, 132, 133, 134, 152, 55, 152,
+ /* 1060 */ 152, 237, 230, 152, 103, 193, 88, 73, 90, 75,
+ /* 1070 */ 172, 173, 183, 152, 185, 196, 152, 172, 173, 172,
+ /* 1080 */ 173, 217, 152, 172, 173, 152, 107, 22, 152, 24,
+ /* 1090 */ 193, 112, 152, 172, 173, 152, 132, 242, 134, 152,
+ /* 1100 */ 97, 140, 152, 92, 152, 172, 173, 152, 172, 173,
+ /* 1110 */ 152, 100, 172, 173, 152, 172, 173, 152, 140, 172,
+ /* 1120 */ 173, 152, 172, 173, 172, 173, 152, 172, 173, 152,
+ /* 1130 */ 172, 173, 152, 152, 172, 173, 152, 172, 173, 213,
+ /* 1140 */ 152, 172, 173, 152, 152, 152, 172, 173, 152, 172,
+ /* 1150 */ 173, 152, 172, 173, 152, 210, 172, 173, 152, 26,
+ /* 1160 */ 172, 173, 152, 172, 173, 172, 173, 152, 172, 173,
+ /* 1170 */ 152, 172, 173, 152, 172, 173, 152, 59, 172, 173,
+ /* 1180 */ 152, 63, 172, 173, 152, 193, 152, 152, 152, 152,
+ /* 1190 */ 172, 173, 152, 172, 173, 77, 172, 173, 152, 152,
+ /* 1200 */ 172, 173, 152, 152, 172, 173, 172, 173, 172, 173,
+ /* 1210 */ 152, 22, 172, 173, 152, 152, 152, 22, 172, 173,
+ /* 1220 */ 152, 152, 152, 172, 173, 152, 7, 8, 9, 163,
+ /* 1230 */ 172, 173, 22, 23, 172, 173, 172, 173, 166, 167,
+ /* 1240 */ 172, 173, 172, 173, 55, 172, 173, 22, 23, 108,
+ /* 1250 */ 109, 110, 217, 152, 217, 166, 167, 163, 163, 163,
+ /* 1260 */ 163, 163, 196, 130, 217, 211, 212, 217, 116, 23,
+ /* 1270 */ 22, 101, 26, 121, 23, 23, 23, 26, 26, 26,
+ /* 1280 */ 23, 23, 112, 26, 26, 37, 97, 100, 101, 55,
+ /* 1290 */ 196, 196, 196, 196, 196, 23, 23, 55, 26, 26,
+ /* 1300 */ 7, 8, 23, 152, 23, 26, 96, 26, 132, 132,
+ /* 1310 */ 134, 134, 23, 152, 152, 26, 152, 122, 152, 191,
+ /* 1320 */ 152, 96, 234, 152, 152, 152, 152, 152, 197, 210,
+ /* 1330 */ 152, 97, 152, 152, 210, 233, 210, 198, 150, 97,
+ /* 1340 */ 184, 201, 239, 214, 214, 201, 239, 180, 214, 227,
+ /* 1350 */ 200, 198, 155, 67, 243, 176, 69, 175, 175, 175,
+ /* 1360 */ 122, 159, 159, 240, 159, 240, 22, 220, 27, 130,
+ /* 1370 */ 201, 18, 159, 18, 189, 158, 158, 220, 192, 159,
+ /* 1380 */ 137, 236, 192, 192, 192, 189, 74, 189, 159, 235,
+ /* 1390 */ 159, 158, 22, 177, 201, 201, 159, 107, 158, 177,
+ /* 1400 */ 159, 174, 158, 76, 174, 182, 174, 106, 182, 125,
+ /* 1410 */ 174, 107, 177, 22, 159, 216, 215, 137, 159, 53,
+ /* 1420 */ 216, 176, 215, 174, 174, 216, 215, 215, 174, 229,
+ /* 1430 */ 216, 129, 224, 177, 126, 229, 127, 177, 128, 25,
+ /* 1440 */ 162, 226, 26, 161, 13, 153, 6, 153, 151, 151,
+ /* 1450 */ 151, 151, 205, 165, 178, 178, 165, 4, 3, 22,
+ /* 1460 */ 165, 142, 15, 94, 202, 204, 203, 201, 16, 23,
+ /* 1470 */ 249, 23, 120, 249, 246, 111, 131, 123, 20, 16,
+ /* 1480 */ 1, 125, 123, 111, 56, 64, 37, 37, 131, 122,
+ /* 1490 */ 1, 37, 5, 37, 22, 107, 26, 80, 140, 80,
+ /* 1500 */ 87, 72, 107, 20, 24, 19, 112, 105, 23, 79,
+ /* 1510 */ 22, 79, 22, 22, 22, 58, 22, 79, 23, 68,
+ /* 1520 */ 23, 23, 26, 116, 22, 26, 23, 22, 122, 23,
+ /* 1530 */ 23, 56, 64, 22, 124, 26, 26, 64, 64, 23,
+ /* 1540 */ 23, 23, 23, 11, 23, 22, 26, 23, 22, 24,
+ /* 1550 */ 1, 23, 22, 26, 251, 24, 23, 22, 122, 23,
+ /* 1560 */ 23, 22, 15, 122, 122, 122, 23,
};
-#define YY_SHIFT_USE_DFLT (-95)
-#define YY_SHIFT_COUNT (442)
-#define YY_SHIFT_MIN (-94)
-#define YY_SHIFT_MAX (1491)
+#define YY_SHIFT_USE_DFLT (1567)
+#define YY_SHIFT_COUNT (455)
+#define YY_SHIFT_MIN (-94)
+#define YY_SHIFT_MAX (1549)
static const short yy_shift_ofst[] = {
- /* 0 */ 40, 564, 869, 577, 725, 725, 725, 725, 690, -19,
- /* 10 */ 16, 16, 100, 725, 725, 725, 725, 725, 725, 725,
- /* 20 */ 841, 841, 538, 507, 684, 565, 61, 137, 172, 207,
- /* 30 */ 242, 277, 312, 347, 382, 424, 424, 424, 424, 424,
- /* 40 */ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
- /* 50 */ 459, 424, 494, 529, 529, 670, 725, 725, 725, 725,
- /* 60 */ 725, 725, 725, 725, 725, 725, 725, 725, 725, 725,
- /* 70 */ 725, 725, 725, 725, 725, 725, 725, 725, 725, 725,
- /* 80 */ 725, 725, 725, 725, 821, 725, 725, 725, 725, 725,
- /* 90 */ 725, 725, 725, 725, 725, 725, 725, 725, 952, 711,
- /* 100 */ 711, 711, 711, 711, 766, 23, 32, 924, 637, 825,
- /* 110 */ 837, 837, 924, 73, 183, -51, -95, -95, -95, 501,
- /* 120 */ 501, 501, 903, 903, 632, 205, 241, 924, 924, 924,
- /* 130 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924,
- /* 140 */ 924, 924, 924, 924, 924, 924, 924, 192, 1027, 1106,
- /* 150 */ 1106, 183, 176, 176, 176, 176, 176, 176, -95, -95,
- /* 160 */ -95, 880, -94, -94, 578, 734, 99, 730, 769, 349,
- /* 170 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924,
- /* 180 */ 924, 924, 924, 924, 924, 924, 924, 954, 954, 954,
- /* 190 */ 924, 924, 622, 924, 924, 924, -18, 924, 924, 914,
- /* 200 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924,
- /* 210 */ 441, 1020, 1107, 1107, 1107, 569, 45, 217, 510, 423,
- /* 220 */ 834, 834, 1156, 423, 1156, 1144, 1187, 359, 1051, 834,
- /* 230 */ -17, 1051, 1051, 1099, 469, 1192, 1229, 1176, 1176, 1233,
- /* 240 */ 1233, 1176, 1277, 1285, 1183, 1296, 1296, 1296, 1296, 1176,
- /* 250 */ 1297, 1183, 1277, 1285, 1285, 1183, 1176, 1297, 1182, 1250,
- /* 260 */ 1176, 1176, 1297, 1311, 1176, 1297, 1176, 1297, 1311, 1234,
- /* 270 */ 1234, 1234, 1267, 1311, 1234, 1244, 1234, 1267, 1234, 1234,
- /* 280 */ 1232, 1247, 1232, 1247, 1232, 1247, 1232, 1247, 1176, 1334,
- /* 290 */ 1176, 1235, 1311, 1318, 1318, 1311, 1248, 1253, 1245, 1249,
- /* 300 */ 1183, 1355, 1357, 1368, 1368, 1378, 1378, 1378, 1378, -95,
- /* 310 */ -95, -95, -95, -95, -95, -95, -95, 451, 936, 816,
- /* 320 */ 888, 1069, 799, 1111, 1197, 1193, 1201, 1202, 1203, 1213,
- /* 330 */ 1134, 1117, 1230, 497, 1218, 1219, 1154, 1223, 1115, 1120,
- /* 340 */ 1231, 1164, 1160, 1392, 1394, 1376, 1257, 1385, 1307, 1386,
- /* 350 */ 1383, 1388, 1292, 1282, 1303, 1294, 1395, 1293, 1403, 1419,
- /* 360 */ 1298, 1291, 1389, 1390, 1314, 1372, 1365, 1308, 1430, 1427,
- /* 370 */ 1411, 1327, 1295, 1356, 1412, 1359, 1350, 1369, 1333, 1418,
- /* 380 */ 1423, 1425, 1335, 1340, 1424, 1370, 1426, 1428, 1429, 1431,
- /* 390 */ 1375, 1393, 1433, 1380, 1396, 1437, 1438, 1439, 1347, 1443,
- /* 400 */ 1444, 1446, 1440, 1348, 1448, 1449, 1413, 1409, 1452, 1351,
- /* 410 */ 1450, 1414, 1451, 1415, 1457, 1450, 1458, 1459, 1460, 1461,
- /* 420 */ 1462, 1464, 1473, 1465, 1467, 1466, 1468, 1469, 1471, 1472,
- /* 430 */ 1468, 1474, 1476, 1477, 1478, 1480, 1373, 1377, 1381, 1382,
- /* 440 */ 1482, 1491, 1490,
+ /* 0 */ 40, 599, 904, 612, 760, 760, 760, 760, 725, -19,
+ /* 10 */ 16, 16, 100, 760, 760, 760, 760, 760, 760, 760,
+ /* 20 */ 876, 876, 573, 542, 719, 600, 61, 137, 172, 207,
+ /* 30 */ 242, 277, 312, 347, 382, 417, 459, 459, 459, 459,
+ /* 40 */ 459, 459, 459, 459, 459, 459, 459, 459, 459, 459,
+ /* 50 */ 459, 459, 459, 494, 459, 529, 564, 564, 705, 760,
+ /* 60 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760,
+ /* 70 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760,
+ /* 80 */ 760, 760, 760, 760, 760, 760, 760, 760, 760, 760,
+ /* 90 */ 856, 760, 760, 760, 760, 760, 760, 760, 760, 760,
+ /* 100 */ 760, 760, 760, 760, 987, 746, 746, 746, 746, 746,
+ /* 110 */ 801, 23, 32, 949, 961, 979, 964, 964, 949, 73,
+ /* 120 */ 113, -51, 1567, 1567, 1567, 536, 536, 536, 99, 99,
+ /* 130 */ 813, 813, 667, 205, 240, 949, 949, 949, 949, 949,
+ /* 140 */ 949, 949, 949, 949, 949, 949, 949, 949, 949, 949,
+ /* 150 */ 949, 949, 949, 949, 949, 332, 1011, 422, 422, 113,
+ /* 160 */ 30, 30, 30, 30, 30, 30, 1567, 1567, 1567, 922,
+ /* 170 */ -94, -94, 384, 613, 828, 420, 765, 804, 851, 949,
+ /* 180 */ 949, 949, 949, 949, 949, 949, 949, 949, 949, 949,
+ /* 190 */ 949, 949, 949, 949, 949, 672, 672, 672, 949, 949,
+ /* 200 */ 657, 949, 949, 949, -18, 949, 949, 994, 949, 949,
+ /* 210 */ 949, 949, 949, 949, 949, 949, 949, 949, 772, 1118,
+ /* 220 */ 712, 712, 712, 810, 45, 769, 1219, 1133, 418, 418,
+ /* 230 */ 569, 1133, 569, 830, 607, 663, 882, 418, 693, 882,
+ /* 240 */ 882, 848, 1152, 1065, 1286, 1238, 1238, 1287, 1287, 1238,
+ /* 250 */ 1344, 1341, 1239, 1353, 1353, 1353, 1353, 1238, 1355, 1239,
+ /* 260 */ 1344, 1341, 1341, 1239, 1238, 1355, 1243, 1312, 1238, 1238,
+ /* 270 */ 1355, 1370, 1238, 1355, 1238, 1355, 1370, 1290, 1290, 1290,
+ /* 280 */ 1327, 1370, 1290, 1301, 1290, 1327, 1290, 1290, 1284, 1304,
+ /* 290 */ 1284, 1304, 1284, 1304, 1284, 1304, 1238, 1391, 1238, 1280,
+ /* 300 */ 1370, 1366, 1366, 1370, 1302, 1308, 1310, 1309, 1239, 1414,
+ /* 310 */ 1416, 1431, 1431, 1440, 1440, 1440, 1440, 1567, 1567, 1567,
+ /* 320 */ 1567, 1567, 1567, 1567, 1567, 519, 978, 1210, 1225, 104,
+ /* 330 */ 1141, 1189, 1246, 1248, 1251, 1252, 1253, 1257, 1258, 1273,
+ /* 340 */ 1003, 1187, 1293, 1170, 1272, 1279, 1234, 1281, 1176, 1177,
+ /* 350 */ 1289, 1242, 1195, 1453, 1455, 1437, 1319, 1447, 1369, 1452,
+ /* 360 */ 1446, 1448, 1352, 1345, 1364, 1354, 1458, 1356, 1463, 1479,
+ /* 370 */ 1359, 1357, 1449, 1450, 1454, 1456, 1372, 1428, 1421, 1367,
+ /* 380 */ 1489, 1487, 1472, 1388, 1358, 1417, 1470, 1419, 1413, 1429,
+ /* 390 */ 1395, 1480, 1483, 1486, 1394, 1402, 1488, 1430, 1490, 1491,
+ /* 400 */ 1485, 1492, 1432, 1457, 1494, 1438, 1451, 1495, 1497, 1498,
+ /* 410 */ 1496, 1407, 1502, 1503, 1505, 1499, 1406, 1506, 1507, 1475,
+ /* 420 */ 1468, 1511, 1410, 1509, 1473, 1510, 1474, 1516, 1509, 1517,
+ /* 430 */ 1518, 1519, 1520, 1521, 1523, 1532, 1524, 1526, 1525, 1527,
+ /* 440 */ 1528, 1530, 1531, 1527, 1533, 1535, 1536, 1537, 1539, 1436,
+ /* 450 */ 1441, 1442, 1443, 1543, 1547, 1549,
};
#define YY_REDUCE_USE_DFLT (-130)
-#define YY_REDUCE_COUNT (316)
+#define YY_REDUCE_COUNT (324)
#define YY_REDUCE_MIN (-129)
-#define YY_REDUCE_MAX (1243)
+#define YY_REDUCE_MAX (1300)
static const short yy_reduce_ofst[] = {
- /* 0 */ -29, 531, 490, 570, -49, 272, 456, 498, 633, 400,
- /* 10 */ 612, 626, 113, 482, 678, 719, 384, 726, 748, 791,
- /* 20 */ 419, 693, 761, 812, 819, 625, 76, 76, 76, 76,
+ /* 0 */ -29, 566, 525, 605, -49, 307, 491, 533, 668, 435,
+ /* 10 */ 601, 644, 148, 747, 786, 795, 419, 788, 827, 790,
+ /* 20 */ 454, 832, 889, 495, 824, 734, 76, 76, 76, 76,
/* 30 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76,
/* 40 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76,
- /* 50 */ 76, 76, 76, 76, 76, 793, 795, 856, 871, 875,
- /* 60 */ 878, 881, 885, 887, 889, 894, 897, 900, 916, 919,
- /* 70 */ 922, 926, 928, 930, 932, 934, 938, 941, 944, 956,
- /* 80 */ 963, 966, 968, 970, 972, 974, 978, 980, 982, 996,
- /* 90 */ 1004, 1006, 1008, 1011, 1013, 1015, 1019, 1022, 76, 76,
- /* 100 */ 76, 76, 76, 76, 76, 76, 76, 555, 210, 260,
- /* 110 */ 200, 346, 571, 76, 700, 76, 76, 76, 76, 838,
- /* 120 */ 838, 838, 42, 182, 251, 160, 160, 550, 5, 455,
- /* 130 */ 585, 721, 749, 882, 884, 971, 618, 462, 797, 514,
- /* 140 */ 807, 524, 997, -129, 655, 859, 62, 290, 66, 1030,
- /* 150 */ 1032, 589, 1009, 1010, 1037, 1038, 1039, 1044, 740, 852,
- /* 160 */ 1012, 112, 147, 230, 257, 180, 369, 403, 500, 549,
- /* 170 */ 556, 563, 694, 751, 765, 772, 778, 820, 868, 873,
- /* 180 */ 890, 929, 935, 985, 1041, 1080, 1091, 540, 593, 661,
- /* 190 */ 1103, 1104, 842, 1108, 1110, 1112, 1048, 1113, 1114, 1068,
- /* 200 */ 1116, 1118, 1119, 180, 1121, 1122, 1123, 1124, 1125, 1126,
- /* 210 */ 1029, 1034, 1059, 1062, 1070, 842, 1082, 1083, 1133, 1084,
- /* 220 */ 1072, 1073, 1045, 1087, 1050, 1127, 1109, 1128, 1129, 1076,
- /* 230 */ 1064, 1130, 1131, 1092, 1096, 1140, 1054, 1141, 1142, 1067,
- /* 240 */ 1071, 1150, 1090, 1132, 1135, 1136, 1137, 1138, 1139, 1157,
- /* 250 */ 1159, 1143, 1098, 1145, 1146, 1147, 1161, 1165, 1086, 1097,
- /* 260 */ 1166, 1167, 1169, 1162, 1178, 1180, 1181, 1184, 1168, 1172,
- /* 270 */ 1173, 1175, 1170, 1174, 1179, 1185, 1186, 1177, 1188, 1189,
- /* 280 */ 1148, 1151, 1149, 1152, 1153, 1155, 1158, 1163, 1196, 1171,
- /* 290 */ 1199, 1190, 1191, 1194, 1195, 1198, 1200, 1204, 1206, 1205,
- /* 300 */ 1209, 1220, 1224, 1236, 1237, 1240, 1241, 1242, 1243, 1207,
- /* 310 */ 1208, 1212, 1221, 1222, 1210, 1225, 1239,
+ /* 50 */ 76, 76, 76, 76, 76, 76, 76, 76, 783, 898,
+ /* 60 */ 905, 907, 911, 921, 933, 936, 940, 943, 947, 950,
+ /* 70 */ 952, 955, 958, 962, 965, 969, 974, 977, 980, 984,
+ /* 80 */ 988, 991, 993, 996, 999, 1002, 1006, 1010, 1018, 1021,
+ /* 90 */ 1024, 1028, 1032, 1034, 1036, 1040, 1046, 1051, 1058, 1062,
+ /* 100 */ 1064, 1068, 1070, 1073, 76, 76, 76, 76, 76, 76,
+ /* 110 */ 76, 76, 76, 855, 36, 523, 235, 416, 777, 76,
+ /* 120 */ 278, 76, 76, 76, 76, 700, 700, 700, 150, 220,
+ /* 130 */ 147, 217, 221, 306, 306, 611, 5, 535, 556, 620,
+ /* 140 */ 720, 872, 897, 116, 864, 349, 1035, 1037, 404, 1047,
+ /* 150 */ 992, -129, 1050, 492, 62, 722, 879, 1072, 1089, 808,
+ /* 160 */ 1066, 1094, 1095, 1096, 1097, 1098, 776, 1054, 557, 57,
+ /* 170 */ 112, 131, 167, 182, 250, 272, 291, 331, 364, 438,
+ /* 180 */ 497, 517, 591, 653, 690, 739, 775, 798, 892, 908,
+ /* 190 */ 924, 930, 1015, 1063, 1069, 355, 784, 799, 981, 1101,
+ /* 200 */ 926, 1151, 1161, 1162, 945, 1164, 1166, 1128, 1168, 1171,
+ /* 210 */ 1172, 250, 1173, 1174, 1175, 1178, 1180, 1181, 1088, 1102,
+ /* 220 */ 1119, 1124, 1126, 926, 1131, 1139, 1188, 1140, 1129, 1130,
+ /* 230 */ 1103, 1144, 1107, 1179, 1156, 1167, 1182, 1134, 1122, 1183,
+ /* 240 */ 1184, 1150, 1153, 1197, 1111, 1202, 1203, 1123, 1125, 1205,
+ /* 250 */ 1147, 1185, 1169, 1186, 1190, 1191, 1192, 1213, 1217, 1193,
+ /* 260 */ 1157, 1196, 1198, 1194, 1220, 1218, 1145, 1154, 1229, 1231,
+ /* 270 */ 1233, 1216, 1237, 1240, 1241, 1244, 1222, 1227, 1230, 1232,
+ /* 280 */ 1223, 1235, 1236, 1245, 1249, 1226, 1250, 1254, 1199, 1201,
+ /* 290 */ 1204, 1207, 1209, 1211, 1214, 1212, 1255, 1208, 1259, 1215,
+ /* 300 */ 1256, 1200, 1206, 1260, 1247, 1261, 1263, 1262, 1266, 1278,
+ /* 310 */ 1282, 1292, 1294, 1297, 1298, 1299, 1300, 1221, 1224, 1228,
+ /* 320 */ 1288, 1291, 1276, 1277, 1295,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1258, 1248, 1248, 1248, 1180, 1180, 1180, 1180, 1248, 1077,
- /* 10 */ 1106, 1106, 1232, 1309, 1309, 1309, 1309, 1309, 1309, 1179,
- /* 20 */ 1309, 1309, 1309, 1309, 1248, 1081, 1112, 1309, 1309, 1309,
- /* 30 */ 1309, 1309, 1309, 1309, 1309, 1231, 1233, 1120, 1119, 1214,
- /* 40 */ 1093, 1117, 1110, 1114, 1181, 1175, 1176, 1174, 1178, 1182,
- /* 50 */ 1309, 1113, 1144, 1159, 1143, 1309, 1309, 1309, 1309, 1309,
- /* 60 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 70 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 80 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 90 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1153, 1158,
- /* 100 */ 1165, 1157, 1154, 1146, 1145, 1147, 1148, 1309, 1000, 1048,
- /* 110 */ 1309, 1309, 1309, 1149, 1309, 1150, 1162, 1161, 1160, 1239,
- /* 120 */ 1266, 1265, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 130 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 140 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1258, 1248, 1006,
- /* 150 */ 1006, 1309, 1248, 1248, 1248, 1248, 1248, 1248, 1244, 1081,
- /* 160 */ 1072, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 170 */ 1309, 1236, 1234, 1309, 1195, 1309, 1309, 1309, 1309, 1309,
- /* 180 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 190 */ 1309, 1309, 1309, 1309, 1309, 1309, 1077, 1309, 1309, 1309,
- /* 200 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1260,
- /* 210 */ 1309, 1209, 1077, 1077, 1077, 1079, 1061, 1071, 985, 1116,
- /* 220 */ 1095, 1095, 1298, 1116, 1298, 1023, 1280, 1020, 1106, 1095,
- /* 230 */ 1177, 1106, 1106, 1078, 1071, 1309, 1301, 1086, 1086, 1300,
- /* 240 */ 1300, 1086, 1125, 1051, 1116, 1057, 1057, 1057, 1057, 1086,
- /* 250 */ 997, 1116, 1125, 1051, 1051, 1116, 1086, 997, 1213, 1295,
- /* 260 */ 1086, 1086, 997, 1188, 1086, 997, 1086, 997, 1188, 1049,
- /* 270 */ 1049, 1049, 1038, 1188, 1049, 1023, 1049, 1038, 1049, 1049,
- /* 280 */ 1099, 1094, 1099, 1094, 1099, 1094, 1099, 1094, 1086, 1183,
- /* 290 */ 1086, 1309, 1188, 1192, 1192, 1188, 1111, 1100, 1109, 1107,
- /* 300 */ 1116, 1003, 1041, 1263, 1263, 1259, 1259, 1259, 1259, 1306,
- /* 310 */ 1306, 1244, 1275, 1275, 1025, 1025, 1275, 1309, 1309, 1309,
- /* 320 */ 1309, 1309, 1309, 1270, 1309, 1197, 1309, 1309, 1309, 1309,
- /* 330 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 340 */ 1309, 1309, 1131, 1309, 981, 1241, 1309, 1309, 1240, 1309,
- /* 350 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 360 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1297, 1309, 1309,
- /* 370 */ 1309, 1309, 1309, 1309, 1212, 1211, 1309, 1309, 1309, 1309,
- /* 380 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 390 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1063, 1309,
- /* 400 */ 1309, 1309, 1284, 1309, 1309, 1309, 1309, 1309, 1309, 1309,
- /* 410 */ 1108, 1309, 1101, 1309, 1309, 1288, 1309, 1309, 1309, 1309,
- /* 420 */ 1309, 1309, 1309, 1309, 1309, 1309, 1250, 1309, 1309, 1309,
- /* 430 */ 1249, 1309, 1309, 1309, 1309, 1309, 1133, 1309, 1132, 1136,
- /* 440 */ 1309, 991, 1309,
+ /* 0 */ 1281, 1271, 1271, 1271, 1203, 1203, 1203, 1203, 1271, 1096,
+ /* 10 */ 1125, 1125, 1255, 1332, 1332, 1332, 1332, 1332, 1332, 1202,
+ /* 20 */ 1332, 1332, 1332, 1332, 1271, 1100, 1131, 1332, 1332, 1332,
+ /* 30 */ 1332, 1204, 1205, 1332, 1332, 1332, 1254, 1256, 1141, 1140,
+ /* 40 */ 1139, 1138, 1237, 1112, 1136, 1129, 1133, 1204, 1198, 1199,
+ /* 50 */ 1197, 1201, 1205, 1332, 1132, 1167, 1182, 1166, 1332, 1332,
+ /* 60 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 70 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 80 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 90 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 100 */ 1332, 1332, 1332, 1332, 1176, 1181, 1188, 1180, 1177, 1169,
+ /* 110 */ 1168, 1170, 1171, 1332, 1019, 1067, 1332, 1332, 1332, 1172,
+ /* 120 */ 1332, 1173, 1185, 1184, 1183, 1262, 1289, 1288, 1332, 1332,
+ /* 130 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 140 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 150 */ 1332, 1332, 1332, 1332, 1332, 1281, 1271, 1025, 1025, 1332,
+ /* 160 */ 1271, 1271, 1271, 1271, 1271, 1271, 1267, 1100, 1091, 1332,
+ /* 170 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 180 */ 1259, 1257, 1332, 1218, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 190 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 200 */ 1332, 1332, 1332, 1332, 1096, 1332, 1332, 1332, 1332, 1332,
+ /* 210 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1283, 1332, 1232,
+ /* 220 */ 1096, 1096, 1096, 1098, 1080, 1090, 1004, 1135, 1114, 1114,
+ /* 230 */ 1321, 1135, 1321, 1042, 1303, 1039, 1125, 1114, 1200, 1125,
+ /* 240 */ 1125, 1097, 1090, 1332, 1324, 1105, 1105, 1323, 1323, 1105,
+ /* 250 */ 1146, 1070, 1135, 1076, 1076, 1076, 1076, 1105, 1016, 1135,
+ /* 260 */ 1146, 1070, 1070, 1135, 1105, 1016, 1236, 1318, 1105, 1105,
+ /* 270 */ 1016, 1211, 1105, 1016, 1105, 1016, 1211, 1068, 1068, 1068,
+ /* 280 */ 1057, 1211, 1068, 1042, 1068, 1057, 1068, 1068, 1118, 1113,
+ /* 290 */ 1118, 1113, 1118, 1113, 1118, 1113, 1105, 1206, 1105, 1332,
+ /* 300 */ 1211, 1215, 1215, 1211, 1130, 1119, 1128, 1126, 1135, 1022,
+ /* 310 */ 1060, 1286, 1286, 1282, 1282, 1282, 1282, 1329, 1329, 1267,
+ /* 320 */ 1298, 1298, 1044, 1044, 1298, 1332, 1332, 1332, 1332, 1332,
+ /* 330 */ 1332, 1293, 1332, 1220, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 340 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 350 */ 1332, 1332, 1152, 1332, 1000, 1264, 1332, 1332, 1263, 1332,
+ /* 360 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 370 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1320,
+ /* 380 */ 1332, 1332, 1332, 1332, 1332, 1332, 1235, 1234, 1332, 1332,
+ /* 390 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 400 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332,
+ /* 410 */ 1332, 1082, 1332, 1332, 1332, 1307, 1332, 1332, 1332, 1332,
+ /* 420 */ 1332, 1332, 1332, 1127, 1332, 1120, 1332, 1332, 1311, 1332,
+ /* 430 */ 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1332, 1273,
+ /* 440 */ 1332, 1332, 1332, 1272, 1332, 1332, 1332, 1332, 1332, 1154,
+ /* 450 */ 1332, 1153, 1157, 1332, 1010, 1332,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -132562,7 +133869,7 @@ static const char *const yyTokenName[] = {
"VALUES", "DISTINCT", "DOT", "FROM",
"JOIN", "USING", "ORDER", "GROUP",
"HAVING", "LIMIT", "WHERE", "INTO",
- "INTEGER", "FLOAT", "BLOB", "VARIABLE",
+ "FLOAT", "BLOB", "INTEGER", "VARIABLE",
"CASE", "WHEN", "THEN", "ELSE",
"INDEX", "ALTER", "ADD", "error",
"input", "cmdlist", "ecmd", "explain",
@@ -132738,195 +134045,199 @@ static const char *const yyRuleName[] = {
/* 136 */ "where_opt ::= WHERE expr",
/* 137 */ "cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt",
/* 138 */ "setlist ::= setlist COMMA nm EQ expr",
- /* 139 */ "setlist ::= nm EQ expr",
- /* 140 */ "cmd ::= with insert_cmd INTO fullname idlist_opt select",
- /* 141 */ "cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES",
- /* 142 */ "insert_cmd ::= INSERT orconf",
- /* 143 */ "insert_cmd ::= REPLACE",
- /* 144 */ "idlist_opt ::=",
- /* 145 */ "idlist_opt ::= LP idlist RP",
- /* 146 */ "idlist ::= idlist COMMA nm",
- /* 147 */ "idlist ::= nm",
- /* 148 */ "expr ::= LP expr RP",
- /* 149 */ "term ::= NULL",
- /* 150 */ "expr ::= ID|INDEXED",
- /* 151 */ "expr ::= JOIN_KW",
- /* 152 */ "expr ::= nm DOT nm",
- /* 153 */ "expr ::= nm DOT nm DOT nm",
- /* 154 */ "term ::= INTEGER|FLOAT|BLOB",
- /* 155 */ "term ::= STRING",
- /* 156 */ "expr ::= VARIABLE",
- /* 157 */ "expr ::= expr COLLATE ID|STRING",
- /* 158 */ "expr ::= CAST LP expr AS typetoken RP",
- /* 159 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
- /* 160 */ "expr ::= ID|INDEXED LP STAR RP",
- /* 161 */ "term ::= CTIME_KW",
- /* 162 */ "expr ::= expr AND expr",
- /* 163 */ "expr ::= expr OR expr",
- /* 164 */ "expr ::= expr LT|GT|GE|LE expr",
- /* 165 */ "expr ::= expr EQ|NE expr",
- /* 166 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
- /* 167 */ "expr ::= expr PLUS|MINUS expr",
- /* 168 */ "expr ::= expr STAR|SLASH|REM expr",
- /* 169 */ "expr ::= expr CONCAT expr",
- /* 170 */ "likeop ::= LIKE_KW|MATCH",
- /* 171 */ "likeop ::= NOT LIKE_KW|MATCH",
- /* 172 */ "expr ::= expr likeop expr",
- /* 173 */ "expr ::= expr likeop expr ESCAPE expr",
- /* 174 */ "expr ::= expr ISNULL|NOTNULL",
- /* 175 */ "expr ::= expr NOT NULL",
- /* 176 */ "expr ::= expr IS expr",
- /* 177 */ "expr ::= expr IS NOT expr",
- /* 178 */ "expr ::= NOT expr",
- /* 179 */ "expr ::= BITNOT expr",
- /* 180 */ "expr ::= MINUS expr",
- /* 181 */ "expr ::= PLUS expr",
- /* 182 */ "between_op ::= BETWEEN",
- /* 183 */ "between_op ::= NOT BETWEEN",
- /* 184 */ "expr ::= expr between_op expr AND expr",
- /* 185 */ "in_op ::= IN",
- /* 186 */ "in_op ::= NOT IN",
- /* 187 */ "expr ::= expr in_op LP exprlist RP",
- /* 188 */ "expr ::= LP select RP",
- /* 189 */ "expr ::= expr in_op LP select RP",
- /* 190 */ "expr ::= expr in_op nm dbnm paren_exprlist",
- /* 191 */ "expr ::= EXISTS LP select RP",
- /* 192 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 193 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 194 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 195 */ "case_else ::= ELSE expr",
- /* 196 */ "case_else ::=",
- /* 197 */ "case_operand ::= expr",
- /* 198 */ "case_operand ::=",
- /* 199 */ "exprlist ::=",
- /* 200 */ "nexprlist ::= nexprlist COMMA expr",
- /* 201 */ "nexprlist ::= expr",
- /* 202 */ "paren_exprlist ::=",
- /* 203 */ "paren_exprlist ::= LP exprlist RP",
- /* 204 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 205 */ "uniqueflag ::= UNIQUE",
- /* 206 */ "uniqueflag ::=",
- /* 207 */ "eidlist_opt ::=",
- /* 208 */ "eidlist_opt ::= LP eidlist RP",
- /* 209 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 210 */ "eidlist ::= nm collate sortorder",
- /* 211 */ "collate ::=",
- /* 212 */ "collate ::= COLLATE ID|STRING",
- /* 213 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 214 */ "cmd ::= VACUUM",
- /* 215 */ "cmd ::= VACUUM nm",
- /* 216 */ "cmd ::= PRAGMA nm dbnm",
- /* 217 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 218 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 219 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 220 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 221 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 222 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 223 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 224 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 225 */ "trigger_time ::= BEFORE",
- /* 226 */ "trigger_time ::= AFTER",
- /* 227 */ "trigger_time ::= INSTEAD OF",
- /* 228 */ "trigger_time ::=",
- /* 229 */ "trigger_event ::= DELETE|INSERT",
- /* 230 */ "trigger_event ::= UPDATE",
- /* 231 */ "trigger_event ::= UPDATE OF idlist",
- /* 232 */ "when_clause ::=",
- /* 233 */ "when_clause ::= WHEN expr",
- /* 234 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 235 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 236 */ "trnm ::= nm DOT nm",
- /* 237 */ "tridxby ::= INDEXED BY nm",
- /* 238 */ "tridxby ::= NOT INDEXED",
- /* 239 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt",
- /* 240 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select",
- /* 241 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt",
- /* 242 */ "trigger_cmd ::= select",
- /* 243 */ "expr ::= RAISE LP IGNORE RP",
- /* 244 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 245 */ "raisetype ::= ROLLBACK",
- /* 246 */ "raisetype ::= ABORT",
- /* 247 */ "raisetype ::= FAIL",
- /* 248 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 249 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 250 */ "cmd ::= DETACH database_kw_opt expr",
- /* 251 */ "key_opt ::=",
- /* 252 */ "key_opt ::= KEY expr",
- /* 253 */ "cmd ::= REINDEX",
- /* 254 */ "cmd ::= REINDEX nm dbnm",
- /* 255 */ "cmd ::= ANALYZE",
- /* 256 */ "cmd ::= ANALYZE nm dbnm",
- /* 257 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 258 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
- /* 259 */ "add_column_fullname ::= fullname",
- /* 260 */ "cmd ::= create_vtab",
- /* 261 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 262 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 263 */ "vtabarg ::=",
- /* 264 */ "vtabargtoken ::= ANY",
- /* 265 */ "vtabargtoken ::= lp anylist RP",
- /* 266 */ "lp ::= LP",
- /* 267 */ "with ::=",
- /* 268 */ "with ::= WITH wqlist",
- /* 269 */ "with ::= WITH RECURSIVE wqlist",
- /* 270 */ "wqlist ::= nm eidlist_opt AS LP select RP",
- /* 271 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
- /* 272 */ "input ::= cmdlist",
- /* 273 */ "cmdlist ::= cmdlist ecmd",
- /* 274 */ "cmdlist ::= ecmd",
- /* 275 */ "ecmd ::= SEMI",
- /* 276 */ "ecmd ::= explain cmdx SEMI",
- /* 277 */ "explain ::=",
- /* 278 */ "trans_opt ::=",
- /* 279 */ "trans_opt ::= TRANSACTION",
- /* 280 */ "trans_opt ::= TRANSACTION nm",
- /* 281 */ "savepoint_opt ::= SAVEPOINT",
- /* 282 */ "savepoint_opt ::=",
- /* 283 */ "cmd ::= create_table create_table_args",
- /* 284 */ "columnlist ::= columnlist COMMA columnname carglist",
- /* 285 */ "columnlist ::= columnname carglist",
- /* 286 */ "nm ::= ID|INDEXED",
- /* 287 */ "nm ::= STRING",
- /* 288 */ "nm ::= JOIN_KW",
- /* 289 */ "typetoken ::= typename",
- /* 290 */ "typename ::= ID|STRING",
- /* 291 */ "signed ::= plus_num",
- /* 292 */ "signed ::= minus_num",
- /* 293 */ "carglist ::= carglist ccons",
- /* 294 */ "carglist ::=",
- /* 295 */ "ccons ::= NULL onconf",
- /* 296 */ "conslist_opt ::= COMMA conslist",
- /* 297 */ "conslist ::= conslist tconscomma tcons",
- /* 298 */ "conslist ::= tcons",
- /* 299 */ "tconscomma ::=",
- /* 300 */ "defer_subclause_opt ::= defer_subclause",
- /* 301 */ "resolvetype ::= raisetype",
- /* 302 */ "selectnowith ::= oneselect",
- /* 303 */ "oneselect ::= values",
- /* 304 */ "sclp ::= selcollist COMMA",
- /* 305 */ "as ::= ID|STRING",
- /* 306 */ "expr ::= term",
- /* 307 */ "exprlist ::= nexprlist",
- /* 308 */ "nmnum ::= plus_num",
- /* 309 */ "nmnum ::= nm",
- /* 310 */ "nmnum ::= ON",
- /* 311 */ "nmnum ::= DELETE",
- /* 312 */ "nmnum ::= DEFAULT",
- /* 313 */ "plus_num ::= INTEGER|FLOAT",
- /* 314 */ "foreach_clause ::=",
- /* 315 */ "foreach_clause ::= FOR EACH ROW",
- /* 316 */ "trnm ::= nm",
- /* 317 */ "tridxby ::=",
- /* 318 */ "database_kw_opt ::= DATABASE",
- /* 319 */ "database_kw_opt ::=",
- /* 320 */ "kwcolumn_opt ::=",
- /* 321 */ "kwcolumn_opt ::= COLUMNKW",
- /* 322 */ "vtabarglist ::= vtabarg",
- /* 323 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
- /* 324 */ "vtabarg ::= vtabarg vtabargtoken",
- /* 325 */ "anylist ::=",
- /* 326 */ "anylist ::= anylist LP anylist RP",
- /* 327 */ "anylist ::= anylist ANY",
+ /* 139 */ "setlist ::= setlist COMMA LP idlist RP EQ expr",
+ /* 140 */ "setlist ::= nm EQ expr",
+ /* 141 */ "setlist ::= LP idlist RP EQ expr",
+ /* 142 */ "cmd ::= with insert_cmd INTO fullname idlist_opt select",
+ /* 143 */ "cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES",
+ /* 144 */ "insert_cmd ::= INSERT orconf",
+ /* 145 */ "insert_cmd ::= REPLACE",
+ /* 146 */ "idlist_opt ::=",
+ /* 147 */ "idlist_opt ::= LP idlist RP",
+ /* 148 */ "idlist ::= idlist COMMA nm",
+ /* 149 */ "idlist ::= nm",
+ /* 150 */ "expr ::= LP expr RP",
+ /* 151 */ "term ::= NULL",
+ /* 152 */ "expr ::= ID|INDEXED",
+ /* 153 */ "expr ::= JOIN_KW",
+ /* 154 */ "expr ::= nm DOT nm",
+ /* 155 */ "expr ::= nm DOT nm DOT nm",
+ /* 156 */ "term ::= FLOAT|BLOB",
+ /* 157 */ "term ::= STRING",
+ /* 158 */ "term ::= INTEGER",
+ /* 159 */ "expr ::= VARIABLE",
+ /* 160 */ "expr ::= expr COLLATE ID|STRING",
+ /* 161 */ "expr ::= CAST LP expr AS typetoken RP",
+ /* 162 */ "expr ::= ID|INDEXED LP distinct exprlist RP",
+ /* 163 */ "expr ::= ID|INDEXED LP STAR RP",
+ /* 164 */ "term ::= CTIME_KW",
+ /* 165 */ "expr ::= LP nexprlist COMMA expr RP",
+ /* 166 */ "expr ::= expr AND expr",
+ /* 167 */ "expr ::= expr OR expr",
+ /* 168 */ "expr ::= expr LT|GT|GE|LE expr",
+ /* 169 */ "expr ::= expr EQ|NE expr",
+ /* 170 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr",
+ /* 171 */ "expr ::= expr PLUS|MINUS expr",
+ /* 172 */ "expr ::= expr STAR|SLASH|REM expr",
+ /* 173 */ "expr ::= expr CONCAT expr",
+ /* 174 */ "likeop ::= LIKE_KW|MATCH",
+ /* 175 */ "likeop ::= NOT LIKE_KW|MATCH",
+ /* 176 */ "expr ::= expr likeop expr",
+ /* 177 */ "expr ::= expr likeop expr ESCAPE expr",
+ /* 178 */ "expr ::= expr ISNULL|NOTNULL",
+ /* 179 */ "expr ::= expr NOT NULL",
+ /* 180 */ "expr ::= expr IS expr",
+ /* 181 */ "expr ::= expr IS NOT expr",
+ /* 182 */ "expr ::= NOT expr",
+ /* 183 */ "expr ::= BITNOT expr",
+ /* 184 */ "expr ::= MINUS expr",
+ /* 185 */ "expr ::= PLUS expr",
+ /* 186 */ "between_op ::= BETWEEN",
+ /* 187 */ "between_op ::= NOT BETWEEN",
+ /* 188 */ "expr ::= expr between_op expr AND expr",
+ /* 189 */ "in_op ::= IN",
+ /* 190 */ "in_op ::= NOT IN",
+ /* 191 */ "expr ::= expr in_op LP exprlist RP",
+ /* 192 */ "expr ::= LP select RP",
+ /* 193 */ "expr ::= expr in_op LP select RP",
+ /* 194 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 195 */ "expr ::= EXISTS LP select RP",
+ /* 196 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 197 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 198 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 199 */ "case_else ::= ELSE expr",
+ /* 200 */ "case_else ::=",
+ /* 201 */ "case_operand ::= expr",
+ /* 202 */ "case_operand ::=",
+ /* 203 */ "exprlist ::=",
+ /* 204 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 205 */ "nexprlist ::= expr",
+ /* 206 */ "paren_exprlist ::=",
+ /* 207 */ "paren_exprlist ::= LP exprlist RP",
+ /* 208 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 209 */ "uniqueflag ::= UNIQUE",
+ /* 210 */ "uniqueflag ::=",
+ /* 211 */ "eidlist_opt ::=",
+ /* 212 */ "eidlist_opt ::= LP eidlist RP",
+ /* 213 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 214 */ "eidlist ::= nm collate sortorder",
+ /* 215 */ "collate ::=",
+ /* 216 */ "collate ::= COLLATE ID|STRING",
+ /* 217 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 218 */ "cmd ::= VACUUM",
+ /* 219 */ "cmd ::= VACUUM nm",
+ /* 220 */ "cmd ::= PRAGMA nm dbnm",
+ /* 221 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 222 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 223 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 224 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 225 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 226 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 227 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 228 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 229 */ "trigger_time ::= BEFORE",
+ /* 230 */ "trigger_time ::= AFTER",
+ /* 231 */ "trigger_time ::= INSTEAD OF",
+ /* 232 */ "trigger_time ::=",
+ /* 233 */ "trigger_event ::= DELETE|INSERT",
+ /* 234 */ "trigger_event ::= UPDATE",
+ /* 235 */ "trigger_event ::= UPDATE OF idlist",
+ /* 236 */ "when_clause ::=",
+ /* 237 */ "when_clause ::= WHEN expr",
+ /* 238 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 239 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 240 */ "trnm ::= nm DOT nm",
+ /* 241 */ "tridxby ::= INDEXED BY nm",
+ /* 242 */ "tridxby ::= NOT INDEXED",
+ /* 243 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt",
+ /* 244 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select",
+ /* 245 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt",
+ /* 246 */ "trigger_cmd ::= select",
+ /* 247 */ "expr ::= RAISE LP IGNORE RP",
+ /* 248 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 249 */ "raisetype ::= ROLLBACK",
+ /* 250 */ "raisetype ::= ABORT",
+ /* 251 */ "raisetype ::= FAIL",
+ /* 252 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 253 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 254 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 255 */ "key_opt ::=",
+ /* 256 */ "key_opt ::= KEY expr",
+ /* 257 */ "cmd ::= REINDEX",
+ /* 258 */ "cmd ::= REINDEX nm dbnm",
+ /* 259 */ "cmd ::= ANALYZE",
+ /* 260 */ "cmd ::= ANALYZE nm dbnm",
+ /* 261 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 262 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 263 */ "add_column_fullname ::= fullname",
+ /* 264 */ "cmd ::= create_vtab",
+ /* 265 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 266 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 267 */ "vtabarg ::=",
+ /* 268 */ "vtabargtoken ::= ANY",
+ /* 269 */ "vtabargtoken ::= lp anylist RP",
+ /* 270 */ "lp ::= LP",
+ /* 271 */ "with ::=",
+ /* 272 */ "with ::= WITH wqlist",
+ /* 273 */ "with ::= WITH RECURSIVE wqlist",
+ /* 274 */ "wqlist ::= nm eidlist_opt AS LP select RP",
+ /* 275 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP",
+ /* 276 */ "input ::= cmdlist",
+ /* 277 */ "cmdlist ::= cmdlist ecmd",
+ /* 278 */ "cmdlist ::= ecmd",
+ /* 279 */ "ecmd ::= SEMI",
+ /* 280 */ "ecmd ::= explain cmdx SEMI",
+ /* 281 */ "explain ::=",
+ /* 282 */ "trans_opt ::=",
+ /* 283 */ "trans_opt ::= TRANSACTION",
+ /* 284 */ "trans_opt ::= TRANSACTION nm",
+ /* 285 */ "savepoint_opt ::= SAVEPOINT",
+ /* 286 */ "savepoint_opt ::=",
+ /* 287 */ "cmd ::= create_table create_table_args",
+ /* 288 */ "columnlist ::= columnlist COMMA columnname carglist",
+ /* 289 */ "columnlist ::= columnname carglist",
+ /* 290 */ "nm ::= ID|INDEXED",
+ /* 291 */ "nm ::= STRING",
+ /* 292 */ "nm ::= JOIN_KW",
+ /* 293 */ "typetoken ::= typename",
+ /* 294 */ "typename ::= ID|STRING",
+ /* 295 */ "signed ::= plus_num",
+ /* 296 */ "signed ::= minus_num",
+ /* 297 */ "carglist ::= carglist ccons",
+ /* 298 */ "carglist ::=",
+ /* 299 */ "ccons ::= NULL onconf",
+ /* 300 */ "conslist_opt ::= COMMA conslist",
+ /* 301 */ "conslist ::= conslist tconscomma tcons",
+ /* 302 */ "conslist ::= tcons",
+ /* 303 */ "tconscomma ::=",
+ /* 304 */ "defer_subclause_opt ::= defer_subclause",
+ /* 305 */ "resolvetype ::= raisetype",
+ /* 306 */ "selectnowith ::= oneselect",
+ /* 307 */ "oneselect ::= values",
+ /* 308 */ "sclp ::= selcollist COMMA",
+ /* 309 */ "as ::= ID|STRING",
+ /* 310 */ "expr ::= term",
+ /* 311 */ "exprlist ::= nexprlist",
+ /* 312 */ "nmnum ::= plus_num",
+ /* 313 */ "nmnum ::= nm",
+ /* 314 */ "nmnum ::= ON",
+ /* 315 */ "nmnum ::= DELETE",
+ /* 316 */ "nmnum ::= DEFAULT",
+ /* 317 */ "plus_num ::= INTEGER|FLOAT",
+ /* 318 */ "foreach_clause ::=",
+ /* 319 */ "foreach_clause ::= FOR EACH ROW",
+ /* 320 */ "trnm ::= nm",
+ /* 321 */ "tridxby ::=",
+ /* 322 */ "database_kw_opt ::= DATABASE",
+ /* 323 */ "database_kw_opt ::=",
+ /* 324 */ "kwcolumn_opt ::=",
+ /* 325 */ "kwcolumn_opt ::= COLUMNKW",
+ /* 326 */ "vtabarglist ::= vtabarg",
+ /* 327 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
+ /* 328 */ "vtabarg ::= vtabarg vtabargtoken",
+ /* 329 */ "anylist ::=",
+ /* 330 */ "anylist ::= anylist LP anylist RP",
+ /* 331 */ "anylist ::= anylist ANY",
};
#endif /* NDEBUG */
@@ -133183,50 +134494,47 @@ static unsigned int yy_find_shift_action(
assert( stateno <= YY_SHIFT_COUNT );
do{
i = yy_shift_ofst[stateno];
- if( i==YY_SHIFT_USE_DFLT ) return yy_default[stateno];
assert( iLookAhead!=YYNOCODE );
i += iLookAhead;
if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
- if( iLookAhead>0 ){
#ifdef YYFALLBACK
- YYCODETYPE iFallback; /* Fallback token */
- if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
- && (iFallback = yyFallback[iLookAhead])!=0 ){
+ YYCODETYPE iFallback; /* Fallback token */
+ if( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0])
+ && (iFallback = yyFallback[iLookAhead])!=0 ){
#ifndef NDEBUG
- if( yyTraceFILE ){
- fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
- yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
- }
-#endif
- assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
- iLookAhead = iFallback;
- continue;
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
}
#endif
+ assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
#ifdef YYWILDCARD
- {
- int j = i - iLookAhead + YYWILDCARD;
- if(
+ {
+ int j = i - iLookAhead + YYWILDCARD;
+ if(
#if YY_SHIFT_MIN+YYWILDCARD<0
- j>=0 &&
+ j>=0 &&
#endif
#if YY_SHIFT_MAX+YYWILDCARD>=YY_ACTTAB_COUNT
- j<YY_ACTTAB_COUNT &&
+ j<YY_ACTTAB_COUNT &&
#endif
- yy_lookahead[j]==YYWILDCARD
- ){
+ yy_lookahead[j]==YYWILDCARD && iLookAhead>0
+ ){
#ifndef NDEBUG
- if( yyTraceFILE ){
- fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
- yyTracePrompt, yyTokenName[iLookAhead],
- yyTokenName[YYWILDCARD]);
- }
-#endif /* NDEBUG */
- return yy_action[j];
+ if( yyTraceFILE ){
+ fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
+ yyTracePrompt, yyTokenName[iLookAhead],
+ yyTokenName[YYWILDCARD]);
}
+#endif /* NDEBUG */
+ return yy_action[j];
}
-#endif /* YYWILDCARD */
}
+#endif /* YYWILDCARD */
return yy_default[stateno];
}else{
return yy_action[i];
@@ -133492,7 +134800,9 @@ static const struct {
{ 201, 2 },
{ 149, 8 },
{ 218, 5 },
+ { 218, 7 },
{ 218, 3 },
+ { 218, 5 },
{ 149, 6 },
{ 149, 7 },
{ 219, 2 },
@@ -133509,12 +134819,14 @@ static const struct {
{ 173, 5 },
{ 172, 1 },
{ 172, 1 },
+ { 172, 1 },
{ 173, 1 },
{ 173, 3 },
{ 173, 6 },
{ 173, 5 },
{ 173, 4 },
{ 172, 1 },
+ { 173, 5 },
{ 173, 3 },
{ 173, 3 },
{ 173, 3 },
@@ -133802,7 +135114,7 @@ static void yy_reduce(
case 67: /* defer_subclause_opt ::= */ yytestcase(yyruleno==67);
case 76: /* ifexists ::= */ yytestcase(yyruleno==76);
case 90: /* distinct ::= */ yytestcase(yyruleno==90);
- case 211: /* collate ::= */ yytestcase(yyruleno==211);
+ case 215: /* collate ::= */ yytestcase(yyruleno==215);
{yymsp[1].minor.yy194 = 0;}
break;
case 17: /* ifnotexists ::= IF NOT EXISTS */
@@ -133941,14 +135253,14 @@ static void yy_reduce(
break;
case 56: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */
case 71: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==71);
- case 142: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==142);
+ case 144: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==144);
{yymsp[-1].minor.yy194 = yymsp[0].minor.yy194;}
break;
case 58: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
case 75: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==75);
- case 183: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==183);
- case 186: /* in_op ::= NOT IN */ yytestcase(yyruleno==186);
- case 212: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==212);
+ case 187: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==187);
+ case 190: /* in_op ::= NOT IN */ yytestcase(yyruleno==190);
+ case 216: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==216);
{yymsp[-1].minor.yy194 = 1;}
break;
case 59: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
@@ -133984,7 +135296,7 @@ static void yy_reduce(
{yymsp[0].minor.yy194 = OE_Ignore;}
break;
case 73: /* resolvetype ::= REPLACE */
- case 143: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==143);
+ case 145: /* insert_cmd ::= REPLACE */ yytestcase(yyruleno==145);
{yymsp[0].minor.yy194 = OE_Replace;}
break;
case 74: /* cmd ::= DROP TABLE ifexists fullname */
@@ -134112,9 +135424,9 @@ static void yy_reduce(
case 91: /* sclp ::= */
case 119: /* orderby_opt ::= */ yytestcase(yyruleno==119);
case 126: /* groupby_opt ::= */ yytestcase(yyruleno==126);
- case 199: /* exprlist ::= */ yytestcase(yyruleno==199);
- case 202: /* paren_exprlist ::= */ yytestcase(yyruleno==202);
- case 207: /* eidlist_opt ::= */ yytestcase(yyruleno==207);
+ case 203: /* exprlist ::= */ yytestcase(yyruleno==203);
+ case 206: /* paren_exprlist ::= */ yytestcase(yyruleno==206);
+ case 211: /* eidlist_opt ::= */ yytestcase(yyruleno==211);
{yymsp[1].minor.yy148 = 0;}
break;
case 92: /* selcollist ::= sclp expr as */
@@ -134132,7 +135444,7 @@ static void yy_reduce(
break;
case 94: /* selcollist ::= sclp nm DOT STAR */
{
- Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0, &yymsp[0].minor.yy0);
+ Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0, 0);
Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0);
yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, pDot);
@@ -134140,8 +135452,8 @@ static void yy_reduce(
break;
case 95: /* as ::= AS nm */
case 106: /* dbnm ::= DOT nm */ yytestcase(yyruleno==106);
- case 221: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==221);
- case 222: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==222);
+ case 225: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==225);
+ case 226: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==226);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
break;
case 97: /* from ::= */
@@ -134224,14 +135536,14 @@ static void yy_reduce(
case 112: /* on_opt ::= ON expr */
case 129: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==129);
case 136: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==136);
- case 195: /* case_else ::= ELSE expr */ yytestcase(yyruleno==195);
+ case 199: /* case_else ::= ELSE expr */ yytestcase(yyruleno==199);
{yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr;}
break;
case 113: /* on_opt ::= */
case 128: /* having_opt ::= */ yytestcase(yyruleno==128);
case 135: /* where_opt ::= */ yytestcase(yyruleno==135);
- case 196: /* case_else ::= */ yytestcase(yyruleno==196);
- case 198: /* case_operand ::= */ yytestcase(yyruleno==198);
+ case 200: /* case_else ::= */ yytestcase(yyruleno==200);
+ case 202: /* case_operand ::= */ yytestcase(yyruleno==202);
{yymsp[1].minor.yy72 = 0;}
break;
case 115: /* indexed_opt ::= INDEXED BY nm */
@@ -134244,7 +135556,7 @@ static void yy_reduce(
{yymsp[-3].minor.yy254 = yymsp[-1].minor.yy254;}
break;
case 118: /* using_opt ::= */
- case 144: /* idlist_opt ::= */ yytestcase(yyruleno==144);
+ case 146: /* idlist_opt ::= */ yytestcase(yyruleno==146);
{yymsp[1].minor.yy254 = 0;}
break;
case 120: /* orderby_opt ::= ORDER BY sortlist */
@@ -134305,69 +135617,89 @@ static void yy_reduce(
sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, 1);
}
break;
- case 139: /* setlist ::= nm EQ expr */
+ case 139: /* setlist ::= setlist COMMA LP idlist RP EQ expr */
+{
+ yymsp[-6].minor.yy148 = sqlite3ExprListAppendVector(pParse, yymsp[-6].minor.yy148, yymsp[-3].minor.yy254, yymsp[0].minor.yy190.pExpr);
+}
+ break;
+ case 140: /* setlist ::= nm EQ expr */
{
yylhsminor.yy148 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy190.pExpr);
sqlite3ExprListSetName(pParse, yylhsminor.yy148, &yymsp[-2].minor.yy0, 1);
}
yymsp[-2].minor.yy148 = yylhsminor.yy148;
break;
- case 140: /* cmd ::= with insert_cmd INTO fullname idlist_opt select */
+ case 141: /* setlist ::= LP idlist RP EQ expr */
+{
+ yymsp[-4].minor.yy148 = sqlite3ExprListAppendVector(pParse, 0, yymsp[-3].minor.yy254, yymsp[0].minor.yy190.pExpr);
+}
+ break;
+ case 142: /* cmd ::= with insert_cmd INTO fullname idlist_opt select */
{
sqlite3WithPush(pParse, yymsp[-5].minor.yy285, 1);
sqlite3Insert(pParse, yymsp[-2].minor.yy185, yymsp[0].minor.yy243, yymsp[-1].minor.yy254, yymsp[-4].minor.yy194);
}
break;
- case 141: /* cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES */
+ case 143: /* cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES */
{
sqlite3WithPush(pParse, yymsp[-6].minor.yy285, 1);
sqlite3Insert(pParse, yymsp[-3].minor.yy185, 0, yymsp[-2].minor.yy254, yymsp[-5].minor.yy194);
}
break;
- case 145: /* idlist_opt ::= LP idlist RP */
+ case 147: /* idlist_opt ::= LP idlist RP */
{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;}
break;
- case 146: /* idlist ::= idlist COMMA nm */
+ case 148: /* idlist ::= idlist COMMA nm */
{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);}
break;
- case 147: /* idlist ::= nm */
+ case 149: /* idlist ::= nm */
{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/}
break;
- case 148: /* expr ::= LP expr RP */
+ case 150: /* expr ::= LP expr RP */
{spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ yymsp[-2].minor.yy190.pExpr = yymsp[-1].minor.yy190.pExpr;}
break;
- case 149: /* term ::= NULL */
- case 154: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==154);
- case 155: /* term ::= STRING */ yytestcase(yyruleno==155);
+ case 151: /* term ::= NULL */
+ case 156: /* term ::= FLOAT|BLOB */ yytestcase(yyruleno==156);
+ case 157: /* term ::= STRING */ yytestcase(yyruleno==157);
{spanExpr(&yymsp[0].minor.yy190,pParse,yymsp[0].major,yymsp[0].minor.yy0);/*A-overwrites-X*/}
break;
- case 150: /* expr ::= ID|INDEXED */
- case 151: /* expr ::= JOIN_KW */ yytestcase(yyruleno==151);
+ case 152: /* expr ::= ID|INDEXED */
+ case 153: /* expr ::= JOIN_KW */ yytestcase(yyruleno==153);
{spanExpr(&yymsp[0].minor.yy190,pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/}
break;
- case 152: /* expr ::= nm DOT nm */
+ case 154: /* expr ::= nm DOT nm */
{
- Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
- Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0);
+ Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0);
}
break;
- case 153: /* expr ::= nm DOT nm DOT nm */
+ case 155: /* expr ::= nm DOT nm DOT nm */
{
- Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-4].minor.yy0);
- Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
- Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0);
+ Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1);
+ Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0);
spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0);
}
break;
- case 156: /* expr ::= VARIABLE */
+ case 158: /* term ::= INTEGER */
+{
+ yylhsminor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1);
+ yylhsminor.yy190.zStart = yymsp[0].minor.yy0.z;
+ yylhsminor.yy190.zEnd = yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n;
+ if( yylhsminor.yy190.pExpr ) yylhsminor.yy190.pExpr->flags |= EP_Leaf;
+}
+ yymsp[0].minor.yy190 = yylhsminor.yy190;
+ break;
+ case 159: /* expr ::= VARIABLE */
{
if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){
+ u32 n = yymsp[0].minor.yy0.n;
spanExpr(&yymsp[0].minor.yy190, pParse, TK_VARIABLE, yymsp[0].minor.yy0);
- sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy190.pExpr);
+ sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy190.pExpr, n);
}else{
/* When doing a nested parse, one can include terms in an expression
** that look like this: #1 #2 ... These terms refer to registers
@@ -134379,25 +135711,25 @@ static void yy_reduce(
sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t);
yymsp[0].minor.yy190.pExpr = 0;
}else{
- yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &t);
+ yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, 0);
if( yymsp[0].minor.yy190.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy190.pExpr->iTable);
}
}
}
break;
- case 157: /* expr ::= expr COLLATE ID|STRING */
+ case 160: /* expr ::= expr COLLATE ID|STRING */
{
yymsp[-2].minor.yy190.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy190.pExpr, &yymsp[0].minor.yy0, 1);
yymsp[-2].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 158: /* expr ::= CAST LP expr AS typetoken RP */
+ case 161: /* expr ::= CAST LP expr AS typetoken RP */
{
spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy190.pExpr, 0, &yymsp[-1].minor.yy0);
}
break;
- case 159: /* expr ::= ID|INDEXED LP distinct exprlist RP */
+ case 162: /* expr ::= ID|INDEXED LP distinct exprlist RP */
{
if( yymsp[-1].minor.yy148 && yymsp[-1].minor.yy148->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){
sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0);
@@ -134410,92 +135742,109 @@ static void yy_reduce(
}
yymsp[-4].minor.yy190 = yylhsminor.yy190;
break;
- case 160: /* expr ::= ID|INDEXED LP STAR RP */
+ case 163: /* expr ::= ID|INDEXED LP STAR RP */
{
yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0);
spanSet(&yylhsminor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0);
}
yymsp[-3].minor.yy190 = yylhsminor.yy190;
break;
- case 161: /* term ::= CTIME_KW */
+ case 164: /* term ::= CTIME_KW */
{
yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0);
spanSet(&yylhsminor.yy190, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0);
}
yymsp[0].minor.yy190 = yylhsminor.yy190;
break;
- case 162: /* expr ::= expr AND expr */
- case 163: /* expr ::= expr OR expr */ yytestcase(yyruleno==163);
- case 164: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==164);
- case 165: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==165);
- case 166: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==166);
- case 167: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==167);
- case 168: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==168);
- case 169: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==169);
+ case 165: /* expr ::= LP nexprlist COMMA expr RP */
+{
+ ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy148, yymsp[-1].minor.yy190.pExpr);
+ yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0, 0);
+ if( yylhsminor.yy190.pExpr ){
+ yylhsminor.yy190.pExpr->x.pList = pList;
+ spanSet(&yylhsminor.yy190, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0);
+ }else{
+ sqlite3ExprListDelete(pParse->db, pList);
+ }
+}
+ yymsp[-4].minor.yy190 = yylhsminor.yy190;
+ break;
+ case 166: /* expr ::= expr AND expr */
+ case 167: /* expr ::= expr OR expr */ yytestcase(yyruleno==167);
+ case 168: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==168);
+ case 169: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==169);
+ case 170: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==170);
+ case 171: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==171);
+ case 172: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==172);
+ case 173: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==173);
{spanBinaryExpr(pParse,yymsp[-1].major,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190);}
break;
- case 170: /* likeop ::= LIKE_KW|MATCH */
-{yymsp[0].minor.yy392.eOperator = yymsp[0].minor.yy0; yymsp[0].minor.yy392.bNot = 0;/*A-overwrites-X*/}
+ case 174: /* likeop ::= LIKE_KW|MATCH */
+{yymsp[0].minor.yy0=yymsp[0].minor.yy0;/*A-overwrites-X*/}
break;
- case 171: /* likeop ::= NOT LIKE_KW|MATCH */
-{yymsp[-1].minor.yy392.eOperator = yymsp[0].minor.yy0; yymsp[-1].minor.yy392.bNot = 1;}
+ case 175: /* likeop ::= NOT LIKE_KW|MATCH */
+{yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/}
break;
- case 172: /* expr ::= expr likeop expr */
+ case 176: /* expr ::= expr likeop expr */
{
ExprList *pList;
+ int bNot = yymsp[-1].minor.yy0.n & 0x80000000;
+ yymsp[-1].minor.yy0.n &= 0x7fffffff;
pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy190.pExpr);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy190.pExpr);
- yymsp[-2].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy392.eOperator);
- exprNot(pParse, yymsp[-1].minor.yy392.bNot, &yymsp[-2].minor.yy190);
+ yymsp[-2].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy0);
+ exprNot(pParse, bNot, &yymsp[-2].minor.yy190);
yymsp[-2].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd;
if( yymsp[-2].minor.yy190.pExpr ) yymsp[-2].minor.yy190.pExpr->flags |= EP_InfixFunc;
}
break;
- case 173: /* expr ::= expr likeop expr ESCAPE expr */
+ case 177: /* expr ::= expr likeop expr ESCAPE expr */
{
ExprList *pList;
+ int bNot = yymsp[-3].minor.yy0.n & 0x80000000;
+ yymsp[-3].minor.yy0.n &= 0x7fffffff;
pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy190.pExpr);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr);
- yymsp[-4].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy392.eOperator);
- exprNot(pParse, yymsp[-3].minor.yy392.bNot, &yymsp[-4].minor.yy190);
+ yymsp[-4].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy0);
+ exprNot(pParse, bNot, &yymsp[-4].minor.yy190);
yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd;
if( yymsp[-4].minor.yy190.pExpr ) yymsp[-4].minor.yy190.pExpr->flags |= EP_InfixFunc;
}
break;
- case 174: /* expr ::= expr ISNULL|NOTNULL */
+ case 178: /* expr ::= expr ISNULL|NOTNULL */
{spanUnaryPostfix(pParse,yymsp[0].major,&yymsp[-1].minor.yy190,&yymsp[0].minor.yy0);}
break;
- case 175: /* expr ::= expr NOT NULL */
+ case 179: /* expr ::= expr NOT NULL */
{spanUnaryPostfix(pParse,TK_NOTNULL,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy0);}
break;
- case 176: /* expr ::= expr IS expr */
+ case 180: /* expr ::= expr IS expr */
{
spanBinaryExpr(pParse,TK_IS,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-2].minor.yy190.pExpr, TK_ISNULL);
}
break;
- case 177: /* expr ::= expr IS NOT expr */
+ case 181: /* expr ::= expr IS NOT expr */
{
spanBinaryExpr(pParse,TK_ISNOT,&yymsp[-3].minor.yy190,&yymsp[0].minor.yy190);
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, TK_NOTNULL);
}
break;
- case 178: /* expr ::= NOT expr */
- case 179: /* expr ::= BITNOT expr */ yytestcase(yyruleno==179);
+ case 182: /* expr ::= NOT expr */
+ case 183: /* expr ::= BITNOT expr */ yytestcase(yyruleno==183);
{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,yymsp[-1].major,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/}
break;
- case 180: /* expr ::= MINUS expr */
+ case 184: /* expr ::= MINUS expr */
{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UMINUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/}
break;
- case 181: /* expr ::= PLUS expr */
+ case 185: /* expr ::= PLUS expr */
{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UPLUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/}
break;
- case 182: /* between_op ::= BETWEEN */
- case 185: /* in_op ::= IN */ yytestcase(yyruleno==185);
+ case 186: /* between_op ::= BETWEEN */
+ case 189: /* in_op ::= IN */ yytestcase(yyruleno==189);
{yymsp[0].minor.yy194 = 0;}
break;
- case 184: /* expr ::= expr between_op expr AND expr */
+ case 188: /* expr ::= expr between_op expr AND expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr);
@@ -134509,7 +135858,7 @@ static void yy_reduce(
yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd;
}
break;
- case 187: /* expr ::= expr in_op LP exprlist RP */
+ case 191: /* expr ::= expr in_op LP exprlist RP */
{
if( yymsp[-1].minor.yy148==0 ){
/* Expressions of the form
@@ -134562,14 +135911,14 @@ static void yy_reduce(
yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 188: /* expr ::= LP select RP */
+ case 192: /* expr ::= LP select RP */
{
spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/
yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy190.pExpr, yymsp[-1].minor.yy243);
}
break;
- case 189: /* expr ::= expr in_op LP select RP */
+ case 193: /* expr ::= expr in_op LP select RP */
{
yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, yymsp[-1].minor.yy243);
@@ -134577,7 +135926,7 @@ static void yy_reduce(
yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
}
break;
- case 190: /* expr ::= expr in_op nm dbnm paren_exprlist */
+ case 194: /* expr ::= expr in_op nm dbnm paren_exprlist */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0);
@@ -134588,7 +135937,7 @@ static void yy_reduce(
yymsp[-4].minor.yy190.zEnd = yymsp[-1].minor.yy0.z ? &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n] : &yymsp[-2].minor.yy0.z[yymsp[-2].minor.yy0.n];
}
break;
- case 191: /* expr ::= EXISTS LP select RP */
+ case 195: /* expr ::= EXISTS LP select RP */
{
Expr *p;
spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/
@@ -134596,7 +135945,7 @@ static void yy_reduce(
sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy243);
}
break;
- case 192: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 196: /* expr ::= CASE case_operand case_exprlist case_else END */
{
spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/
yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0, 0);
@@ -134609,78 +135958,80 @@ static void yy_reduce(
}
}
break;
- case 193: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 197: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[-2].minor.yy190.pExpr);
yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[0].minor.yy190.pExpr);
}
break;
- case 194: /* case_exprlist ::= WHEN expr THEN expr */
+ case 198: /* case_exprlist ::= WHEN expr THEN expr */
{
yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr);
yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, yymsp[0].minor.yy190.pExpr);
}
break;
- case 197: /* case_operand ::= expr */
+ case 201: /* case_operand ::= expr */
{yymsp[0].minor.yy72 = yymsp[0].minor.yy190.pExpr; /*A-overwrites-X*/}
break;
- case 200: /* nexprlist ::= nexprlist COMMA expr */
+ case 204: /* nexprlist ::= nexprlist COMMA expr */
{yymsp[-2].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[0].minor.yy190.pExpr);}
break;
- case 201: /* nexprlist ::= expr */
+ case 205: /* nexprlist ::= expr */
{yymsp[0].minor.yy148 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy190.pExpr); /*A-overwrites-Y*/}
break;
- case 203: /* paren_exprlist ::= LP exprlist RP */
- case 208: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==208);
+ case 207: /* paren_exprlist ::= LP exprlist RP */
+ case 212: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==212);
{yymsp[-2].minor.yy148 = yymsp[-1].minor.yy148;}
break;
- case 204: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 208: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy148, yymsp[-10].minor.yy194,
&yymsp[-11].minor.yy0, yymsp[0].minor.yy72, SQLITE_SO_ASC, yymsp[-8].minor.yy194, SQLITE_IDXTYPE_APPDEF);
}
break;
- case 205: /* uniqueflag ::= UNIQUE */
- case 246: /* raisetype ::= ABORT */ yytestcase(yyruleno==246);
+ case 209: /* uniqueflag ::= UNIQUE */
+ case 250: /* raisetype ::= ABORT */ yytestcase(yyruleno==250);
{yymsp[0].minor.yy194 = OE_Abort;}
break;
- case 206: /* uniqueflag ::= */
+ case 210: /* uniqueflag ::= */
{yymsp[1].minor.yy194 = OE_None;}
break;
- case 209: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 213: /* eidlist ::= eidlist COMMA nm collate sortorder */
{
yymsp[-4].minor.yy148 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194);
}
break;
- case 210: /* eidlist ::= nm collate sortorder */
+ case 214: /* eidlist ::= nm collate sortorder */
{
yymsp[-2].minor.yy148 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194); /*A-overwrites-Y*/
}
break;
- case 213: /* cmd ::= DROP INDEX ifexists fullname */
+ case 217: /* cmd ::= DROP INDEX ifexists fullname */
{sqlite3DropIndex(pParse, yymsp[0].minor.yy185, yymsp[-1].minor.yy194);}
break;
- case 214: /* cmd ::= VACUUM */
- case 215: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==215);
-{sqlite3Vacuum(pParse);}
+ case 218: /* cmd ::= VACUUM */
+{sqlite3Vacuum(pParse,0);}
+ break;
+ case 219: /* cmd ::= VACUUM nm */
+{sqlite3Vacuum(pParse,&yymsp[0].minor.yy0);}
break;
- case 216: /* cmd ::= PRAGMA nm dbnm */
+ case 220: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 217: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 221: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 218: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 222: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 219: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 223: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 220: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 224: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 223: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 227: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
@@ -134688,53 +136039,53 @@ static void yy_reduce(
sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy145, &all);
}
break;
- case 224: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 228: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy194, yymsp[-4].minor.yy332.a, yymsp[-4].minor.yy332.b, yymsp[-2].minor.yy185, yymsp[0].minor.yy72, yymsp[-10].minor.yy194, yymsp[-8].minor.yy194);
yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
}
break;
- case 225: /* trigger_time ::= BEFORE */
+ case 229: /* trigger_time ::= BEFORE */
{ yymsp[0].minor.yy194 = TK_BEFORE; }
break;
- case 226: /* trigger_time ::= AFTER */
+ case 230: /* trigger_time ::= AFTER */
{ yymsp[0].minor.yy194 = TK_AFTER; }
break;
- case 227: /* trigger_time ::= INSTEAD OF */
+ case 231: /* trigger_time ::= INSTEAD OF */
{ yymsp[-1].minor.yy194 = TK_INSTEAD;}
break;
- case 228: /* trigger_time ::= */
+ case 232: /* trigger_time ::= */
{ yymsp[1].minor.yy194 = TK_BEFORE; }
break;
- case 229: /* trigger_event ::= DELETE|INSERT */
- case 230: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==230);
+ case 233: /* trigger_event ::= DELETE|INSERT */
+ case 234: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==234);
{yymsp[0].minor.yy332.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy332.b = 0;}
break;
- case 231: /* trigger_event ::= UPDATE OF idlist */
+ case 235: /* trigger_event ::= UPDATE OF idlist */
{yymsp[-2].minor.yy332.a = TK_UPDATE; yymsp[-2].minor.yy332.b = yymsp[0].minor.yy254;}
break;
- case 232: /* when_clause ::= */
- case 251: /* key_opt ::= */ yytestcase(yyruleno==251);
+ case 236: /* when_clause ::= */
+ case 255: /* key_opt ::= */ yytestcase(yyruleno==255);
{ yymsp[1].minor.yy72 = 0; }
break;
- case 233: /* when_clause ::= WHEN expr */
- case 252: /* key_opt ::= KEY expr */ yytestcase(yyruleno==252);
+ case 237: /* when_clause ::= WHEN expr */
+ case 256: /* key_opt ::= KEY expr */ yytestcase(yyruleno==256);
{ yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr; }
break;
- case 234: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 238: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
assert( yymsp[-2].minor.yy145!=0 );
yymsp[-2].minor.yy145->pLast->pNext = yymsp[-1].minor.yy145;
yymsp[-2].minor.yy145->pLast = yymsp[-1].minor.yy145;
}
break;
- case 235: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 239: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
assert( yymsp[-1].minor.yy145!=0 );
yymsp[-1].minor.yy145->pLast = yymsp[-1].minor.yy145;
}
break;
- case 236: /* trnm ::= nm DOT nm */
+ case 240: /* trnm ::= nm DOT nm */
{
yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -134742,33 +136093,33 @@ static void yy_reduce(
"statements within triggers");
}
break;
- case 237: /* tridxby ::= INDEXED BY nm */
+ case 241: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 238: /* tridxby ::= NOT INDEXED */
+ case 242: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 239: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */
+ case 243: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */
{yymsp[-6].minor.yy145 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy148, yymsp[0].minor.yy72, yymsp[-5].minor.yy194);}
break;
- case 240: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */
+ case 244: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */
{yymsp[-4].minor.yy145 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy254, yymsp[0].minor.yy243, yymsp[-4].minor.yy194);/*A-overwrites-R*/}
break;
- case 241: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */
+ case 245: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */
{yymsp[-4].minor.yy145 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy72);}
break;
- case 242: /* trigger_cmd ::= select */
+ case 246: /* trigger_cmd ::= select */
{yymsp[0].minor.yy145 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy243); /*A-overwrites-X*/}
break;
- case 243: /* expr ::= RAISE LP IGNORE RP */
+ case 247: /* expr ::= RAISE LP IGNORE RP */
{
spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0);
@@ -134777,7 +136128,7 @@ static void yy_reduce(
}
}
break;
- case 244: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 248: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0);
@@ -134786,151 +136137,151 @@ static void yy_reduce(
}
}
break;
- case 245: /* raisetype ::= ROLLBACK */
+ case 249: /* raisetype ::= ROLLBACK */
{yymsp[0].minor.yy194 = OE_Rollback;}
break;
- case 247: /* raisetype ::= FAIL */
+ case 251: /* raisetype ::= FAIL */
{yymsp[0].minor.yy194 = OE_Fail;}
break;
- case 248: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 252: /* cmd ::= DROP TRIGGER ifexists fullname */
{
sqlite3DropTrigger(pParse,yymsp[0].minor.yy185,yymsp[-1].minor.yy194);
}
break;
- case 249: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 253: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
sqlite3Attach(pParse, yymsp[-3].minor.yy190.pExpr, yymsp[-1].minor.yy190.pExpr, yymsp[0].minor.yy72);
}
break;
- case 250: /* cmd ::= DETACH database_kw_opt expr */
+ case 254: /* cmd ::= DETACH database_kw_opt expr */
{
sqlite3Detach(pParse, yymsp[0].minor.yy190.pExpr);
}
break;
- case 253: /* cmd ::= REINDEX */
+ case 257: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 254: /* cmd ::= REINDEX nm dbnm */
+ case 258: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 255: /* cmd ::= ANALYZE */
+ case 259: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 256: /* cmd ::= ANALYZE nm dbnm */
+ case 260: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 257: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 261: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy185,&yymsp[0].minor.yy0);
}
break;
- case 258: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ case 262: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
{
yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
}
break;
- case 259: /* add_column_fullname ::= fullname */
+ case 263: /* add_column_fullname ::= fullname */
{
disableLookaside(pParse);
sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy185);
}
break;
- case 260: /* cmd ::= create_vtab */
+ case 264: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 261: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 265: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 262: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 266: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy194);
}
break;
- case 263: /* vtabarg ::= */
+ case 267: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 264: /* vtabargtoken ::= ANY */
- case 265: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==265);
- case 266: /* lp ::= LP */ yytestcase(yyruleno==266);
+ case 268: /* vtabargtoken ::= ANY */
+ case 269: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==269);
+ case 270: /* lp ::= LP */ yytestcase(yyruleno==270);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 267: /* with ::= */
+ case 271: /* with ::= */
{yymsp[1].minor.yy285 = 0;}
break;
- case 268: /* with ::= WITH wqlist */
+ case 272: /* with ::= WITH wqlist */
{ yymsp[-1].minor.yy285 = yymsp[0].minor.yy285; }
break;
- case 269: /* with ::= WITH RECURSIVE wqlist */
+ case 273: /* with ::= WITH RECURSIVE wqlist */
{ yymsp[-2].minor.yy285 = yymsp[0].minor.yy285; }
break;
- case 270: /* wqlist ::= nm eidlist_opt AS LP select RP */
+ case 274: /* wqlist ::= nm eidlist_opt AS LP select RP */
{
yymsp[-5].minor.yy285 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243); /*A-overwrites-X*/
}
break;
- case 271: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
+ case 275: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */
{
yymsp[-7].minor.yy285 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy285, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243);
}
break;
default:
- /* (272) input ::= cmdlist */ yytestcase(yyruleno==272);
- /* (273) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==273);
- /* (274) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=274);
- /* (275) ecmd ::= SEMI */ yytestcase(yyruleno==275);
- /* (276) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==276);
- /* (277) explain ::= */ yytestcase(yyruleno==277);
- /* (278) trans_opt ::= */ yytestcase(yyruleno==278);
- /* (279) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==279);
- /* (280) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==280);
- /* (281) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==281);
- /* (282) savepoint_opt ::= */ yytestcase(yyruleno==282);
- /* (283) cmd ::= create_table create_table_args */ yytestcase(yyruleno==283);
- /* (284) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==284);
- /* (285) columnlist ::= columnname carglist */ yytestcase(yyruleno==285);
- /* (286) nm ::= ID|INDEXED */ yytestcase(yyruleno==286);
- /* (287) nm ::= STRING */ yytestcase(yyruleno==287);
- /* (288) nm ::= JOIN_KW */ yytestcase(yyruleno==288);
- /* (289) typetoken ::= typename */ yytestcase(yyruleno==289);
- /* (290) typename ::= ID|STRING */ yytestcase(yyruleno==290);
- /* (291) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=291);
- /* (292) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=292);
- /* (293) carglist ::= carglist ccons */ yytestcase(yyruleno==293);
- /* (294) carglist ::= */ yytestcase(yyruleno==294);
- /* (295) ccons ::= NULL onconf */ yytestcase(yyruleno==295);
- /* (296) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==296);
- /* (297) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==297);
- /* (298) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=298);
- /* (299) tconscomma ::= */ yytestcase(yyruleno==299);
- /* (300) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=300);
- /* (301) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=301);
- /* (302) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=302);
- /* (303) oneselect ::= values */ yytestcase(yyruleno==303);
- /* (304) sclp ::= selcollist COMMA */ yytestcase(yyruleno==304);
- /* (305) as ::= ID|STRING */ yytestcase(yyruleno==305);
- /* (306) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=306);
- /* (307) exprlist ::= nexprlist */ yytestcase(yyruleno==307);
- /* (308) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=308);
- /* (309) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=309);
- /* (310) nmnum ::= ON */ yytestcase(yyruleno==310);
- /* (311) nmnum ::= DELETE */ yytestcase(yyruleno==311);
- /* (312) nmnum ::= DEFAULT */ yytestcase(yyruleno==312);
- /* (313) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==313);
- /* (314) foreach_clause ::= */ yytestcase(yyruleno==314);
- /* (315) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==315);
- /* (316) trnm ::= nm */ yytestcase(yyruleno==316);
- /* (317) tridxby ::= */ yytestcase(yyruleno==317);
- /* (318) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==318);
- /* (319) database_kw_opt ::= */ yytestcase(yyruleno==319);
- /* (320) kwcolumn_opt ::= */ yytestcase(yyruleno==320);
- /* (321) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==321);
- /* (322) vtabarglist ::= vtabarg */ yytestcase(yyruleno==322);
- /* (323) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==323);
- /* (324) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==324);
- /* (325) anylist ::= */ yytestcase(yyruleno==325);
- /* (326) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==326);
- /* (327) anylist ::= anylist ANY */ yytestcase(yyruleno==327);
+ /* (276) input ::= cmdlist */ yytestcase(yyruleno==276);
+ /* (277) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==277);
+ /* (278) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=278);
+ /* (279) ecmd ::= SEMI */ yytestcase(yyruleno==279);
+ /* (280) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==280);
+ /* (281) explain ::= */ yytestcase(yyruleno==281);
+ /* (282) trans_opt ::= */ yytestcase(yyruleno==282);
+ /* (283) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==283);
+ /* (284) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==284);
+ /* (285) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==285);
+ /* (286) savepoint_opt ::= */ yytestcase(yyruleno==286);
+ /* (287) cmd ::= create_table create_table_args */ yytestcase(yyruleno==287);
+ /* (288) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==288);
+ /* (289) columnlist ::= columnname carglist */ yytestcase(yyruleno==289);
+ /* (290) nm ::= ID|INDEXED */ yytestcase(yyruleno==290);
+ /* (291) nm ::= STRING */ yytestcase(yyruleno==291);
+ /* (292) nm ::= JOIN_KW */ yytestcase(yyruleno==292);
+ /* (293) typetoken ::= typename */ yytestcase(yyruleno==293);
+ /* (294) typename ::= ID|STRING */ yytestcase(yyruleno==294);
+ /* (295) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=295);
+ /* (296) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=296);
+ /* (297) carglist ::= carglist ccons */ yytestcase(yyruleno==297);
+ /* (298) carglist ::= */ yytestcase(yyruleno==298);
+ /* (299) ccons ::= NULL onconf */ yytestcase(yyruleno==299);
+ /* (300) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==300);
+ /* (301) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==301);
+ /* (302) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=302);
+ /* (303) tconscomma ::= */ yytestcase(yyruleno==303);
+ /* (304) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=304);
+ /* (305) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=305);
+ /* (306) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=306);
+ /* (307) oneselect ::= values */ yytestcase(yyruleno==307);
+ /* (308) sclp ::= selcollist COMMA */ yytestcase(yyruleno==308);
+ /* (309) as ::= ID|STRING */ yytestcase(yyruleno==309);
+ /* (310) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=310);
+ /* (311) exprlist ::= nexprlist */ yytestcase(yyruleno==311);
+ /* (312) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=312);
+ /* (313) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=313);
+ /* (314) nmnum ::= ON */ yytestcase(yyruleno==314);
+ /* (315) nmnum ::= DELETE */ yytestcase(yyruleno==315);
+ /* (316) nmnum ::= DEFAULT */ yytestcase(yyruleno==316);
+ /* (317) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==317);
+ /* (318) foreach_clause ::= */ yytestcase(yyruleno==318);
+ /* (319) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==319);
+ /* (320) trnm ::= nm */ yytestcase(yyruleno==320);
+ /* (321) tridxby ::= */ yytestcase(yyruleno==321);
+ /* (322) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==322);
+ /* (323) database_kw_opt ::= */ yytestcase(yyruleno==323);
+ /* (324) kwcolumn_opt ::= */ yytestcase(yyruleno==324);
+ /* (325) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==325);
+ /* (326) vtabarglist ::= vtabarg */ yytestcase(yyruleno==326);
+ /* (327) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==327);
+ /* (328) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==328);
+ /* (329) anylist ::= */ yytestcase(yyruleno==329);
+ /* (330) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==330);
+ /* (331) anylist ::= anylist ANY */ yytestcase(yyruleno==331);
break;
/********** End reduce actions ************************************************/
};
@@ -135121,7 +136472,7 @@ SQLITE_PRIVATE void sqlite3Parser(
yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion);
yymajor = YYNOCODE;
}else{
- while( yypParser->yytos >= &yypParser->yystack
+ while( yypParser->yytos >= yypParser->yystack
&& yymx != YYERRORSYMBOL
&& (yyact = yy_find_reduce_action(
yypParser->yytos->stateno,
@@ -135989,14 +137340,26 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
assert( pParse->nVar==0 );
assert( pParse->nzVar==0 );
assert( pParse->azVar==0 );
- while( zSql[i]!=0 ){
+ while( 1 ){
assert( i>=0 );
- pParse->sLastToken.z = &zSql[i];
- pParse->sLastToken.n = sqlite3GetToken((unsigned char*)&zSql[i],&tokenType);
- i += pParse->sLastToken.n;
- if( i>mxSqlLen ){
- pParse->rc = SQLITE_TOOBIG;
- break;
+ if( zSql[i]!=0 ){
+ pParse->sLastToken.z = &zSql[i];
+ pParse->sLastToken.n = sqlite3GetToken((u8*)&zSql[i],&tokenType);
+ i += pParse->sLastToken.n;
+ if( i>mxSqlLen ){
+ pParse->rc = SQLITE_TOOBIG;
+ break;
+ }
+ }else{
+ /* Upon reaching the end of input, call the parser two more times
+ ** with tokens TK_SEMI and 0, in that order. */
+ if( lastTokenParsed==TK_SEMI ){
+ tokenType = 0;
+ }else if( lastTokenParsed==0 ){
+ break;
+ }else{
+ tokenType = TK_SEMI;
+ }
}
if( tokenType>=TK_SPACE ){
assert( tokenType==TK_SPACE || tokenType==TK_ILLEGAL );
@@ -136017,15 +137380,6 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
}
assert( nErr==0 );
pParse->zTail = &zSql[i];
- if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){
- assert( zSql[i]==0 );
- if( lastTokenParsed!=TK_SEMI ){
- sqlite3Parser(pEngine, TK_SEMI, pParse->sLastToken, pParse);
- }
- if( pParse->rc==SQLITE_OK && db->mallocFailed==0 ){
- sqlite3Parser(pEngine, 0, pParse->sLastToken, pParse);
- }
- }
#ifdef YYTRACKMAXSTACKDEPTH
sqlite3_mutex_enter(sqlite3MallocMutex());
sqlite3StatusHighwater(SQLITE_STATUS_PARSER_STACK,
@@ -136193,7 +137547,7 @@ SQLITE_PRIVATE const char sqlite3IsEbcdicIdChar[];
** to recognize the end of a trigger can be omitted. All we have to do
** is look for a semicolon that is not part of an string or comment.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
+SQLITE_API int sqlite3_complete(const char *zSql){
u8 state = 0; /* Current state, using numbers defined in header comment */
u8 token; /* Value of the next token */
@@ -136358,7 +137712,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *zSql){
** above, except that the parameter is required to be UTF-16 encoded, not
** UTF-8.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *zSql){
+SQLITE_API int sqlite3_complete16(const void *zSql){
sqlite3_value *pVal;
char const *zSql8;
int rc;
@@ -136518,24 +137872,24 @@ SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
/* IMPLEMENTATION-OF: R-53536-42575 The sqlite3_libversion() function returns
** a pointer to the sqlite3_version[] string constant.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void){ return sqlite3_version; }
+SQLITE_API const char *sqlite3_libversion(void){ return sqlite3_version; }
/* IMPLEMENTATION-OF: R-63124-39300 The sqlite3_sourceid() function returns a
** pointer to a string constant whose value is the same as the
** SQLITE_SOURCE_ID C preprocessor macro.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
+SQLITE_API const char *sqlite3_sourceid(void){ return SQLITE_SOURCE_ID; }
/* IMPLEMENTATION-OF: R-35210-63508 The sqlite3_libversion_number() function
** returns an integer equal to SQLITE_VERSION_NUMBER.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; }
+SQLITE_API int sqlite3_libversion_number(void){ return SQLITE_VERSION_NUMBER; }
/* IMPLEMENTATION-OF: R-20790-14025 The sqlite3_threadsafe() function returns
** zero if and only if SQLite was compiled with mutexing code omitted due to
** the SQLITE_THREADSAFE compile-time option being set to 0.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; }
+SQLITE_API int sqlite3_threadsafe(void){ return SQLITE_THREADSAFE; }
/*
** When compiling the test fixture or with debugging enabled (on Win32),
@@ -136608,7 +137962,7 @@ SQLITE_API char *sqlite3_data_directory = 0;
** * Recursive calls to this routine from thread X return immediately
** without blocking.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
+SQLITE_API int sqlite3_initialize(void){
MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */
int rc; /* Result code */
#ifdef SQLITE_EXTRA_INIT
@@ -136774,7 +138128,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void){
** on when SQLite is already shut down. If SQLite is already shut down
** when this routine is invoked, then this routine is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void){
+SQLITE_API int sqlite3_shutdown(void){
#ifdef SQLITE_OMIT_WSD
int rc = sqlite3_wsd_init(4096, 24);
if( rc!=SQLITE_OK ){
@@ -136828,7 +138182,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void){
** threadsafe. Failure to heed these warnings can lead to unpredictable
** behavior.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int op, ...){
+SQLITE_API int sqlite3_config(int op, ...){
va_list ap;
int rc = SQLITE_OK;
@@ -137193,7 +138547,7 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
/*
** Return the mutex associated with a database connection.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3 *db){
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -137207,7 +138561,7 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3 *db){
** Free up as much memory as we can from the given database
** connection.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3 *db){
+SQLITE_API int sqlite3_db_release_memory(sqlite3 *db){
int i;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -137231,7 +138585,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3 *db){
** Flush any dirty pages in the pager-cache for any attached database
** to disk.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3 *db){
+SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
int i;
int rc = SQLITE_OK;
int bSeenBusy = 0;
@@ -137260,11 +138614,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3 *db){
/*
** Configuration settings for an individual database connection
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3 *db, int op, ...){
+SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
va_list ap;
int rc;
va_start(ap, op);
switch( op ){
+ case SQLITE_DBCONFIG_MAINDBNAME: {
+ db->aDb[0].zDbSName = va_arg(ap,char*);
+ rc = SQLITE_OK;
+ break;
+ }
case SQLITE_DBCONFIG_LOOKASIDE: {
void *pBuf = va_arg(ap, void*); /* IMP: R-26835-10964 */
int sz = va_arg(ap, int); /* IMP: R-47871-25994 */
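
The new SQLITE_DBCONFIG_MAINDBNAME case above lets an application rename the "main" schema of a connection. A minimal sketch of the call from application code, assuming an open connection db; the name "content" is an arbitrary example, and the string must remain valid for the life of the connection because the pointer is stored directly:

#include "sqlite3.h"

/* Rename the "main" schema so SQL can refer to it as "content".
** A string literal has static storage, so it outlives the connection. */
static int rename_main_schema(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_MAINDBNAME, "content");
}
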
@@ -137381,7 +138740,7 @@ static int nocaseCollatingFunc(
/*
** Return the ROWID of the most recent insert
*/
-SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3 *db){
+SQLITE_API sqlite_int64 sqlite3_last_insert_rowid(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -137394,7 +138753,7 @@ SQLITE_API sqlite_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3 *db){
/*
** Return the number of changes in the most recent call to sqlite3_exec().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3 *db){
+SQLITE_API int sqlite3_changes(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -137407,7 +138766,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3 *db){
/*
** Return the number of changes since the database handle was opened.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3 *db){
+SQLITE_API int sqlite3_total_changes(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -137558,8 +138917,8 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){
** unclosed resources, and arranges for deallocation when the last
** prepare statement or sqlite3_backup closes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
+SQLITE_API int sqlite3_close(sqlite3 *db){ return sqlite3Close(db,0); }
+SQLITE_API int sqlite3_close_v2(sqlite3 *db){ return sqlite3Close(db,1); }
/*
@@ -137966,7 +139325,7 @@ SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler *p){
** This routine sets the busy callback for an Sqlite database to the
** given callback function with the given argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(
+SQLITE_API int sqlite3_busy_handler(
sqlite3 *db,
int (*xBusy)(void*,int),
void *pArg
@@ -137989,7 +139348,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(
** given callback function with the given argument. The progress callback will
** be invoked every nOps opcodes.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(
+SQLITE_API void sqlite3_progress_handler(
sqlite3 *db,
int nOps,
int (*xProgress)(void*),
@@ -138020,7 +139379,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(
** This routine installs a default busy handler that waits for the
** specified number of milliseconds before returning 0.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3 *db, int ms){
+SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
#endif
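
As the comment above says, sqlite3_busy_timeout() installs the default busy handler; a typical caller simply sets a retry window in milliseconds, for example:

#include "sqlite3.h"

/* Retry for up to five seconds before a lock error is returned. */
static int set_lock_timeout(sqlite3 *db){
  return sqlite3_busy_timeout(db, 5000);
}
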
@@ -138036,7 +139395,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3 *db, int ms){
/*
** Cause any pending operation to stop at its earliest opportunity.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3 *db){
+SQLITE_API void sqlite3_interrupt(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -138152,7 +139511,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
/*
** Create new user functions.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunc,
int nArg,
@@ -138166,7 +139525,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
xFinal, 0);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunc,
int nArg,
@@ -138209,7 +139568,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
}
#ifndef SQLITE_OMIT_UTF16
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -138249,7 +139608,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
** A global function must exist in order for name resolution to work
** properly.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(
+SQLITE_API int sqlite3_overload_function(
sqlite3 *db,
const char *zName,
int nArg
@@ -138281,7 +139640,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(
** SQL statement.
*/
#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,const char*), void *pArg){
+SQLITE_API void *sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,const char*), void *pArg){
void *pOld;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -138302,7 +139661,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,c
/* Register a trace callback using the version-2 interface.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
+SQLITE_API int sqlite3_trace_v2(
sqlite3 *db, /* Trace this connection */
unsigned mTrace, /* Mask of events to be traced */
int(*xTrace)(unsigned,void*,void*,void*), /* Callback to invoke */
@@ -138314,6 +139673,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
}
#endif
sqlite3_mutex_enter(db->mutex);
+ if( mTrace==0 ) xTrace = 0;
+ if( xTrace==0 ) mTrace = 0;
db->mTrace = mTrace;
db->xTrace = xTrace;
db->pTraceArg = pArg;
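
The two lines added above make a zero mask and a NULL callback equivalent, so either form unregisters the trace. A short sketch of registering a statement trace through this interface; the printf body is illustrative only:

#include <stdio.h>
#include "sqlite3.h"

/* For SQLITE_TRACE_STMT events, pX is the unexpanded SQL of the statement. */
static int xTraceStmt(unsigned mType, void *pCtx, void *pP, void *pX){
  (void)pCtx; (void)pP;
  if( mType==SQLITE_TRACE_STMT ){
    printf("stmt: %s\n", (const char*)pX);
  }
  return 0;
}
/* Registration: sqlite3_trace_v2(db, SQLITE_TRACE_STMT, xTraceStmt, 0); */
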
@@ -138330,7 +139691,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
** profile is a pointer to a function that is invoked at the conclusion of
** each SQL statement that is run.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_profile(
+SQLITE_API void *sqlite3_profile(
sqlite3 *db,
void (*xProfile)(void*,const char*,sqlite_uint64),
void *pArg
@@ -138358,7 +139719,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_profile(
** If the invoked function returns non-zero, then the commit becomes a
** rollback.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(
+SQLITE_API void *sqlite3_commit_hook(
sqlite3 *db, /* Attach the hook to this database */
int (*xCallback)(void*), /* Function to invoke on each commit */
void *pArg /* Argument to the function */
@@ -138383,7 +139744,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(
** Register a callback to be invoked each time a row is updated,
** inserted or deleted using this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3 *db, /* Attach the hook to this database */
void (*xCallback)(void*,int,char const *,char const *,sqlite_int64),
void *pArg /* Argument to the function */
@@ -138408,7 +139769,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** Register a callback to be invoked each time a transaction is rolled
** back by this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(
+SQLITE_API void *sqlite3_rollback_hook(
sqlite3 *db, /* Attach the hook to this database */
void (*xCallback)(void*), /* Callback function */
void *pArg /* Argument to the function */
@@ -138434,7 +139795,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(
** Register a callback to be invoked each time a row is updated,
** inserted or deleted using this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_preupdate_hook(
+SQLITE_API void *sqlite3_preupdate_hook(
sqlite3 *db, /* Attach the hook to this database */
void(*xCallback)( /* Callback function */
void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64),
@@ -138483,7 +139844,7 @@ SQLITE_PRIVATE int sqlite3WalDefaultHook(
** using sqlite3_wal_hook() disables the automatic checkpoint mechanism
** configured by this function.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame){
#ifdef SQLITE_OMIT_WAL
UNUSED_PARAMETER(db);
UNUSED_PARAMETER(nFrame);
@@ -138504,7 +139865,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int nFrame
** Register a callback to be invoked each time a transaction is written
** into the write-ahead-log by this database connection.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3 *db, /* Attach the hook to this db handle */
int(*xCallback)(void *, sqlite3*, const char*, int),
void *pArg /* First argument passed to xCallback() */
@@ -138531,7 +139892,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
/*
** Checkpoint database zDb.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
@@ -138586,7 +139947,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** to contains a zero-length string, all attached databases are
** checkpointed.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb){
/* EVIDENCE-OF: R-41613-20553 The sqlite3_wal_checkpoint(D,X) is equivalent to
** sqlite3_wal_checkpoint_v2(D,X,SQLITE_CHECKPOINT_PASSIVE,0,0). */
return sqlite3_wal_checkpoint_v2(db,zDb,SQLITE_CHECKPOINT_PASSIVE,0,0);
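
A minimal sketch of the same call from application code; passing a NULL database name checkpoints every attached database, and the two output counters receive WAL frame totals:

#include "sqlite3.h"

/* Passive checkpoint of all attached databases. */
static int checkpoint_all(sqlite3 *db){
  int nLog = 0, nCkpt = 0;
  return sqlite3_wal_checkpoint_v2(db, 0, SQLITE_CHECKPOINT_PASSIVE,
                                   &nLog, &nCkpt);
}
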
@@ -138677,7 +140038,7 @@ SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3 *db){
** Return UTF-8 encoded English language explanation of the most recent
** error.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){
+SQLITE_API const char *sqlite3_errmsg(sqlite3 *db){
const char *z;
if( !db ){
return sqlite3ErrStr(SQLITE_NOMEM_BKPT);
@@ -138705,7 +140066,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3 *db){
** Return UTF-16 encoded English language explanation of the most recent
** error.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){
+SQLITE_API const void *sqlite3_errmsg16(sqlite3 *db){
static const u16 outOfMem[] = {
'o', 'u', 't', ' ', 'o', 'f', ' ', 'm', 'e', 'm', 'o', 'r', 'y', 0
};
@@ -138750,7 +140111,7 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3 *db){
** Return the most recent error code generated by an SQLite routine. If NULL is
** passed to this function, we assume a malloc() failed during sqlite3_open().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db){
+SQLITE_API int sqlite3_errcode(sqlite3 *db){
if( db && !sqlite3SafetyCheckSickOrOk(db) ){
return SQLITE_MISUSE_BKPT;
}
@@ -138759,7 +140120,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db){
}
return db->errCode & db->errMask;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db){
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db){
if( db && !sqlite3SafetyCheckSickOrOk(db) ){
return SQLITE_MISUSE_BKPT;
}
@@ -138768,7 +140129,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db){
}
return db->errCode;
}
-SQLITE_API int SQLITE_STDCALL sqlite3_system_errno(sqlite3 *db){
+SQLITE_API int sqlite3_system_errno(sqlite3 *db){
return db ? db->iSysErrno : 0;
}
@@ -138777,7 +140138,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_system_errno(sqlite3 *db){
** argument. For now, this simply calls the internal sqlite3ErrStr()
** function.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int rc){
+SQLITE_API const char *sqlite3_errstr(int rc){
return sqlite3ErrStr(rc);
}
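
The error-reporting functions above are normally consulted immediately after a failing call, for example:

#include <stdio.h>
#include "sqlite3.h"

/* Run a statement and report any failure using the connection's error state. */
static int exec_and_report(sqlite3 *db, const char *zSql){
  int rc = sqlite3_exec(db, zSql, 0, 0, 0);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "error %d (%s): %s\n",
            sqlite3_extended_errcode(db), sqlite3_errstr(rc),
            sqlite3_errmsg(db));
  }
  return rc;
}
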
@@ -138925,7 +140286,7 @@ static const int aHardLimit[] = {
** It merely prevents new constructs that exceed the limit
** from forming.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3 *db, int limitId, int newLimit){
+SQLITE_API int sqlite3_limit(sqlite3 *db, int limitId, int newLimit){
int oldLimit;
#ifdef SQLITE_ENABLE_API_ARMOR
@@ -139408,9 +140769,9 @@ static int openDatabase(
/* The default safety_level for the main database is FULL; for the temp
** database it is OFF. This matches the pager layer defaults.
*/
- db->aDb[0].zName = "main";
+ db->aDb[0].zDbSName = "main";
db->aDb[0].safety_level = SQLITE_DEFAULT_SYNCHRONOUS+1;
- db->aDb[1].zName = "temp";
+ db->aDb[1].zDbSName = "temp";
db->aDb[1].safety_level = PAGER_SYNCHRONOUS_OFF;
db->magic = SQLITE_MAGIC_OPEN;
@@ -139424,11 +140785,20 @@ static int openDatabase(
*/
sqlite3Error(db, SQLITE_OK);
sqlite3RegisterPerConnectionBuiltinFunctions(db);
+ rc = sqlite3_errcode(db);
+
+#ifdef SQLITE_ENABLE_FTS5
+ /* Register any built-in FTS5 module before loading the automatic
+ ** extensions. This allows automatic extensions to register FTS5
+ ** tokenizers and auxiliary functions. */
+ if( !db->mallocFailed && rc==SQLITE_OK ){
+ rc = sqlite3Fts5Init(db);
+ }
+#endif
/* Load automatic extensions - extensions that have been registered
** using the sqlite3_automatic_extension() API.
*/
- rc = sqlite3_errcode(db);
if( rc==SQLITE_OK ){
sqlite3AutoLoadExtensions(db);
rc = sqlite3_errcode(db);
@@ -139457,12 +140827,6 @@ static int openDatabase(
}
#endif
-#ifdef SQLITE_ENABLE_FTS5
- if( !db->mallocFailed && rc==SQLITE_OK ){
- rc = sqlite3Fts5Init(db);
- }
-#endif
-
#ifdef SQLITE_ENABLE_ICU
if( !db->mallocFailed && rc==SQLITE_OK ){
rc = sqlite3IcuInit(db);
@@ -139549,14 +140913,14 @@ opendb_out:
/*
** Open a new database handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *zFilename,
sqlite3 **ppDb
){
return openDatabase(zFilename, ppDb,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, 0);
}
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -139569,7 +140933,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
/*
** Open a new database handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *zFilename,
sqlite3 **ppDb
){
@@ -139608,7 +140972,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open16(
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3* db,
const char *zName,
int enc,
@@ -139621,7 +140985,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3* db,
const char *zName,
int enc,
@@ -139646,7 +141010,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
/*
** Register a new collation sequence with the database handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3* db,
const void *zName,
int enc,
@@ -139676,7 +141040,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** Register a collation sequence factory callback with the database handle
** db. Replace any previously installed collation sequence factory.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3 *db,
void *pCollNeededArg,
void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*)
@@ -139697,7 +141061,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
** Register a collation sequence factory callback with the database handle
** db. Replace any previously installed collation sequence factory.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3 *db,
void *pCollNeededArg,
void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*)
@@ -139719,7 +141083,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** This function is now an anachronism. It used to be used to recover from a
** malloc() failure, but SQLite now does this automatically.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_global_recover(void){
+SQLITE_API int sqlite3_global_recover(void){
return SQLITE_OK;
}
#endif
@@ -139730,7 +141094,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_global_recover(void){
** by default. Autocommit is disabled by a BEGIN statement and reenabled
** by the next COMMIT or ROLLBACK.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3 *db){
+SQLITE_API int sqlite3_get_autocommit(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
(void)SQLITE_MISUSE_BKPT;
@@ -139787,7 +141151,7 @@ SQLITE_PRIVATE int sqlite3IoerrnomemError(int lineno){
** SQLite no longer uses thread-specific data so this routine is now a
** no-op. It is retained for historical compatibility.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_thread_cleanup(void){
+SQLITE_API void sqlite3_thread_cleanup(void){
}
#endif
@@ -139795,7 +141159,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_thread_cleanup(void){
** Return meta information about a specific column of a database table.
** See comment in sqlite3.h (sqlite.h.in) for details.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
@@ -139913,7 +141277,7 @@ error_out:
/*
** Sleep for a little while. Return the amount of time slept.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int ms){
+SQLITE_API int sqlite3_sleep(int ms){
sqlite3_vfs *pVfs;
int rc;
pVfs = sqlite3_vfs_find(0);
@@ -139929,7 +141293,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int ms){
/*
** Enable or disable the extended result codes.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3 *db, int onoff){
+SQLITE_API int sqlite3_extended_result_codes(sqlite3 *db, int onoff){
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT;
#endif
@@ -139942,7 +141306,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3 *db, int ono
/*
** Invoke the xFileControl method on a particular database.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){
+SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, void *pArg){
int rc = SQLITE_ERROR;
Btree *pBtree;
@@ -139982,7 +141346,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3 *db, const char *zDbN
/*
** Interface to the testing logic.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
+SQLITE_API int sqlite3_test_control(int op, ...){
int rc = 0;
#ifdef SQLITE_OMIT_BUILTIN_TEST
UNUSED_PARAMETER(op);
@@ -140250,6 +141614,15 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
break;
}
+ /* Set the threshold at which OP_Once counters reset back to zero.
+ ** By default this is 0x7ffffffe (over 2 billion), but that value is
+ ** too big to test in a reasonable amount of time, so this control is
+ ** provided to set a small and easily reachable reset value.
+ */
+ case SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD: {
+ sqlite3GlobalConfig.iOnceResetThreshold = va_arg(ap, int);
+ break;
+ }
/* sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE, xCallback, ptr);
**
@@ -140327,7 +141700,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...){
** parameter if it exists. If the parameter does not exist, this routine
** returns a NULL pointer.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam){
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam){
if( zFilename==0 || zParam==0 ) return 0;
zFilename += sqlite3Strlen30(zFilename) + 1;
while( zFilename[0] ){
@@ -140342,7 +141715,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilenam
/*
** Return a boolean value for a query parameter.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){
+SQLITE_API int sqlite3_uri_boolean(const char *zFilename, const char *zParam, int bDflt){
const char *z = sqlite3_uri_parameter(zFilename, zParam);
bDflt = bDflt!=0;
return z ? sqlite3GetBoolean(z, bDflt) : bDflt;
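
These URI helpers are meant to be called on a filename exactly as SQLite passed it to a VFS xOpen, so the query parameters are still attached. A sketch under that assumption; "mykey" is an arbitrary illustrative parameter, while "immutable" is a standard one:

#include "sqlite3.h"

/* zName is the filename as received by a VFS xOpen implementation. */
static int query_uri_params(const char *zName){
  const char *zVal = sqlite3_uri_parameter(zName, "mykey"); /* NULL if absent */
  (void)zVal;
  return sqlite3_uri_boolean(zName, "immutable", 0);
}
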
@@ -140351,7 +141724,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFilename, const c
/*
** Return a 64-bit integer value for a query parameter.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(
const char *zFilename, /* Filename as passed to xOpen */
const char *zParam, /* URI parameter sought */
sqlite3_int64 bDflt /* return if parameter is missing */
@@ -140371,7 +141744,7 @@ SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
int i;
for(i=0; i<db->nDb; i++){
if( db->aDb[i].pBt
- && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zName)==0)
+ && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zDbSName)==0)
){
return db->aDb[i].pBt;
}
@@ -140383,7 +141756,7 @@ SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
** Return the filename of the database associated with a database
** connection.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName){
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName){
Btree *pBt;
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
@@ -140399,7 +141772,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const cha
** Return 1 if database is read-only or 0 if read/write. Return -1 if
** no such database exists.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName){
Btree *pBt;
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
@@ -140416,7 +141789,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** Obtain a snapshot handle for the snapshot of database zDb currently
** being read by handle db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_get(
+SQLITE_API int sqlite3_snapshot_get(
sqlite3 *db,
const char *zDb,
sqlite3_snapshot **ppSnapshot
@@ -140451,7 +141824,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_get(
/*
** Open a read-transaction on the snapshot identified by pSnapshot.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_open(
+SQLITE_API int sqlite3_snapshot_open(
sqlite3 *db,
const char *zDb,
sqlite3_snapshot *pSnapshot
@@ -140488,7 +141861,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_open(
/*
** Free a snapshot handle obtained from sqlite3_snapshot_get().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){
+SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){
sqlite3_free(pSnapshot);
}
#endif /* SQLITE_ENABLE_SNAPSHOT */
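
A sketch of the snapshot life-cycle covered by the three functions above, assuming a build with SQLITE_ENABLE_SNAPSHOT and a WAL-mode database with a read transaction open at the time of the _get call:

#include "sqlite3.h"

#ifdef SQLITE_ENABLE_SNAPSHOT
/* Capture the snapshot currently being read, then re-open it later. */
static int capture_and_reopen(sqlite3 *db){
  sqlite3_snapshot *pSnap = 0;
  int rc = sqlite3_snapshot_get(db, "main", &pSnap);
  if( rc==SQLITE_OK ){
    /* ... end the current read transaction here, then: */
    rc = sqlite3_snapshot_open(db, "main", pSnap);
    sqlite3_snapshot_free(pSnap);
  }
  return rc;
}
#endif
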
@@ -140642,7 +142015,7 @@ static void leaveMutex(void){
** on the same "db". If xNotify==0 then any prior callbacks are immediately
** cancelled.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *db,
void (*xNotify)(void **, int),
void *pArg
@@ -147645,7 +149018,7 @@ SQLITE_PRIVATE int sqlite3Fts3Corrupt(){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts3_init(
+SQLITE_API int sqlite3_fts3_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -161486,7 +162859,7 @@ static int rtreeFilter(
if( idxNum==1 ){
/* Special case - lookup by rowid. */
RtreeNode *pLeaf; /* Leaf on which the required cell resides */
- RtreeSearchPoint *p; /* Search point for the the leaf */
+ RtreeSearchPoint *p; /* Search point for the leaf */
i64 iRowid = sqlite3_value_int64(argv[0]);
i64 iNode = 0;
rc = findLeafNode(pRtree, iRowid, &pLeaf, &iNode);
@@ -162956,10 +164329,12 @@ static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){
int rc;
i64 nRow = 0;
- if( sqlite3_table_column_metadata(db,pRtree->zDb,"sqlite_stat1",
- 0,0,0,0,0,0)==SQLITE_ERROR ){
+ rc = sqlite3_table_column_metadata(
+ db, pRtree->zDb, "sqlite_stat1",0,0,0,0,0,0
+ );
+ if( rc!=SQLITE_OK ){
pRtree->nRowEst = RTREE_DEFAULT_ROWEST;
- return SQLITE_OK;
+ return rc==SQLITE_ERROR ? SQLITE_OK : rc;
}
zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName);
if( zSql==0 ){
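
The rewritten check above leans on sqlite3_table_column_metadata() returning SQLITE_ERROR when the named table is missing; the same call works as a plain existence probe when the column name is NULL, for example:

#include "sqlite3.h"

/* Return non-zero if table zTab exists in schema zDb ("main", "temp", ...). */
static int table_exists(sqlite3 *db, const char *zDb, const char *zTab){
  return sqlite3_table_column_metadata(db, zDb, zTab, 0,0,0,0,0,0)==SQLITE_OK;
}
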
@@ -163446,7 +164821,7 @@ static void geomCallback(sqlite3_context *ctx, int nArg, sqlite3_value **aArg){
/*
** Register a new geometry function for use with the r-tree MATCH operator.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
+SQLITE_API int sqlite3_rtree_geometry_callback(
sqlite3 *db, /* Register SQL function on this connection */
const char *zGeom, /* Name of the new SQL function */
int (*xGeom)(sqlite3_rtree_geometry*,int,RtreeDValue*,int*), /* Callback */
@@ -163470,7 +164845,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
** Register a new 2nd-generation geometry function for use with the
** r-tree MATCH operator.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
+SQLITE_API int sqlite3_rtree_query_callback(
sqlite3 *db, /* Register SQL function on this connection */
const char *zQueryFunc, /* Name of new SQL function */
int (*xQueryFunc)(sqlite3_rtree_query_info*), /* Callback */
@@ -163495,7 +164870,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_init(
+SQLITE_API int sqlite3_rtree_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -163860,7 +165235,7 @@ static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){
** of upper() or lower().
**
** lower('I', 'en_us') -> 'i'
-** lower('I', 'tr_tr') -> 'ı' (small dotless i)
+** lower('I', 'tr_tr') -> '\u131' (small dotless i)
**
** http://www.icu-project.org/userguide/posix.html#case_mappings
*/
@@ -164046,7 +165421,7 @@ SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_icu_init(
+SQLITE_API int sqlite3_icu_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -164522,7 +165897,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
** may also be named data<integer>_<target>, where <integer> is any sequence
** of zero or more numeric characters (0-9). This can be significant because
** tables within the RBU database are always processed in order sorted by
-** name. By judicious selection of the the <integer> portion of the names
+** name. By judicious selection of the <integer> portion of the names
** of the RBU tables the user can therefore control the order in which they
** are processed. This can be useful, for example, to ensure that "external
** content" FTS4 tables are updated before their underlying content tables.
@@ -164726,7 +166101,7 @@ typedef struct sqlite3rbu sqlite3rbu;
** not work out of the box with zipvfs. Refer to the comment describing
** the zipvfs_create_vfs() API below for details on using RBU with zipvfs.
*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
+SQLITE_API sqlite3rbu *sqlite3rbu_open(
const char *zTarget,
const char *zRbu,
const char *zState
@@ -164737,16 +166112,22 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
** An RBU vacuum is similar to SQLite's built-in VACUUM command, except
** that it can be suspended and resumed like an RBU update.
**
-** The second argument to this function, which may not be NULL, identifies
-** a database in which to store the state of the RBU vacuum operation if
-** it is suspended. The first time sqlite3rbu_vacuum() is called, to start
-** an RBU vacuum operation, the state database should either not exist or
-** be empty (contain no tables). If an RBU vacuum is suspended by calling
+** The second argument to this function identifies a database in which
+** to store the state of the RBU vacuum operation if it is suspended. The
+** first time sqlite3rbu_vacuum() is called, to start an RBU vacuum
+** operation, the state database should either not exist or be empty
+** (contain no tables). If an RBU vacuum is suspended by calling
** sqlite3rbu_close() on the RBU handle before sqlite3rbu_step() has
** returned SQLITE_DONE, the vacuum state is stored in the state database.
** The vacuum can be resumed by calling this function to open a new RBU
** handle specifying the same target and state databases.
**
+** If the second argument passed to this function is NULL, then the
+** name of the state database is "<database>-vacuum", where <database>
+** is the name of the target database file. In this case, on UNIX, if the
+** state database is not already present in the file-system, it is created
+** with the same permissions as the target db.
+**
** This function does not delete the state database after an RBU vacuum
** is completed, even if it created it. However, if the call to
** sqlite3rbu_close() returns any value other than SQLITE_OK, the contents
@@ -164759,7 +166140,7 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
** a description of the complications associated with using RBU with
** zipvfs databases.
*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum(
+SQLITE_API sqlite3rbu *sqlite3rbu_vacuum(
const char *zTarget,
const char *zState
);
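
With the new default described above, an RBU vacuum can be driven with a NULL state argument and the state database name is derived from the target. A sketch of a resumable vacuum of a hypothetical "app.db", assuming a build with SQLITE_ENABLE_RBU:

#include "sqlite3.h"
#include "sqlite3rbu.h"   /* RBU extension header */

/* Vacuum "app.db"; if interrupted, a later call resumes from "app.db-vacuum". */
static int vacuum_app_db(void){
  char *zErr = 0;
  int rc;
  sqlite3rbu *p = sqlite3rbu_vacuum("app.db", 0);
  while( sqlite3rbu_step(p)==SQLITE_OK );
  rc = sqlite3rbu_close(p, &zErr);          /* SQLITE_DONE on completion */
  sqlite3_free(zErr);
  return rc;
}
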
@@ -164795,7 +166176,7 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum(
** Database handles returned by this function remain valid until the next
** call to any sqlite3rbu_xxx() function other than sqlite3rbu_db().
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu*, int bRbu);
+SQLITE_API sqlite3 *sqlite3rbu_db(sqlite3rbu*, int bRbu);
/*
** Do some work towards applying the RBU update to the target db.
@@ -164809,7 +166190,7 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu*, int bRbu);
** SQLITE_OK, all subsequent calls on the same RBU handle are no-ops
** that immediately return the same value.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *pRbu);
+SQLITE_API int sqlite3rbu_step(sqlite3rbu *pRbu);
/*
** Force RBU to save its state to disk.
@@ -164821,7 +166202,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *pRbu);
**
** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *pRbu);
+SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *pRbu);
/*
** Close an RBU handle.
@@ -164841,14 +166222,14 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *pRbu);
** update has been partially applied, or SQLITE_DONE if it has been
** completely applied.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg);
+SQLITE_API int sqlite3rbu_close(sqlite3rbu *pRbu, char **pzErrmsg);
/*
** Return the total number of key-value operations (inserts, deletes or
** updates) that have been performed on the target database since the
** current RBU update was started.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu);
+SQLITE_API sqlite3_int64 sqlite3rbu_progress(sqlite3rbu *pRbu);
/*
** Obtain permyriadage (permyriadage is to 10000 as percentage is to 100)
@@ -164890,7 +166271,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu);
** table exists but is not correctly populated, the value of the *pnOne
** output variable during stage 1 is undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int *pnTwo);
+SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int *pnTwo);
/*
** Obtain an indication as to the current stage of an RBU update or vacuum.
@@ -164928,7 +166309,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnO
#define SQLITE_RBU_STATE_DONE 4
#define SQLITE_RBU_STATE_ERROR 5
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *pRbu);
+SQLITE_API int sqlite3rbu_state(sqlite3rbu *pRbu);
/*
** Create an RBU VFS named zName that accesses the underlying file-system
@@ -164972,7 +166353,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *pRbu);
** file-system via "rbu" all the time, even if it only uses RBU functionality
** occasionally.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const char *zParent);
+SQLITE_API int sqlite3rbu_create_vfs(const char *zName, const char *zParent);
/*
** Deregister and destroy an RBU vfs created by an earlier call to
@@ -164982,7 +166363,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const cha
** before all database handles that use it have been closed, the results
** are undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName);
+SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName);
#if 0
} /* end of the 'extern "C"' block */
@@ -167238,15 +168619,18 @@ static RbuState *rbuLoadState(sqlite3rbu *p){
** error occurs, leave an error code and message in the RBU handle.
*/
static void rbuOpenDatabase(sqlite3rbu *p){
- assert( p->rc==SQLITE_OK );
- assert( p->dbMain==0 && p->dbRbu==0 );
- assert( rbuIsVacuum(p) || p->zTarget!=0 );
+ assert( p->rc || (p->dbMain==0 && p->dbRbu==0) );
+ assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 );
/* Open the RBU database */
p->dbRbu = rbuOpenDbhandle(p, p->zRbu, 1);
if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){
sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p);
+ if( p->zState==0 ){
+ const char *zFile = sqlite3_db_filename(p->dbRbu, "main");
+ p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile);
+ }
}
/* If using separate RBU and state databases, attach the state database to
@@ -168076,7 +169460,7 @@ static void rbuCreateTargetSchema(sqlite3rbu *p){
/*
** Step the RBU object.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *p){
+SQLITE_API int sqlite3rbu_step(sqlite3rbu *p){
if( p ){
switch( p->eStage ){
case RBU_STAGE_OAL: {
@@ -168381,8 +169765,7 @@ static sqlite3rbu *openRbuHandle(
sqlite3rbu *p;
size_t nTarget = zTarget ? strlen(zTarget) : 0;
size_t nRbu = strlen(zRbu);
- size_t nState = zState ? strlen(zState) : 0;
- size_t nByte = sizeof(sqlite3rbu) + nTarget+1 + nRbu+1+ nState+1;
+ size_t nByte = sizeof(sqlite3rbu) + nTarget+1 + nRbu+1;
p = (sqlite3rbu*)sqlite3_malloc64(nByte);
if( p ){
@@ -168404,8 +169787,7 @@ static sqlite3rbu *openRbuHandle(
memcpy(p->zRbu, zRbu, nRbu+1);
pCsr += nRbu+1;
if( zState ){
- p->zState = pCsr;
- memcpy(p->zState, zState, nState+1);
+ p->zState = rbuMPrintf(p, "%s", zState);
}
rbuOpenDatabase(p);
}
@@ -168516,13 +169898,28 @@ static sqlite3rbu *openRbuHandle(
}
/*
+** Allocate and return an RBU handle with all fields zeroed except for the
+** error code, which is set to SQLITE_MISUSE.
+*/
+static sqlite3rbu *rbuMisuseError(void){
+ sqlite3rbu *pRet;
+ pRet = sqlite3_malloc64(sizeof(sqlite3rbu));
+ if( pRet ){
+ memset(pRet, 0, sizeof(sqlite3rbu));
+ pRet->rc = SQLITE_MISUSE;
+ }
+ return pRet;
+}
+
+/*
** Open and return a new RBU handle.
*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
+SQLITE_API sqlite3rbu *sqlite3rbu_open(
const char *zTarget,
const char *zRbu,
const char *zState
){
+ if( zTarget==0 || zRbu==0 ){ return rbuMisuseError(); }
/* TODO: Check that zTarget and zRbu are non-NULL */
return openRbuHandle(zTarget, zRbu, zState);
}
@@ -168530,10 +169927,11 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open(
/*
** Open a handle to begin or resume an RBU VACUUM operation.
*/
-SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum(
+SQLITE_API sqlite3rbu *sqlite3rbu_vacuum(
const char *zTarget,
const char *zState
){
+ if( zTarget==0 ){ return rbuMisuseError(); }
/* TODO: Check that both arguments are non-NULL */
return openRbuHandle(0, zTarget, zState);
}
@@ -168541,7 +169939,7 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum(
/*
** Return the database handle used by pRbu.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu *pRbu, int bRbu){
+SQLITE_API sqlite3 *sqlite3rbu_db(sqlite3rbu *pRbu, int bRbu){
sqlite3 *db = 0;
if( pRbu ){
db = (bRbu ? pRbu->dbRbu : pRbu->dbMain);
@@ -168573,7 +169971,7 @@ static void rbuEditErrmsg(sqlite3rbu *p){
/*
** Close the RBU handle.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
+SQLITE_API int sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
int rc;
if( p ){
@@ -168611,6 +170009,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
rbuEditErrmsg(p);
rc = p->rc;
*pzErrmsg = p->zErrmsg;
+ sqlite3_free(p->zState);
sqlite3_free(p);
}else{
rc = SQLITE_NOMEM;
@@ -168624,7 +170023,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){
** updates) that have been performed on the target database since the
** current RBU update was started.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu){
+SQLITE_API sqlite3_int64 sqlite3rbu_progress(sqlite3rbu *pRbu){
return pRbu->nProgress;
}
@@ -168632,7 +170031,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu){
** Return permyriadage progress indications for the two main stages of
** an RBU update.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *p, int *pnOne, int *pnTwo){
+SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *p, int *pnOne, int *pnTwo){
const int MAX_PROGRESS = 10000;
switch( p->eStage ){
case RBU_STAGE_OAL:
@@ -168667,7 +170066,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *p, int *pnOne,
/*
** Return the current state of the RBU vacuum or update operation.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *p){
+SQLITE_API int sqlite3rbu_state(sqlite3rbu *p){
int aRes[] = {
0, SQLITE_RBU_STATE_OAL, SQLITE_RBU_STATE_MOVE,
0, SQLITE_RBU_STATE_CHECKPOINT, SQLITE_RBU_STATE_DONE
@@ -168695,7 +170094,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *p){
}
}
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *p){
+SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){
int rc = p->rc;
if( rc==SQLITE_DONE ) return SQLITE_OK;
@@ -169522,7 +170921,7 @@ static int rbuVfsGetLastError(sqlite3_vfs *pVfs, int a, char *b){
** Deregister and destroy an RBU vfs created by an earlier call to
** sqlite3rbu_create_vfs().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName){
+SQLITE_API void sqlite3rbu_destroy_vfs(const char *zName){
sqlite3_vfs *pVfs = sqlite3_vfs_find(zName);
if( pVfs && pVfs->xOpen==rbuVfsOpen ){
sqlite3_mutex_free(((rbu_vfs*)pVfs)->mutex);
@@ -169536,7 +170935,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3rbu_destroy_vfs(const char *zName){
** via existing VFS zParent. The new object is registered as a non-default
** VFS with SQLite before returning.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const char *zParent){
+SQLITE_API int sqlite3rbu_create_vfs(const char *zName, const char *zParent){
/* Template for VFS */
static sqlite3_vfs vfs_template = {
@@ -170222,7 +171621,7 @@ static int statFilter(
" UNION ALL "
"SELECT name, rootpage, type"
" FROM \"%w\".%s WHERE rootpage!=0"
- " ORDER BY name", pTab->db->aDb[pCsr->iDb].zName, zMaster);
+ " ORDER BY name", pTab->db->aDb[pCsr->iDb].zDbSName, zMaster);
if( zSql==0 ){
return SQLITE_NOMEM_BKPT;
}else{
@@ -170276,7 +171675,7 @@ static int statColumn(
default: { /* schema */
sqlite3 *db = sqlite3_context_db_handle(ctx);
int iDb = pCsr->iDb;
- sqlite3_result_text(ctx, db->aDb[iDb].zName, -1, SQLITE_STATIC);
+ sqlite3_result_text(ctx, db->aDb[iDb].zDbSName, -1, SQLITE_STATIC);
break;
}
}
@@ -171781,7 +173180,7 @@ static int sessionDiffFindModified(
return rc;
}
-SQLITE_API int SQLITE_STDCALL sqlite3session_diff(
+SQLITE_API int sqlite3session_diff(
sqlite3_session *pSession,
const char *zFrom,
const char *zTbl,
@@ -171875,7 +173274,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_diff(
** Create a session object. This session object will record changes to
** database zDb attached to connection db.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_create(
+SQLITE_API int sqlite3session_create(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of db (e.g. "main") */
sqlite3_session **ppSession /* OUT: New session object */
@@ -171937,7 +173336,7 @@ static void sessionDeleteTable(SessionTable *pList){
/*
** Delete a session object previously allocated using sqlite3session_create().
*/
-SQLITE_API void SQLITE_STDCALL sqlite3session_delete(sqlite3_session *pSession){
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){
sqlite3 *db = pSession->db;
sqlite3_session *pHead;
sqlite3_session **pp;
@@ -171966,7 +173365,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3session_delete(sqlite3_session *pSession){
/*
** Set a table filter on a Session Object.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3session_table_filter(
+SQLITE_API void sqlite3session_table_filter(
sqlite3_session *pSession,
int(*xFilter)(void*, const char*),
void *pCtx /* First argument passed to xFilter */
@@ -171984,7 +173383,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3session_table_filter(
** not matter if the PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias)
** or not.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_attach(
+SQLITE_API int sqlite3session_attach(
sqlite3_session *pSession, /* Session object */
const char *zName /* Table name */
){
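
The attach call above completes the usual recording workflow: create a session on a schema, attach the tables of interest (a NULL name attaches every table), make changes, then collect the changeset with sqlite3session_changeset() below. A compact sketch, assuming a build with SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK:

#include "sqlite3.h"

/* Record the effect of zSql on all tables in "main"; the changeset buffer
** returned in *ppC must be freed by the caller with sqlite3_free(). */
static int changeset_for(sqlite3 *db, const char *zSql, int *pnC, void **ppC){
  sqlite3_session *pSession = 0;
  int rc = sqlite3session_create(db, "main", &pSession);
  if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSession, 0);
  if( rc==SQLITE_OK ) rc = sqlite3_exec(db, zSql, 0, 0, 0);
  if( rc==SQLITE_OK ) rc = sqlite3session_changeset(pSession, pnC, ppC);
  if( pSession ) sqlite3session_delete(pSession);
  return rc;
}
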
@@ -172674,7 +174073,7 @@ static int sessionGenerateChangeset(
** It is the responsibility of the caller to eventually free the buffer
** using sqlite3_free().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_changeset(
+SQLITE_API int sqlite3session_changeset(
sqlite3_session *pSession, /* Session object */
int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
void **ppChangeset /* OUT: Buffer containing changeset */
@@ -172685,7 +174084,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_changeset(
/*
** Streaming version of sqlite3session_changeset().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_changeset_strm(
+SQLITE_API int sqlite3session_changeset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
@@ -172696,7 +174095,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_changeset_strm(
/*
** Streaming version of sqlite3session_patchset().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_patchset_strm(
+SQLITE_API int sqlite3session_patchset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
@@ -172711,7 +174110,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_patchset_strm(
** It is the responsibility of the caller to eventually free the buffer
** using sqlite3_free().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_patchset(
+SQLITE_API int sqlite3session_patchset(
sqlite3_session *pSession, /* Session object */
int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */
void **ppPatchset /* OUT: Buffer containing changeset */
@@ -172722,7 +174121,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_patchset(
/*
** Enable or disable the session object passed as the first argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_enable(sqlite3_session *pSession, int bEnable){
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable){
int ret;
sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
if( bEnable>=0 ){
@@ -172736,7 +174135,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_enable(sqlite3_session *pSession, i
/*
** Enable or disable the session object passed as the first argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_indirect(sqlite3_session *pSession, int bIndirect){
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect){
int ret;
sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
if( bIndirect>=0 ){
@@ -172751,7 +174150,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3session_indirect(sqlite3_session *pSession,
** Return true if there have been no changes to monitored tables recorded
** by the session object passed as the only argument.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3session_isempty(sqlite3_session *pSession){
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession){
int ret = 0;
SessionTable *pTab;
@@ -172801,7 +174200,7 @@ static int sessionChangesetStart(
/*
** Create an iterator used to iterate through the contents of a changeset.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_start(
+SQLITE_API int sqlite3changeset_start(
sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
int nChangeset, /* Size of buffer pChangeset in bytes */
void *pChangeset /* Pointer to buffer containing changeset */
@@ -172812,7 +174211,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_start(
/*
** Streaming version of sqlite3changeset_start().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_start_strm(
+SQLITE_API int sqlite3changeset_start_strm(
sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn
@@ -173233,7 +174632,7 @@ static int sessionChangesetNext(
** This function may not be called on iterators passed to a conflict handler
** callback by changeset_apply().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_next(sqlite3_changeset_iter *p){
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *p){
return sessionChangesetNext(p, 0, 0);
}
@@ -173242,7 +174641,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_next(sqlite3_changeset_iter *p){
** from a changeset iterator. It may only be called after changeset_next()
** has returned SQLITE_ROW.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_op(
+SQLITE_API int sqlite3changeset_op(
sqlite3_changeset_iter *pIter, /* Iterator handle */
const char **pzTab, /* OUT: Pointer to table name */
int *pnCol, /* OUT: Number of columns in table */
@@ -173262,7 +174661,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_op(
** to. This function may only be called after changeset_next() returns
** SQLITE_ROW.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_pk(
+SQLITE_API int sqlite3changeset_pk(
sqlite3_changeset_iter *pIter, /* Iterator object */
unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
int *pnCol /* OUT: Number of entries in output array */
@@ -173285,7 +174684,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_pk(
** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is
** not modified. Otherwise, SQLITE_OK.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_old(
+SQLITE_API int sqlite3changeset_old(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Index of old.* value to retrieve */
sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
@@ -173313,7 +174712,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_old(
** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is
** not modified. Otherwise, SQLITE_OK.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_new(
+SQLITE_API int sqlite3changeset_new(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Index of new.* value to retrieve */
sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
@@ -173347,7 +174746,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_new(
** If value iVal is out-of-range or some other error occurs, an SQLite error
** code is returned. Otherwise, SQLITE_OK.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_conflict(
+SQLITE_API int sqlite3changeset_conflict(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Index of conflict record value to fetch */
sqlite3_value **ppValue /* OUT: Value from conflicting row */
@@ -173370,7 +174769,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_conflict(
**
** In all other cases this function returns SQLITE_MISUSE.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_fk_conflicts(
+SQLITE_API int sqlite3changeset_fk_conflicts(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int *pnOut /* OUT: Number of FK violations */
){
@@ -173388,7 +174787,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_fk_conflicts(
** This function may not be called on iterators passed to a conflict handler
** callback by changeset_apply().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_finalize(sqlite3_changeset_iter *p){
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *p){
int rc = SQLITE_OK;
if( p ){
int i; /* Used to iterate through p->apValue[] */
@@ -173562,7 +174961,7 @@ static int sessionChangesetInvert(
/*
** Invert a changeset object.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_invert(
+SQLITE_API int sqlite3changeset_invert(
int nChangeset, /* Number of bytes in input */
const void *pChangeset, /* Input changeset */
int *pnInverted, /* OUT: Number of bytes in output changeset */
@@ -173581,7 +174980,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_invert(
/*
** Streaming version of sqlite3changeset_invert().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_invert_strm(
+SQLITE_API int sqlite3changeset_invert_strm(
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn,
int (*xOutput)(void *pOut, const void *pData, int nData),
@@ -174461,7 +175860,7 @@ static int sessionChangesetApply(
** attached to handle "db". Invoke the supplied conflict handler callback
** to resolve any conflicts encountered while applying the change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_apply(
+SQLITE_API int sqlite3changeset_apply(
sqlite3 *db, /* Apply change to "main" db of this handle */
int nChangeset, /* Size of changeset in bytes */
void *pChangeset, /* Changeset blob */
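
A matching sketch for the apply side: the conflict handler below drops any conflicting change (SQLITE_CHANGESET_OMIT is always a permitted resolution), and the table filter is left NULL so every table in the changeset is applied:

#include "sqlite3.h"

/* Keep the local row and discard the incoming change on any conflict. */
static int xOmitConflicts(void *pCtx, int eConf, sqlite3_changeset_iter *pIter){
  (void)pCtx; (void)eConf; (void)pIter;
  return SQLITE_CHANGESET_OMIT;
}

/* Apply a changeset produced elsewhere to the "main" schema of db. */
static int apply_changeset(sqlite3 *db, int nC, void *pC){
  return sqlite3changeset_apply(db, nC, pC, 0, xOmitConflicts, 0);
}
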
@@ -174489,7 +175888,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_apply(
** attached to handle "db". Invoke the supplied conflict handler callback
** to resolve any conflicts encountered while applying the change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_apply_strm(
+SQLITE_API int sqlite3changeset_apply_strm(
sqlite3 *db, /* Apply change to "main" db of this handle */
int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
void *pIn, /* First arg for xInput */
@@ -174824,7 +176223,7 @@ static int sessionChangegroupOutput(
/*
** Allocate a new, empty, sqlite3_changegroup.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changegroup_new(sqlite3_changegroup **pp){
+SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp){
int rc = SQLITE_OK; /* Return code */
sqlite3_changegroup *p; /* New object */
p = (sqlite3_changegroup*)sqlite3_malloc(sizeof(sqlite3_changegroup));
@@ -174841,7 +176240,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changegroup_new(sqlite3_changegroup **pp){
** Add the changeset currently stored in buffer pData, size nData bytes,
** to changeset-group p.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void *pData){
+SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void *pData){
sqlite3_changeset_iter *pIter; /* Iterator opened on pData/nData */
int rc; /* Return code */
@@ -174857,7 +176256,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add(sqlite3_changegroup *pGrp,
** Obtain a buffer containing a changeset representing the concatenation
** of all changesets added to the group so far.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output(
+SQLITE_API int sqlite3changegroup_output(
sqlite3_changegroup *pGrp,
int *pnData,
void **ppData
@@ -174868,7 +176267,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output(
/*
** Streaming versions of changegroup_add().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add_strm(
+SQLITE_API int sqlite3changegroup_add_strm(
sqlite3_changegroup *pGrp,
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn
@@ -174887,7 +176286,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add_strm(
/*
** Streaming versions of changegroup_output().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output_strm(
+SQLITE_API int sqlite3changegroup_output_strm(
sqlite3_changegroup *pGrp,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
@@ -174898,7 +176297,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output_strm(
/*
** Delete a changegroup object.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3changegroup_delete(sqlite3_changegroup *pGrp){
+SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){
if( pGrp ){
sessionDeleteTable(pGrp->pList);
sqlite3_free(pGrp);
@@ -174908,7 +176307,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3changegroup_delete(sqlite3_changegroup *pG
/*
** Combine two changesets together.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_concat(
+SQLITE_API int sqlite3changeset_concat(
int nLeft, /* Number of bytes in lhs input */
void *pLeft, /* Lhs input changeset */
int nRight /* Number of bytes in rhs input */,
@@ -174937,7 +176336,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3changeset_concat(
/*
** Streaming version of sqlite3changeset_concat().
*/
-SQLITE_API int SQLITE_STDCALL sqlite3changeset_concat_strm(
+SQLITE_API int sqlite3changeset_concat_strm(
int (*xInputA)(void *pIn, void *pData, int *pnData),
void *pInA,
int (*xInputB)(void *pIn, void *pData, int *pnData),
@@ -177169,7 +178568,7 @@ SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_json_init(
+SQLITE_API int sqlite3_json_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -178513,6 +179912,7 @@ static void sqlite3Fts5ParseNodeFree(Fts5ExprNode*);
static void sqlite3Fts5ParseSetDistance(Fts5Parse*, Fts5ExprNearset*, Fts5Token*);
static void sqlite3Fts5ParseSetColset(Fts5Parse*, Fts5ExprNearset*, Fts5Colset*);
+static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse*, Fts5Colset*);
static void sqlite3Fts5ParseFinished(Fts5Parse *pParse, Fts5ExprNode *p);
static void sqlite3Fts5ParseNear(Fts5Parse *pParse, Fts5Token*);
@@ -178570,12 +179970,13 @@ static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic);
#define FTS5_COLON 5
#define FTS5_LP 6
#define FTS5_RP 7
-#define FTS5_LCP 8
-#define FTS5_RCP 9
-#define FTS5_STRING 10
-#define FTS5_COMMA 11
-#define FTS5_PLUS 12
-#define FTS5_STAR 13
+#define FTS5_MINUS 8
+#define FTS5_LCP 9
+#define FTS5_RCP 10
+#define FTS5_STRING 11
+#define FTS5_COMMA 12
+#define FTS5_PLUS 13
+#define FTS5_STAR 14
/*
** 2000-05-29
@@ -178689,17 +180090,17 @@ static int sqlite3Fts5UnicodeFold(int c, int bRemoveDiacritic);
#endif
/************* Begin control #defines *****************************************/
#define fts5YYCODETYPE unsigned char
-#define fts5YYNOCODE 27
+#define fts5YYNOCODE 28
#define fts5YYACTIONTYPE unsigned char
#define sqlite3Fts5ParserFTS5TOKENTYPE Fts5Token
typedef union {
int fts5yyinit;
sqlite3Fts5ParserFTS5TOKENTYPE fts5yy0;
- Fts5Colset* fts5yy3;
- Fts5ExprPhrase* fts5yy11;
- Fts5ExprNode* fts5yy18;
- int fts5yy20;
- Fts5ExprNearset* fts5yy26;
+ int fts5yy4;
+ Fts5Colset* fts5yy11;
+ Fts5ExprNode* fts5yy24;
+ Fts5ExprNearset* fts5yy46;
+ Fts5ExprPhrase* fts5yy53;
} fts5YYMINORTYPE;
#ifndef fts5YYSTACKDEPTH
#define fts5YYSTACKDEPTH 100
@@ -178708,16 +180109,16 @@ typedef union {
#define sqlite3Fts5ParserARG_PDECL ,Fts5Parse *pParse
#define sqlite3Fts5ParserARG_FETCH Fts5Parse *pParse = fts5yypParser->pParse
#define sqlite3Fts5ParserARG_STORE fts5yypParser->pParse = pParse
-#define fts5YYNSTATE 26
-#define fts5YYNRULE 24
-#define fts5YY_MAX_SHIFT 25
-#define fts5YY_MIN_SHIFTREDUCE 40
-#define fts5YY_MAX_SHIFTREDUCE 63
-#define fts5YY_MIN_REDUCE 64
-#define fts5YY_MAX_REDUCE 87
-#define fts5YY_ERROR_ACTION 88
-#define fts5YY_ACCEPT_ACTION 89
-#define fts5YY_NO_ACTION 90
+#define fts5YYNSTATE 29
+#define fts5YYNRULE 26
+#define fts5YY_MAX_SHIFT 28
+#define fts5YY_MIN_SHIFTREDUCE 45
+#define fts5YY_MAX_SHIFTREDUCE 70
+#define fts5YY_MIN_REDUCE 71
+#define fts5YY_MAX_REDUCE 96
+#define fts5YY_ERROR_ACTION 97
+#define fts5YY_ACCEPT_ACTION 98
+#define fts5YY_NO_ACTION 99
/************* End control #defines *******************************************/
/* Define the fts5yytestcase() macro to be a no-op if is not already defined
@@ -178749,7 +180150,7 @@ typedef union {
**
** N between fts5YY_MIN_REDUCE Reduce by rule N-fts5YY_MIN_REDUCE
** and fts5YY_MAX_REDUCE
-
+**
** N == fts5YY_ERROR_ACTION A syntax error has occurred.
**
** N == fts5YY_ACCEPT_ACTION The parser accepts its input.
@@ -178758,16 +180159,20 @@ typedef union {
** slots in the fts5yy_action[] table.
**
** The action table is constructed as a single large table named fts5yy_action[].
-** Given state S and lookahead X, the action is computed as
+** Given state S and lookahead X, the action is computed as either:
**
-** fts5yy_action[ fts5yy_shift_ofst[S] + X ]
+** (A) N = fts5yy_action[ fts5yy_shift_ofst[S] + X ]
+** (B) N = fts5yy_default[S]
**
-** If the index value fts5yy_shift_ofst[S]+X is out of range or if the value
-** fts5yy_lookahead[fts5yy_shift_ofst[S]+X] is not equal to X or if fts5yy_shift_ofst[S]
-** is equal to fts5YY_SHIFT_USE_DFLT, it means that the action is not in the table
-** and that fts5yy_default[S] should be used instead.
+** The (A) formula is preferred. The B formula is used instead if:
+** (1) The fts5yy_shift_ofst[S]+X value is out of range, or
+** (2) fts5yy_lookahead[fts5yy_shift_ofst[S]+X] is not equal to X, or
+** (3) fts5yy_shift_ofst[S] equal fts5YY_SHIFT_USE_DFLT.
+** (Implementation note: fts5YY_SHIFT_USE_DFLT is chosen so that
+** fts5YY_SHIFT_USE_DFLT+X will be out of range for all possible lookaheads X.
+** Hence only tests (1) and (2) need to be evaluated.)
**
-** The formula above is for computing the action when the lookahead is
+** The formulas above are for computing the action when the lookahead is
** a terminal symbol. If the lookahead is a non-terminal (as occurs after
** a reduce action) then the fts5yy_reduce_ofst[] array is used in place of
** the fts5yy_shift_ofst[] array and fts5YY_REDUCE_USE_DFLT is used in place of
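Condensed into code, the two formulas described above amount to the following sketch, which reads the tables defined below; the generated fts5yy_find_shift_action() additionally handles fallback and wildcard tokens, which this sketch omits:

/* Sketch only: S is the current parser state, X the lookahead token code.
** Formula (A) is tried first; formula (B) is the per-state default. */
static int fts5_action_sketch(int S, int X){
  int i = fts5yy_shift_ofst[S] + X;
  if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=X ){
    return fts5yy_default[S];              /* formula (B) */
  }
  return fts5yy_action[i];                 /* formula (A) */
}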
@@ -178785,48 +180190,50 @@ typedef union {
** fts5yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define fts5YY_ACTTAB_COUNT (78)
+#define fts5YY_ACTTAB_COUNT (85)
static const fts5YYACTIONTYPE fts5yy_action[] = {
- /* 0 */ 89, 15, 46, 5, 48, 24, 12, 19, 23, 14,
- /* 10 */ 46, 5, 48, 24, 20, 21, 23, 43, 46, 5,
- /* 20 */ 48, 24, 6, 18, 23, 17, 46, 5, 48, 24,
- /* 30 */ 75, 7, 23, 25, 46, 5, 48, 24, 62, 47,
- /* 40 */ 23, 48, 24, 7, 11, 23, 9, 3, 4, 2,
- /* 50 */ 62, 50, 52, 44, 64, 3, 4, 2, 49, 4,
- /* 60 */ 2, 1, 23, 11, 16, 9, 12, 2, 10, 61,
- /* 70 */ 53, 59, 62, 60, 22, 13, 55, 8,
+ /* 0 */ 98, 16, 51, 5, 53, 27, 83, 7, 26, 15,
+ /* 10 */ 51, 5, 53, 27, 13, 69, 26, 48, 51, 5,
+ /* 20 */ 53, 27, 19, 11, 26, 9, 20, 51, 5, 53,
+ /* 30 */ 27, 13, 22, 26, 28, 51, 5, 53, 27, 68,
+ /* 40 */ 1, 26, 19, 11, 17, 9, 52, 10, 53, 27,
+ /* 50 */ 23, 24, 26, 54, 3, 4, 2, 26, 6, 21,
+ /* 60 */ 49, 71, 3, 4, 2, 7, 56, 59, 55, 59,
+ /* 70 */ 4, 2, 12, 69, 58, 60, 18, 67, 62, 69,
+ /* 80 */ 25, 66, 8, 14, 2,
};
static const fts5YYCODETYPE fts5yy_lookahead[] = {
- /* 0 */ 15, 16, 17, 18, 19, 20, 10, 11, 23, 16,
- /* 10 */ 17, 18, 19, 20, 23, 24, 23, 16, 17, 18,
- /* 20 */ 19, 20, 22, 23, 23, 16, 17, 18, 19, 20,
- /* 30 */ 5, 6, 23, 16, 17, 18, 19, 20, 13, 17,
- /* 40 */ 23, 19, 20, 6, 8, 23, 10, 1, 2, 3,
- /* 50 */ 13, 9, 10, 7, 0, 1, 2, 3, 19, 2,
- /* 60 */ 3, 6, 23, 8, 21, 10, 10, 3, 10, 25,
- /* 70 */ 10, 10, 13, 25, 12, 10, 7, 5,
+ /* 0 */ 16, 17, 18, 19, 20, 21, 5, 6, 24, 17,
+ /* 10 */ 18, 19, 20, 21, 11, 14, 24, 17, 18, 19,
+ /* 20 */ 20, 21, 8, 9, 24, 11, 17, 18, 19, 20,
+ /* 30 */ 21, 11, 12, 24, 17, 18, 19, 20, 21, 26,
+ /* 40 */ 6, 24, 8, 9, 22, 11, 18, 11, 20, 21,
+ /* 50 */ 24, 25, 24, 20, 1, 2, 3, 24, 23, 24,
+ /* 60 */ 7, 0, 1, 2, 3, 6, 10, 11, 10, 11,
+ /* 70 */ 2, 3, 9, 14, 11, 11, 22, 26, 7, 14,
+ /* 80 */ 13, 11, 5, 11, 3,
};
-#define fts5YY_SHIFT_USE_DFLT (-5)
-#define fts5YY_SHIFT_COUNT (25)
-#define fts5YY_SHIFT_MIN (-4)
-#define fts5YY_SHIFT_MAX (72)
-static const signed char fts5yy_shift_ofst[] = {
- /* 0 */ 55, 55, 55, 55, 55, 36, -4, 56, 58, 25,
- /* 10 */ 37, 60, 59, 59, 46, 54, 42, 57, 62, 61,
- /* 20 */ 62, 69, 65, 62, 72, 64,
+#define fts5YY_SHIFT_USE_DFLT (85)
+#define fts5YY_SHIFT_COUNT (28)
+#define fts5YY_SHIFT_MIN (0)
+#define fts5YY_SHIFT_MAX (81)
+static const unsigned char fts5yy_shift_ofst[] = {
+ /* 0 */ 34, 34, 34, 34, 34, 14, 20, 3, 36, 1,
+ /* 10 */ 59, 64, 64, 65, 65, 53, 61, 56, 58, 63,
+ /* 20 */ 68, 67, 70, 67, 71, 72, 67, 77, 81,
};
-#define fts5YY_REDUCE_USE_DFLT (-16)
-#define fts5YY_REDUCE_COUNT (13)
-#define fts5YY_REDUCE_MIN (-15)
-#define fts5YY_REDUCE_MAX (48)
+#define fts5YY_REDUCE_USE_DFLT (-17)
+#define fts5YY_REDUCE_COUNT (14)
+#define fts5YY_REDUCE_MIN (-16)
+#define fts5YY_REDUCE_MAX (54)
static const signed char fts5yy_reduce_ofst[] = {
- /* 0 */ -15, -7, 1, 9, 17, 22, -9, 0, 39, 44,
- /* 10 */ 44, 43, 44, 48,
+ /* 0 */ -16, -8, 0, 9, 17, 28, 26, 35, 33, 13,
+ /* 10 */ 13, 22, 54, 13, 51,
};
static const fts5YYACTIONTYPE fts5yy_default[] = {
- /* 0 */ 88, 88, 88, 88, 88, 69, 82, 88, 88, 87,
- /* 10 */ 87, 88, 87, 87, 88, 88, 88, 66, 80, 88,
- /* 20 */ 81, 88, 88, 78, 88, 65,
+ /* 0 */ 97, 97, 97, 97, 97, 76, 91, 97, 97, 96,
+ /* 10 */ 96, 97, 97, 96, 96, 97, 97, 97, 97, 97,
+ /* 20 */ 73, 89, 97, 90, 97, 97, 87, 97, 72,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -178933,11 +180340,11 @@ static void sqlite3Fts5ParserTrace(FILE *TraceFILE, char *zTracePrompt){
static const char *const fts5yyTokenName[] = {
"$", "OR", "AND", "NOT",
"TERM", "COLON", "LP", "RP",
- "LCP", "RCP", "STRING", "COMMA",
- "PLUS", "STAR", "error", "input",
- "expr", "cnearset", "exprlist", "nearset",
- "colset", "colsetlist", "nearphrases", "phrase",
- "neardist_opt", "star_opt",
+ "MINUS", "LCP", "RCP", "STRING",
+ "COMMA", "PLUS", "STAR", "error",
+ "input", "expr", "cnearset", "exprlist",
+ "nearset", "colset", "colsetlist", "nearphrases",
+ "phrase", "neardist_opt", "star_opt",
};
#endif /* NDEBUG */
@@ -178955,20 +180362,22 @@ static const char *const fts5yyRuleName[] = {
/* 7 */ "exprlist ::= exprlist cnearset",
/* 8 */ "cnearset ::= nearset",
/* 9 */ "cnearset ::= colset COLON nearset",
- /* 10 */ "colset ::= LCP colsetlist RCP",
- /* 11 */ "colset ::= STRING",
- /* 12 */ "colsetlist ::= colsetlist STRING",
- /* 13 */ "colsetlist ::= STRING",
- /* 14 */ "nearset ::= phrase",
- /* 15 */ "nearset ::= STRING LP nearphrases neardist_opt RP",
- /* 16 */ "nearphrases ::= phrase",
- /* 17 */ "nearphrases ::= nearphrases phrase",
- /* 18 */ "neardist_opt ::=",
- /* 19 */ "neardist_opt ::= COMMA STRING",
- /* 20 */ "phrase ::= phrase PLUS STRING star_opt",
- /* 21 */ "phrase ::= STRING star_opt",
- /* 22 */ "star_opt ::= STAR",
- /* 23 */ "star_opt ::=",
+ /* 10 */ "colset ::= MINUS LCP colsetlist RCP",
+ /* 11 */ "colset ::= LCP colsetlist RCP",
+ /* 12 */ "colset ::= STRING",
+ /* 13 */ "colset ::= MINUS STRING",
+ /* 14 */ "colsetlist ::= colsetlist STRING",
+ /* 15 */ "colsetlist ::= STRING",
+ /* 16 */ "nearset ::= phrase",
+ /* 17 */ "nearset ::= STRING LP nearphrases neardist_opt RP",
+ /* 18 */ "nearphrases ::= phrase",
+ /* 19 */ "nearphrases ::= nearphrases phrase",
+ /* 20 */ "neardist_opt ::=",
+ /* 21 */ "neardist_opt ::= COMMA STRING",
+ /* 22 */ "phrase ::= phrase PLUS STRING star_opt",
+ /* 23 */ "phrase ::= STRING star_opt",
+ /* 24 */ "star_opt ::= STAR",
+ /* 25 */ "star_opt ::=",
};
#endif /* NDEBUG */
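Rules 10 and 13 are the new ones in the list above; they appear to let a column filter be prefixed with MINUS so that the match applies to every column except those listed. A minimal sketch of such a query, assuming a hypothetical FTS5 table named ft whose columns include title and body:

#include <sqlite3.h>

/* Illustrative only: the table name "ft" and its columns are assumptions.
** The leading '-' in the colset excludes the listed columns from the match. */
static int match_all_but_title_body(sqlite3 *db){
  return sqlite3_exec(db,
      "SELECT rowid FROM ft WHERE ft MATCH '- {title body} : sqlite'",
      0, 0, 0);
}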
@@ -179078,33 +180487,33 @@ static void fts5yy_destructor(
** inside the C code.
*/
/********* Begin destructor definitions ***************************************/
- case 15: /* input */
+ case 16: /* input */
{
(void)pParse;
}
break;
- case 16: /* expr */
- case 17: /* cnearset */
- case 18: /* exprlist */
+ case 17: /* expr */
+ case 18: /* cnearset */
+ case 19: /* exprlist */
{
- sqlite3Fts5ParseNodeFree((fts5yypminor->fts5yy18));
+ sqlite3Fts5ParseNodeFree((fts5yypminor->fts5yy24));
}
break;
- case 19: /* nearset */
- case 22: /* nearphrases */
+ case 20: /* nearset */
+ case 23: /* nearphrases */
{
- sqlite3Fts5ParseNearsetFree((fts5yypminor->fts5yy26));
+ sqlite3Fts5ParseNearsetFree((fts5yypminor->fts5yy46));
}
break;
- case 20: /* colset */
- case 21: /* colsetlist */
+ case 21: /* colset */
+ case 22: /* colsetlist */
{
- sqlite3_free((fts5yypminor->fts5yy3));
+ sqlite3_free((fts5yypminor->fts5yy11));
}
break;
- case 23: /* phrase */
+ case 24: /* phrase */
{
- sqlite3Fts5ParsePhraseFree((fts5yypminor->fts5yy11));
+ sqlite3Fts5ParsePhraseFree((fts5yypminor->fts5yy53));
}
break;
/********* End destructor definitions *****************************************/
@@ -179181,50 +180590,47 @@ static unsigned int fts5yy_find_shift_action(
assert( stateno <= fts5YY_SHIFT_COUNT );
do{
i = fts5yy_shift_ofst[stateno];
- if( i==fts5YY_SHIFT_USE_DFLT ) return fts5yy_default[stateno];
assert( iLookAhead!=fts5YYNOCODE );
i += iLookAhead;
if( i<0 || i>=fts5YY_ACTTAB_COUNT || fts5yy_lookahead[i]!=iLookAhead ){
- if( iLookAhead>0 ){
#ifdef fts5YYFALLBACK
- fts5YYCODETYPE iFallback; /* Fallback token */
- if( iLookAhead<sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0])
- && (iFallback = fts5yyFallback[iLookAhead])!=0 ){
+ fts5YYCODETYPE iFallback; /* Fallback token */
+ if( iLookAhead<sizeof(fts5yyFallback)/sizeof(fts5yyFallback[0])
+ && (iFallback = fts5yyFallback[iLookAhead])!=0 ){
#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE, "%sFALLBACK %s => %s\n",
- fts5yyTracePrompt, fts5yyTokenName[iLookAhead], fts5yyTokenName[iFallback]);
- }
-#endif
- assert( fts5yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
- iLookAhead = iFallback;
- continue;
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE, "%sFALLBACK %s => %s\n",
+ fts5yyTracePrompt, fts5yyTokenName[iLookAhead], fts5yyTokenName[iFallback]);
}
#endif
+ assert( fts5yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
+ iLookAhead = iFallback;
+ continue;
+ }
+#endif
#ifdef fts5YYWILDCARD
- {
- int j = i - iLookAhead + fts5YYWILDCARD;
- if(
+ {
+ int j = i - iLookAhead + fts5YYWILDCARD;
+ if(
#if fts5YY_SHIFT_MIN+fts5YYWILDCARD<0
- j>=0 &&
+ j>=0 &&
#endif
#if fts5YY_SHIFT_MAX+fts5YYWILDCARD>=fts5YY_ACTTAB_COUNT
- j<fts5YY_ACTTAB_COUNT &&
+ j<fts5YY_ACTTAB_COUNT &&
#endif
- fts5yy_lookahead[j]==fts5YYWILDCARD
- ){
+ fts5yy_lookahead[j]==fts5YYWILDCARD && iLookAhead>0
+ ){
#ifndef NDEBUG
- if( fts5yyTraceFILE ){
- fprintf(fts5yyTraceFILE, "%sWILDCARD %s => %s\n",
- fts5yyTracePrompt, fts5yyTokenName[iLookAhead],
- fts5yyTokenName[fts5YYWILDCARD]);
- }
-#endif /* NDEBUG */
- return fts5yy_action[j];
+ if( fts5yyTraceFILE ){
+ fprintf(fts5yyTraceFILE, "%sWILDCARD %s => %s\n",
+ fts5yyTracePrompt, fts5yyTokenName[iLookAhead],
+ fts5yyTokenName[fts5YYWILDCARD]);
}
+#endif /* NDEBUG */
+ return fts5yy_action[j];
}
-#endif /* fts5YYWILDCARD */
}
+#endif /* fts5YYWILDCARD */
return fts5yy_default[stateno];
}else{
return fts5yy_action[i];
@@ -179351,30 +180757,32 @@ static const struct {
fts5YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
unsigned char nrhs; /* Number of right-hand side symbols in the rule */
} fts5yyRuleInfo[] = {
- { 15, 1 },
- { 16, 3 },
- { 16, 3 },
- { 16, 3 },
- { 16, 3 },
{ 16, 1 },
- { 18, 1 },
- { 18, 2 },
- { 17, 1 },
{ 17, 3 },
- { 20, 3 },
- { 20, 1 },
- { 21, 2 },
- { 21, 1 },
+ { 17, 3 },
+ { 17, 3 },
+ { 17, 3 },
+ { 17, 1 },
{ 19, 1 },
- { 19, 5 },
- { 22, 1 },
+ { 19, 2 },
+ { 18, 1 },
+ { 18, 3 },
+ { 21, 4 },
+ { 21, 3 },
+ { 21, 1 },
+ { 21, 2 },
{ 22, 2 },
- { 24, 0 },
- { 24, 2 },
- { 23, 4 },
+ { 22, 1 },
+ { 20, 1 },
+ { 20, 5 },
+ { 23, 1 },
{ 23, 2 },
- { 25, 1 },
{ 25, 0 },
+ { 25, 2 },
+ { 24, 4 },
+ { 24, 2 },
+ { 26, 1 },
+ { 26, 0 },
};
static void fts5yy_accept(fts5yyParser*); /* Forward Declaration */
@@ -179439,120 +180847,131 @@ static void fts5yy_reduce(
/********** Begin reduce actions **********************************************/
fts5YYMINORTYPE fts5yylhsminor;
case 0: /* input ::= expr */
-{ sqlite3Fts5ParseFinished(pParse, fts5yymsp[0].minor.fts5yy18); }
+{ sqlite3Fts5ParseFinished(pParse, fts5yymsp[0].minor.fts5yy24); }
break;
case 1: /* expr ::= expr AND expr */
{
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_AND, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_AND, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
}
- fts5yymsp[-2].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 2: /* expr ::= expr OR expr */
{
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_OR, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_OR, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
}
- fts5yymsp[-2].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 3: /* expr ::= expr NOT expr */
{
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_NOT, fts5yymsp[-2].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18, 0);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_NOT, fts5yymsp[-2].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24, 0);
}
- fts5yymsp[-2].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 4: /* expr ::= LP expr RP */
-{fts5yymsp[-2].minor.fts5yy18 = fts5yymsp[-1].minor.fts5yy18;}
+{fts5yymsp[-2].minor.fts5yy24 = fts5yymsp[-1].minor.fts5yy24;}
break;
case 5: /* expr ::= exprlist */
case 6: /* exprlist ::= cnearset */ fts5yytestcase(fts5yyruleno==6);
-{fts5yylhsminor.fts5yy18 = fts5yymsp[0].minor.fts5yy18;}
- fts5yymsp[0].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+{fts5yylhsminor.fts5yy24 = fts5yymsp[0].minor.fts5yy24;}
+ fts5yymsp[0].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 7: /* exprlist ::= exprlist cnearset */
{
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseImplicitAnd(pParse, fts5yymsp[-1].minor.fts5yy18, fts5yymsp[0].minor.fts5yy18);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseImplicitAnd(pParse, fts5yymsp[-1].minor.fts5yy24, fts5yymsp[0].minor.fts5yy24);
}
- fts5yymsp[-1].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+ fts5yymsp[-1].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 8: /* cnearset ::= nearset */
{
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy26);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy46);
}
- fts5yymsp[0].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
+ fts5yymsp[0].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
break;
case 9: /* cnearset ::= colset COLON nearset */
{
- sqlite3Fts5ParseSetColset(pParse, fts5yymsp[0].minor.fts5yy26, fts5yymsp[-2].minor.fts5yy3);
- fts5yylhsminor.fts5yy18 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy26);
+ sqlite3Fts5ParseSetColset(pParse, fts5yymsp[0].minor.fts5yy46, fts5yymsp[-2].minor.fts5yy11);
+ fts5yylhsminor.fts5yy24 = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, fts5yymsp[0].minor.fts5yy46);
+}
+ fts5yymsp[-2].minor.fts5yy24 = fts5yylhsminor.fts5yy24;
+ break;
+ case 10: /* colset ::= MINUS LCP colsetlist RCP */
+{
+ fts5yymsp[-3].minor.fts5yy11 = sqlite3Fts5ParseColsetInvert(pParse, fts5yymsp[-1].minor.fts5yy11);
}
- fts5yymsp[-2].minor.fts5yy18 = fts5yylhsminor.fts5yy18;
break;
- case 10: /* colset ::= LCP colsetlist RCP */
-{ fts5yymsp[-2].minor.fts5yy3 = fts5yymsp[-1].minor.fts5yy3; }
+ case 11: /* colset ::= LCP colsetlist RCP */
+{ fts5yymsp[-2].minor.fts5yy11 = fts5yymsp[-1].minor.fts5yy11; }
break;
- case 11: /* colset ::= STRING */
+ case 12: /* colset ::= STRING */
{
- fts5yylhsminor.fts5yy3 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
}
- fts5yymsp[0].minor.fts5yy3 = fts5yylhsminor.fts5yy3;
+ fts5yymsp[0].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
break;
- case 12: /* colsetlist ::= colsetlist STRING */
+ case 13: /* colset ::= MINUS STRING */
+{
+ fts5yymsp[-1].minor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+ fts5yymsp[-1].minor.fts5yy11 = sqlite3Fts5ParseColsetInvert(pParse, fts5yymsp[-1].minor.fts5yy11);
+}
+ break;
+ case 14: /* colsetlist ::= colsetlist STRING */
{
- fts5yylhsminor.fts5yy3 = sqlite3Fts5ParseColset(pParse, fts5yymsp[-1].minor.fts5yy3, &fts5yymsp[0].minor.fts5yy0); }
- fts5yymsp[-1].minor.fts5yy3 = fts5yylhsminor.fts5yy3;
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, fts5yymsp[-1].minor.fts5yy11, &fts5yymsp[0].minor.fts5yy0); }
+ fts5yymsp[-1].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
break;
- case 13: /* colsetlist ::= STRING */
+ case 15: /* colsetlist ::= STRING */
{
- fts5yylhsminor.fts5yy3 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
+ fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseColset(pParse, 0, &fts5yymsp[0].minor.fts5yy0);
}
- fts5yymsp[0].minor.fts5yy3 = fts5yylhsminor.fts5yy3;
+ fts5yymsp[0].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
break;
- case 14: /* nearset ::= phrase */
-{ fts5yylhsminor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy11); }
- fts5yymsp[0].minor.fts5yy26 = fts5yylhsminor.fts5yy26;
+ case 16: /* nearset ::= phrase */
+{ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy53); }
+ fts5yymsp[0].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
break;
- case 15: /* nearset ::= STRING LP nearphrases neardist_opt RP */
+ case 17: /* nearset ::= STRING LP nearphrases neardist_opt RP */
{
sqlite3Fts5ParseNear(pParse, &fts5yymsp[-4].minor.fts5yy0);
- sqlite3Fts5ParseSetDistance(pParse, fts5yymsp[-2].minor.fts5yy26, &fts5yymsp[-1].minor.fts5yy0);
- fts5yylhsminor.fts5yy26 = fts5yymsp[-2].minor.fts5yy26;
+ sqlite3Fts5ParseSetDistance(pParse, fts5yymsp[-2].minor.fts5yy46, &fts5yymsp[-1].minor.fts5yy0);
+ fts5yylhsminor.fts5yy46 = fts5yymsp[-2].minor.fts5yy46;
}
- fts5yymsp[-4].minor.fts5yy26 = fts5yylhsminor.fts5yy26;
+ fts5yymsp[-4].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
break;
- case 16: /* nearphrases ::= phrase */
+ case 18: /* nearphrases ::= phrase */
{
- fts5yylhsminor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy11);
+ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, 0, fts5yymsp[0].minor.fts5yy53);
}
- fts5yymsp[0].minor.fts5yy26 = fts5yylhsminor.fts5yy26;
+ fts5yymsp[0].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
break;
- case 17: /* nearphrases ::= nearphrases phrase */
+ case 19: /* nearphrases ::= nearphrases phrase */
{
- fts5yylhsminor.fts5yy26 = sqlite3Fts5ParseNearset(pParse, fts5yymsp[-1].minor.fts5yy26, fts5yymsp[0].minor.fts5yy11);
+ fts5yylhsminor.fts5yy46 = sqlite3Fts5ParseNearset(pParse, fts5yymsp[-1].minor.fts5yy46, fts5yymsp[0].minor.fts5yy53);
}
- fts5yymsp[-1].minor.fts5yy26 = fts5yylhsminor.fts5yy26;
+ fts5yymsp[-1].minor.fts5yy46 = fts5yylhsminor.fts5yy46;
break;
- case 18: /* neardist_opt ::= */
+ case 20: /* neardist_opt ::= */
{ fts5yymsp[1].minor.fts5yy0.p = 0; fts5yymsp[1].minor.fts5yy0.n = 0; }
break;
- case 19: /* neardist_opt ::= COMMA STRING */
+ case 21: /* neardist_opt ::= COMMA STRING */
{ fts5yymsp[-1].minor.fts5yy0 = fts5yymsp[0].minor.fts5yy0; }
break;
- case 20: /* phrase ::= phrase PLUS STRING star_opt */
+ case 22: /* phrase ::= phrase PLUS STRING star_opt */
{
- fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseTerm(pParse, fts5yymsp[-3].minor.fts5yy11, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy20);
+ fts5yylhsminor.fts5yy53 = sqlite3Fts5ParseTerm(pParse, fts5yymsp[-3].minor.fts5yy53, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy4);
}
- fts5yymsp[-3].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
+ fts5yymsp[-3].minor.fts5yy53 = fts5yylhsminor.fts5yy53;
break;
- case 21: /* phrase ::= STRING star_opt */
+ case 23: /* phrase ::= STRING star_opt */
{
- fts5yylhsminor.fts5yy11 = sqlite3Fts5ParseTerm(pParse, 0, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy20);
+ fts5yylhsminor.fts5yy53 = sqlite3Fts5ParseTerm(pParse, 0, &fts5yymsp[-1].minor.fts5yy0, fts5yymsp[0].minor.fts5yy4);
}
- fts5yymsp[-1].minor.fts5yy11 = fts5yylhsminor.fts5yy11;
+ fts5yymsp[-1].minor.fts5yy53 = fts5yylhsminor.fts5yy53;
break;
- case 22: /* star_opt ::= STAR */
-{ fts5yymsp[0].minor.fts5yy20 = 1; }
+ case 24: /* star_opt ::= STAR */
+{ fts5yymsp[0].minor.fts5yy4 = 1; }
break;
- case 23: /* star_opt ::= */
-{ fts5yymsp[1].minor.fts5yy20 = 0; }
+ case 25: /* star_opt ::= */
+{ fts5yymsp[1].minor.fts5yy4 = 0; }
break;
default:
break;
@@ -179746,7 +181165,7 @@ static void sqlite3Fts5Parser(
fts5yy_destructor(fts5yypParser, (fts5YYCODETYPE)fts5yymajor, &fts5yyminorunion);
fts5yymajor = fts5YYNOCODE;
}else{
- while( fts5yypParser->fts5yytos >= &fts5yypParser->fts5yystack
+ while( fts5yypParser->fts5yytos >= fts5yypParser->fts5yystack
&& fts5yymx != fts5YYERRORSYMBOL
&& (fts5yyact = fts5yy_find_reduce_action(
fts5yypParser->fts5yytos->stateno,
@@ -180010,7 +181429,7 @@ static int fts5HighlightCb(
if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){
fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff);
p->iOff = iEndOff;
- if( iPos<p->iter.iEnd ){
+ if( iPos>=p->iter.iStart && iPos<p->iter.iEnd ){
fts5HighlightAppend(&rc, p, p->zClose, -1);
}
}
@@ -180068,6 +181487,118 @@ static void fts5HighlightFunction(
**************************************************************************/
/*
+** Context object passed to the fts5SentenceFinderCb() function.
+*/
+typedef struct Fts5SFinder Fts5SFinder;
+struct Fts5SFinder {
+ int iPos; /* Current token position */
+ int nFirstAlloc; /* Allocated size of aFirst[] */
+ int nFirst; /* Number of entries in aFirst[] */
+ int *aFirst; /* Array of first token in each sentence */
+ const char *zDoc; /* Document being tokenized */
+};
+
+/*
+** Add an entry to the Fts5SFinder.aFirst[] array. Grow the array if
+** necessary. Return SQLITE_OK if successful, or SQLITE_NOMEM if an
+** error occurs.
+*/
+static int fts5SentenceFinderAdd(Fts5SFinder *p, int iAdd){
+ if( p->nFirstAlloc==p->nFirst ){
+ int nNew = p->nFirstAlloc ? p->nFirstAlloc*2 : 64;
+ int *aNew;
+
+ aNew = (int*)sqlite3_realloc(p->aFirst, nNew*sizeof(int));
+ if( aNew==0 ) return SQLITE_NOMEM;
+ p->aFirst = aNew;
+ p->nFirstAlloc = nNew;
+ }
+ p->aFirst[p->nFirst++] = iAdd;
+ return SQLITE_OK;
+}
+
+/*
+** This function is an xTokenize() callback used by the auxiliary snippet()
+** function. Its job is to identify tokens that are the first in a sentence.
+** For each such token, an entry is added to the SFinder.aFirst[] array.
+*/
+static int fts5SentenceFinderCb(
+ void *pContext, /* Pointer to HighlightContext object */
+ int tflags, /* Mask of FTS5_TOKEN_* flags */
+ const char *pToken, /* Buffer containing token */
+ int nToken, /* Size of token in bytes */
+ int iStartOff, /* Start offset of token */
+ int iEndOff /* End offset of token */
+){
+ int rc = SQLITE_OK;
+
+ UNUSED_PARAM2(pToken, nToken);
+ UNUSED_PARAM(iEndOff);
+
+ if( (tflags & FTS5_TOKEN_COLOCATED)==0 ){
+ Fts5SFinder *p = (Fts5SFinder*)pContext;
+ if( p->iPos>0 ){
+ int i;
+ char c = 0;
+ for(i=iStartOff-1; i>=0; i--){
+ c = p->zDoc[i];
+ if( c!=' ' && c!='\t' && c!='\n' && c!='\r' ) break;
+ }
+ if( i!=iStartOff-1 && (c=='.' || c==':') ){
+ rc = fts5SentenceFinderAdd(p, p->iPos);
+ }
+ }else{
+ rc = fts5SentenceFinderAdd(p, 0);
+ }
+ p->iPos++;
+ }
+ return rc;
+}
+
+static int fts5SnippetScore(
+ const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
+ Fts5Context *pFts, /* First arg to pass to pApi functions */
+ int nDocsize, /* Size of column in tokens */
+ unsigned char *aSeen, /* Array with one element per query phrase */
+ int iCol, /* Column to score */
+ int iPos, /* Starting offset to score */
+ int nToken, /* Max tokens per snippet */
+ int *pnScore, /* OUT: Score */
+ int *piPos /* OUT: Adjusted offset */
+){
+ int rc;
+ int i;
+ int ip = 0;
+ int ic = 0;
+ int iOff = 0;
+ int iFirst = -1;
+ int nInst;
+ int nScore = 0;
+ int iLast = 0;
+
+ rc = pApi->xInstCount(pFts, &nInst);
+ for(i=0; i<nInst && rc==SQLITE_OK; i++){
+ rc = pApi->xInst(pFts, i, &ip, &ic, &iOff);
+ if( rc==SQLITE_OK && ic==iCol && iOff>=iPos && iOff<(iPos+nToken) ){
+ nScore += (aSeen[ip] ? 1 : 1000);
+ aSeen[ip] = 1;
+ if( iFirst<0 ) iFirst = iOff;
+ iLast = iOff + pApi->xPhraseSize(pFts, ip);
+ }
+ }
+
+ *pnScore = nScore;
+ if( piPos ){
+ int iAdj = iFirst - (nToken - (iLast-iFirst)) / 2;
+ if( (iAdj+nToken)>nDocsize ) iAdj = nDocsize - nToken;
+ if( iAdj<0 ) iAdj = 0;
+ *piPos = iAdj;
+ }
+
+ return rc;
+}
+
+/*
** Implementation of snippet() function.
*/
static void fts5SnippetFunction(
@@ -180088,9 +181619,10 @@ static void fts5SnippetFunction(
unsigned char *aSeen; /* Array of "seen instance" flags */
int iBestCol; /* Column containing best snippet */
int iBestStart = 0; /* First token of best snippet */
- int iBestLast; /* Last token of best snippet */
int nBestScore = 0; /* Score of best snippet */
int nColSize = 0; /* Total size of iBestCol in tokens */
+ Fts5SFinder sFinder; /* Used to find the beginnings of sentences */
+ int nCol;
if( nVal!=5 ){
const char *zErr = "wrong number of arguments to function snippet()";
@@ -180098,13 +181630,13 @@ static void fts5SnippetFunction(
return;
}
+ nCol = pApi->xColumnCount(pFts);
memset(&ctx, 0, sizeof(HighlightContext));
iCol = sqlite3_value_int(apVal[0]);
ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]);
ctx.zClose = (const char*)sqlite3_value_text(apVal[2]);
zEllips = (const char*)sqlite3_value_text(apVal[3]);
nToken = sqlite3_value_int(apVal[4]);
- iBestLast = nToken-1;
iBestCol = (iCol>=0 ? iCol : 0);
nPhrase = pApi->xPhraseCount(pFts);
@@ -180112,65 +181644,94 @@ static void fts5SnippetFunction(
if( aSeen==0 ){
rc = SQLITE_NOMEM;
}
-
if( rc==SQLITE_OK ){
rc = pApi->xInstCount(pFts, &nInst);
}
- for(i=0; rc==SQLITE_OK && i<nInst; i++){
- int ip, iSnippetCol, iStart;
- memset(aSeen, 0, nPhrase);
- rc = pApi->xInst(pFts, i, &ip, &iSnippetCol, &iStart);
- if( rc==SQLITE_OK && (iCol<0 || iSnippetCol==iCol) ){
- int nScore = 1000;
- int iLast = iStart - 1 + pApi->xPhraseSize(pFts, ip);
- int j;
- aSeen[ip] = 1;
- for(j=i+1; rc==SQLITE_OK && j<nInst; j++){
- int ic; int io; int iFinal;
- rc = pApi->xInst(pFts, j, &ip, &ic, &io);
- iFinal = io + pApi->xPhraseSize(pFts, ip) - 1;
- if( rc==SQLITE_OK && ic==iSnippetCol && iLast<iStart+nToken ){
- nScore += aSeen[ip] ? 1000 : 1;
- aSeen[ip] = 1;
- if( iFinal>iLast ) iLast = iFinal;
+ memset(&sFinder, 0, sizeof(Fts5SFinder));
+ for(i=0; i<nCol; i++){
+ if( iCol<0 || iCol==i ){
+ int nDoc;
+ int nDocsize;
+ int ii;
+ sFinder.iPos = 0;
+ sFinder.nFirst = 0;
+ rc = pApi->xColumnText(pFts, i, &sFinder.zDoc, &nDoc);
+ if( rc!=SQLITE_OK ) break;
+ rc = pApi->xTokenize(pFts,
+ sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb
+ );
+ if( rc!=SQLITE_OK ) break;
+ rc = pApi->xColumnSize(pFts, i, &nDocsize);
+ if( rc!=SQLITE_OK ) break;
+
+ for(ii=0; rc==SQLITE_OK && ii<nInst; ii++){
+ int ip, ic, io;
+ int iAdj;
+ int nScore;
+ int jj;
+
+ rc = pApi->xInst(pFts, ii, &ip, &ic, &io);
+ if( ic!=i || rc!=SQLITE_OK ) continue;
+ memset(aSeen, 0, nPhrase);
+ rc = fts5SnippetScore(pApi, pFts, nDocsize, aSeen, i,
+ io, nToken, &nScore, &iAdj
+ );
+ if( rc==SQLITE_OK && nScore>nBestScore ){
+ nBestScore = nScore;
+ iBestCol = i;
+ iBestStart = iAdj;
+ nColSize = nDocsize;
}
- }
- if( rc==SQLITE_OK && nScore>nBestScore ){
- iBestCol = iSnippetCol;
- iBestStart = iStart;
- iBestLast = iLast;
- nBestScore = nScore;
+ if( rc==SQLITE_OK && sFinder.nFirst && nDocsize>nToken ){
+ for(jj=0; jj<(sFinder.nFirst-1); jj++){
+ if( sFinder.aFirst[jj+1]>io ) break;
+ }
+
+ if( sFinder.aFirst[jj]<io ){
+ memset(aSeen, 0, nPhrase);
+ rc = fts5SnippetScore(pApi, pFts, nDocsize, aSeen, i,
+ sFinder.aFirst[jj], nToken, &nScore, 0
+ );
+
+ nScore += (sFinder.aFirst[jj]==0 ? 120 : 100);
+ if( rc==SQLITE_OK && nScore>nBestScore ){
+ nBestScore = nScore;
+ iBestCol = i;
+ iBestStart = sFinder.aFirst[jj];
+ nColSize = nDocsize;
+ }
+ }
+ }
}
}
}
if( rc==SQLITE_OK ){
- rc = pApi->xColumnSize(pFts, iBestCol, &nColSize);
- }
- if( rc==SQLITE_OK ){
rc = pApi->xColumnText(pFts, iBestCol, &ctx.zIn, &ctx.nIn);
}
+ if( rc==SQLITE_OK && nColSize==0 ){
+ rc = pApi->xColumnSize(pFts, iBestCol, &nColSize);
+ }
if( ctx.zIn ){
if( rc==SQLITE_OK ){
rc = fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter);
}
- if( (iBestStart+nToken-1)>iBestLast ){
- iBestStart -= (iBestStart+nToken-1-iBestLast) / 2;
- }
- if( iBestStart+nToken>nColSize ){
- iBestStart = nColSize - nToken;
- }
- if( iBestStart<0 ) iBestStart = 0;
-
ctx.iRangeStart = iBestStart;
ctx.iRangeEnd = iBestStart + nToken - 1;
if( iBestStart>0 ){
fts5HighlightAppend(&rc, &ctx, zEllips, -1);
}
+
+ /* Advance iterator ctx.iter so that it points to the first coalesced
+ ** phrase instance at or following position iBestStart. */
+ while( ctx.iter.iStart>=0 && ctx.iter.iStart<iBestStart && rc==SQLITE_OK ){
+ rc = fts5CInstIterNext(&ctx.iter);
+ }
+
if( rc==SQLITE_OK ){
rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb);
}
@@ -180179,15 +181740,15 @@ static void fts5SnippetFunction(
}else{
fts5HighlightAppend(&rc, &ctx, zEllips, -1);
}
-
- if( rc==SQLITE_OK ){
- sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
- }else{
- sqlite3_result_error_code(pCtx, rc);
- }
- sqlite3_free(ctx.zOut);
}
+ if( rc==SQLITE_OK ){
+ sqlite3_result_text(pCtx, (const char*)ctx.zOut, -1, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_result_error_code(pCtx, rc);
+ }
+ sqlite3_free(ctx.zOut);
sqlite3_free(aSeen);
+ sqlite3_free(sFinder.aFirst);
}
/************************************************************************/
@@ -181906,6 +183467,7 @@ static int fts5ExprGetToken(
case ',': tok = FTS5_COMMA; break;
case '+': tok = FTS5_PLUS; break;
case '*': tok = FTS5_STAR; break;
+ case '-': tok = FTS5_MINUS; break;
case '\0': tok = FTS5_EOF; break;
case '"': {
@@ -182492,6 +184054,7 @@ static int fts5ExprNearInitAll(
Fts5ExprNearset *pNear = pNode->pNear;
int i, j;
int rc = SQLITE_OK;
+ int bEof = 1;
assert( pNode->bNomatch==0 );
for(i=0; rc==SQLITE_OK && i<pNear->nPhrase; i++){
@@ -182499,7 +184062,6 @@ static int fts5ExprNearInitAll(
for(j=0; j<pPhrase->nTerm; j++){
Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
Fts5ExprTerm *p;
- int bEof = 1;
for(p=pTerm; p && rc==SQLITE_OK; p=p->pSynonym){
if( p->pIter ){
@@ -182519,13 +184081,12 @@ static int fts5ExprNearInitAll(
}
}
- if( bEof ){
- pNode->bEof = 1;
- return rc;
- }
+ if( bEof ) break;
}
+ if( bEof ) break;
}
+ pNode->bEof = bEof;
return rc;
}
@@ -183376,7 +184937,6 @@ static int sqlite3Fts5ExprClonePhrase(
){
int rc = SQLITE_OK; /* Return code */
Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */
- int i; /* Used to iterate through phrase terms */
Fts5Expr *pNew = 0; /* Expression to return via *ppNew */
TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */
@@ -183397,7 +184957,7 @@ static int sqlite3Fts5ExprClonePhrase(
if( rc==SQLITE_OK ){
Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset;
if( pColsetOrig ){
- int nByte = sizeof(Fts5Colset) + pColsetOrig->nCol * sizeof(int);
+ int nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int);
Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte);
if( pColset ){
memcpy(pColset, pColsetOrig, nByte);
@@ -183406,18 +184966,25 @@ static int sqlite3Fts5ExprClonePhrase(
}
}
- for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
- int tflags = 0;
- Fts5ExprTerm *p;
- for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
- const char *zTerm = p->zTerm;
- rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
- 0, 0);
- tflags = FTS5_TOKEN_COLOCATED;
- }
- if( rc==SQLITE_OK ){
- sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ if( pOrig->nTerm ){
+ int i; /* Used to iterate through phrase terms */
+ for(i=0; rc==SQLITE_OK && i<pOrig->nTerm; i++){
+ int tflags = 0;
+ Fts5ExprTerm *p;
+ for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){
+ const char *zTerm = p->zTerm;
+ rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm),
+ 0, 0);
+ tflags = FTS5_TOKEN_COLOCATED;
+ }
+ if( rc==SQLITE_OK ){
+ sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix;
+ }
}
+ }else{
+ /* This happens when parsing a token or quoted phrase that contains
+ ** no token characters at all. (e.g ... MATCH '""'). */
+ sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase));
}
if( rc==SQLITE_OK ){
@@ -183532,6 +185099,34 @@ static Fts5Colset *fts5ParseColset(
return pNew;
}
+/*
+** Allocate and return an Fts5Colset object specifying the inverse of
+** the colset passed as the second argument. Free the colset passed
+** as the second argument before returning.
+*/
+static Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p){
+ Fts5Colset *pRet;
+ int nCol = pParse->pConfig->nCol;
+
+ pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc,
+ sizeof(Fts5Colset) + sizeof(int)*nCol
+ );
+ if( pRet ){
+ int i;
+ int iOld = 0;
+ for(i=0; i<nCol; i++){
+ if( iOld>=p->nCol || p->aiCol[iOld]!=i ){
+ pRet->aiCol[pRet->nCol++] = i;
+ }else{
+ iOld++;
+ }
+ }
+ }
+
+ sqlite3_free(p);
+ return pRet;
+}
+
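As a concrete reading of the loop above: assuming pConfig->nCol is 5 and the input colset p holds {1, 3}, the returned colset is {0, 2, 4}; the input colset is freed before returning whether or not the new allocation succeeded.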
static Fts5Colset *sqlite3Fts5ParseColset(
Fts5Parse *pParse, /* Store SQLITE_NOMEM here if required */
Fts5Colset *pColset, /* Existing colset object */
@@ -185627,7 +187222,6 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){
return pRet;
}
-
/*
** Release a reference to data record returned by an earlier call to
** fts5DataRead().
@@ -185636,6 +187230,18 @@ static void fts5DataRelease(Fts5Data *pData){
sqlite3_free(pData);
}
+static Fts5Data *fts5LeafRead(Fts5Index *p, i64 iRowid){
+ Fts5Data *pRet = fts5DataRead(p, iRowid);
+ if( pRet ){
+ if( pRet->szLeaf>pRet->nn ){
+ p->rc = FTS5_CORRUPT;
+ fts5DataRelease(pRet);
+ pRet = 0;
+ }
+ }
+ return pRet;
+}
+
static int fts5IndexPrepareStmt(
Fts5Index *p,
sqlite3_stmt **ppStmt,
@@ -186444,7 +188050,7 @@ static void fts5SegIterNextPage(
pIter->pLeaf = pIter->pNextLeaf;
pIter->pNextLeaf = 0;
}else if( pIter->iLeafPgno<=pSeg->pgnoLast ){
- pIter->pLeaf = fts5DataRead(p,
+ pIter->pLeaf = fts5LeafRead(p,
FTS5_SEGMENT_ROWID(pSeg->iSegid, pIter->iLeafPgno)
);
}else{
@@ -186947,9 +188553,8 @@ static void fts5SegIterNext(
if( pLeaf->nn>pLeaf->szLeaf ){
pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
&pLeaf->p[pLeaf->szLeaf], pIter->iEndofDoclist
- );
+ );
}
-
}
else if( pLeaf->nn>pLeaf->szLeaf ){
pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
@@ -187194,6 +188799,11 @@ static void fts5LeafSeek(
iTermOff += nKeep;
iOff = iTermOff;
+ if( iOff>=n ){
+ p->rc = FTS5_CORRUPT;
+ return;
+ }
+
/* Read the nKeep field of the next term. */
fts5FastGetVarint32(a, iOff, nKeep);
}
@@ -188121,6 +189731,15 @@ static void fts5IterSetOutputs_Nocolset(Fts5Iter *pIter, Fts5SegIter *pSeg){
}
/*
+** xSetOutputs callback used when the Fts5Colset object has nCol==0 (match
+** against no columns at all).
+*/
+static void fts5IterSetOutputs_ZeroColset(Fts5Iter *pIter, Fts5SegIter *pSeg){
+ UNUSED_PARAM(pSeg);
+ pIter->base.nData = 0;
+}
+
+/*
** xSetOutputs callback used by detail=col when there is a column filter
** and there are 100 or more columns. Also called as a fallback from
** fts5IterSetOutputs_Col100 if the column-list spans more than one page.
@@ -188225,6 +189844,10 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){
pIter->xSetOutputs = fts5IterSetOutputs_Nocolset;
}
+ else if( pIter->pColset->nCol==0 ){
+ pIter->xSetOutputs = fts5IterSetOutputs_ZeroColset;
+ }
+
else if( pConfig->eDetail==FTS5_DETAIL_FULL ){
pIter->xSetOutputs = fts5IterSetOutputs_Full;
}
@@ -194001,7 +195624,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36", -1, SQLITE_TRANSIENT);
}
static int fts5Init(sqlite3 *db){
@@ -194089,7 +195712,7 @@ static int fts5Init(sqlite3 *db){
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts_init(
+SQLITE_API int sqlite3_fts_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -194102,7 +195725,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_fts_init(
#ifdef _WIN32
__declspec(dllexport)
#endif
-SQLITE_API int SQLITE_STDCALL sqlite3_fts5_init(
+SQLITE_API int sqlite3_fts5_init(
sqlite3 *db,
char **pzErrMsg,
const sqlite3_api_routines *pApi
@@ -197483,8 +199106,19 @@ static int fts5VocabBestIndexMethod(
}
}
- pInfo->idxNum = idxNum;
+ /* This virtual table always delivers results in ascending order of
+ ** the "term" column (column 0). So if the user has requested this
+ ** specifically - "ORDER BY term" or "ORDER BY term ASC" - set the
+ ** sqlite3_index_info.orderByConsumed flag to tell the core the results
+ ** are already in sorted order. */
+ if( pInfo->nOrderBy==1
+ && pInfo->aOrderBy[0].iColumn==0
+ && pInfo->aOrderBy[0].desc==0
+ ){
+ pInfo->orderByConsumed = 1;
+ }
+ pInfo->idxNum = idxNum;
return SQLITE_OK;
}
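A minimal sketch of the kind of statement this change benefits, assuming an fts5vocab table named ft_v (the name is illustrative); with orderByConsumed set, the core can skip adding its own sorting pass for an ascending ORDER BY on the term column:

#include <sqlite3.h>

/* Illustrative only: results already arrive in ascending term order. */
static int prepare_terms_in_order(sqlite3 *db, sqlite3_stmt **ppStmt){
  return sqlite3_prepare_v2(db,
      "SELECT term, doc FROM ft_v ORDER BY term ASC", -1, ppStmt, 0);
}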
@@ -197847,9 +199481,9 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */
-#else // USE_LIBSQLITE3
-// If users really want to link against the system sqlite3 we
-// need to make this file a noop.
-#endif
/************** End of fts5.c ************************************************/
+#else // USE_LIBSQLITE3
+ // If users really want to link against the system sqlite3 we
+// need to make this file a noop.
+	#endif
\ No newline at end of file
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
index 430ffee..d900cdd 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
@@ -1,3 +1,4 @@
+#ifndef USE_LIBSQLITE3
/*
** 2001 September 15
**
@@ -30,7 +31,6 @@
** the version number) and changes its name to "sqlite3.h" as
** part of the build process.
*/
-#ifndef USE_LIBSQLITE3
#ifndef SQLITE3_H
#define SQLITE3_H
#include <stdarg.h> /* Needed for the definition of va_list */
@@ -109,7 +109,8 @@ extern "C" {
** be held constant and Z will be incremented or else Y will be incremented
** and Z will be reset to zero.
**
-** Since version 3.6.18, SQLite source code has been stored in the
+** Since [version 3.6.18] ([dateof:3.6.18]),
+** SQLite source code has been stored in the
** <a href="http://www.fossil-scm.org/">Fossil configuration management
** system</a>. ^The SQLITE_SOURCE_ID macro evaluates to
** a string which identifies a particular check-in of SQLite
@@ -121,9 +122,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.14.0"
-#define SQLITE_VERSION_NUMBER 3014000
-#define SQLITE_SOURCE_ID "2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de"
+#define SQLITE_VERSION "3.15.1"
+#define SQLITE_VERSION_NUMBER 3015001
+#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -156,9 +157,9 @@ extern "C" {
** See also: [sqlite_version()] and [sqlite_source_id()].
*/
SQLITE_API SQLITE_EXTERN const char sqlite3_version[];
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
+SQLITE_API const char *sqlite3_libversion(void);
+SQLITE_API const char *sqlite3_sourceid(void);
+SQLITE_API int sqlite3_libversion_number(void);
/*
** CAPI3REF: Run-Time Library Compilation Options Diagnostics
@@ -183,8 +184,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
** [sqlite_compileoption_get()] and the [compile_options pragma].
*/
#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
+SQLITE_API int sqlite3_compileoption_used(const char *zOptName);
+SQLITE_API const char *sqlite3_compileoption_get(int N);
#endif
/*
@@ -223,7 +224,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
**
** See the [threading mode] documentation for additional information.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void);
+SQLITE_API int sqlite3_threadsafe(void);
/*
** CAPI3REF: Database Connection Handle
@@ -320,8 +321,8 @@ typedef sqlite_uint64 sqlite3_uint64;
** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
** argument is a harmless no-op.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*);
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*);
+SQLITE_API int sqlite3_close(sqlite3*);
+SQLITE_API int sqlite3_close_v2(sqlite3*);
/*
** The type for a callback function.
@@ -392,7 +393,7 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**);
** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
** </ul>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
+SQLITE_API int sqlite3_exec(
sqlite3*, /* An open database */
const char *sql, /* SQL to be evaluated */
int (*callback)(void*,int,char**,char**), /* Callback function */
@@ -453,7 +454,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec(
** [result codes]. However, experience has shown that many of
** these result codes are too coarse-grained. They do not provide as
** much information about problems as programmers might like. In an effort to
-** address this, newer versions of SQLite (version 3.3.8 and later) include
+** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8]
+** and later) include
** support for additional result codes that provide more detailed information
** about errors. These [extended result codes] are enabled or disabled
** on a per database connection basis using the
@@ -977,6 +979,12 @@ struct sqlite3_io_methods {
** on whether or not the file has been renamed, moved, or deleted since it
** was first opened.
**
+** <li>[[SQLITE_FCNTL_WIN32_GET_HANDLE]]
+** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the
+** underlying native file handle associated with a file handle. This file
+** control interprets its argument as a pointer to a native file handle and
+** writes the resulting value there.
+**
** <li>[[SQLITE_FCNTL_WIN32_SET_HANDLE]]
** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
** opcode causes the xFileControl method to swap the file handle with the one
@@ -1027,6 +1035,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_RBU 26
#define SQLITE_FCNTL_VFS_POINTER 27
#define SQLITE_FCNTL_JOURNAL_POINTER 28
+#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -1391,10 +1400,10 @@ struct sqlite3_vfs {
** must return [SQLITE_OK] on success and some other [error code] upon
** failure.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
+SQLITE_API int sqlite3_initialize(void);
+SQLITE_API int sqlite3_shutdown(void);
+SQLITE_API int sqlite3_os_init(void);
+SQLITE_API int sqlite3_os_end(void);
/*
** CAPI3REF: Configuring The SQLite Library
@@ -1427,7 +1436,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
** ^If the option is unknown or SQLite is unable to set the option
** then this routine returns a non-zero [error code].
*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
+SQLITE_API int sqlite3_config(int, ...);
/*
** CAPI3REF: Configure database connections
@@ -1446,7 +1455,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
** the call is considered successful.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Memory Allocation Routines
@@ -1970,8 +1979,18 @@ struct sqlite3_mem_methods {
** be a NULL pointer, in which case the new setting is not reported back.
** </dd>
**
+** <dt>SQLITE_DBCONFIG_MAINDBNAME</dt>
+** <dd> ^This option is used to change the name of the "main" database
+** schema. ^The sole argument is a pointer to a constant UTF8 string
+** which will become the new schema name in place of "main". ^SQLite
+** does not make a copy of the new main schema name string, so the application
+** must ensure that the argument passed into this DBCONFIG option is unchanged
+** until after the database connection closes.
+** </dd>
+**
** </dl>
*/
+#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
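A minimal sketch of the option documented above; because SQLite does not copy the string, a literal (or other storage that outlives the connection) is the simplest choice:

#include <sqlite3.h>

/* Illustrative only: after this call the schema formerly named "main" is
** addressed as "primary" instead. */
static int rename_main_schema(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_MAINDBNAME, "primary");
}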
@@ -1987,7 +2006,7 @@ struct sqlite3_mem_methods {
** [extended result codes] feature of SQLite. ^The extended result
** codes are disabled by default for historical compatibility.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff);
+SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff);
/*
** CAPI3REF: Last Insert Rowid
@@ -2039,7 +2058,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff)
** unpredictable and might not equal either the old or the new
** last insert [rowid].
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
+SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*);
/*
** CAPI3REF: Count The Number Of Rows Modified
@@ -2092,7 +2111,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
** while [sqlite3_changes()] is running then the value returned
** is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
+SQLITE_API int sqlite3_changes(sqlite3*);
/*
** CAPI3REF: Total Number Of Rows Modified
@@ -2116,7 +2135,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
** while [sqlite3_total_changes()] is running then the value
** returned is unpredictable and not meaningful.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
+SQLITE_API int sqlite3_total_changes(sqlite3*);
/*
** CAPI3REF: Interrupt A Long-Running Query
@@ -2156,7 +2175,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
** If the database connection closes while [sqlite3_interrupt()]
** is running then bad things will likely happen.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
+SQLITE_API void sqlite3_interrupt(sqlite3*);
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -2191,8 +2210,8 @@ SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
** The input to [sqlite3_complete16()] must be a zero-terminated
** UTF-16 string in native byte order.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql);
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
+SQLITE_API int sqlite3_complete(const char *sql);
+SQLITE_API int sqlite3_complete16(const void *sql);
/*
** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
@@ -2253,7 +2272,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
** A busy handler must not close the database connection
** or [prepared statement] that invoked the busy handler.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
+SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
/*
** CAPI3REF: Set A Busy Timeout
@@ -2276,7 +2295,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),vo
**
** See also: [PRAGMA busy_timeout]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
+SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms);
/*
** CAPI3REF: Convenience Routines For Running Queries
@@ -2351,7 +2370,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
** reflected in subsequent calls to [sqlite3_errcode()] or
** [sqlite3_errmsg()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
+SQLITE_API int sqlite3_get_table(
sqlite3 *db, /* An open database */
const char *zSql, /* SQL to be evaluated */
char ***pazResult, /* Results of the query */
@@ -2359,7 +2378,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
int *pnColumn, /* Number of result columns written here */
char **pzErrmsg /* Error msg written here */
);
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
+SQLITE_API void sqlite3_free_table(char **result);
/*
** CAPI3REF: Formatted String Printing Functions
@@ -2465,10 +2484,10 @@ SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
** addition that after the string has been read and copied into
** the result, [sqlite3_free()] is called on the input string.)^
*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list);
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list);
+SQLITE_API char *sqlite3_mprintf(const char*,...);
+SQLITE_API char *sqlite3_vmprintf(const char*, va_list);
+SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...);
+SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list);
/*
** CAPI3REF: Memory Allocation Subsystem
@@ -2558,12 +2577,12 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list
** a block of memory after it has been released using
** [sqlite3_free()] or [sqlite3_realloc()].
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64);
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void*);
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
+SQLITE_API void *sqlite3_malloc(int);
+SQLITE_API void *sqlite3_malloc64(sqlite3_uint64);
+SQLITE_API void *sqlite3_realloc(void*, int);
+SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64);
+SQLITE_API void sqlite3_free(void*);
+SQLITE_API sqlite3_uint64 sqlite3_msize(void*);
/*
** CAPI3REF: Memory Allocator Statistics
@@ -2588,8 +2607,8 @@ SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
** by [sqlite3_memory_highwater(1)] is the high-water mark
** prior to the reset.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
+SQLITE_API sqlite3_int64 sqlite3_memory_used(void);
+SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag);
/*
** CAPI3REF: Pseudo-Random Number Generator
@@ -2612,7 +2631,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
** internally and without recourse to the [sqlite3_vfs] xRandomness
** method.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
+SQLITE_API void sqlite3_randomness(int N, void *P);
/*
** CAPI3REF: Compile-Time Authorization Callbacks
@@ -2695,7 +2714,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
** as stated in the previous paragraph, sqlite3_step() invokes
** sqlite3_prepare_v2() to reprepare a statement after a schema change.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
+SQLITE_API int sqlite3_set_authorizer(
sqlite3*,
int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
void *pUserData
@@ -2803,9 +2822,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
** sqlite3_profile() function is considered experimental and is
** subject to change in future versions of SQLite.
*/
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_trace(sqlite3*,
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*,
void(*xTrace)(void*,const char*), void*);
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
+SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
/*
@@ -2894,7 +2913,7 @@ SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which
** are deprecated.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
+SQLITE_API int sqlite3_trace_v2(
sqlite3*,
unsigned uMask,
int(*xCallback)(unsigned,void*,void*,void*),
@@ -2933,7 +2952,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
** database connections for the meaning of "modify" in this paragraph.
**
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
+SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
/*
** CAPI3REF: Opening A New Database Connection
@@ -3162,15 +3181,15 @@ SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(vo
**
** See also: [sqlite3_temp_directory]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
+SQLITE_API int sqlite3_open(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
+SQLITE_API int sqlite3_open16(
const void *filename, /* Database filename (UTF-16) */
sqlite3 **ppDb /* OUT: SQLite db handle */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
+SQLITE_API int sqlite3_open_v2(
const char *filename, /* Database filename (UTF-8) */
sqlite3 **ppDb, /* OUT: SQLite db handle */
int flags, /* Flags */
@@ -3216,9 +3235,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
** VFS method, then the behavior of this routine is undefined and probably
** undesirable.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
/*
@@ -3262,11 +3281,11 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const cha
** was invoked incorrectly by the application. In that case, the
** error code and message may or may not be set.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db);
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int);
+SQLITE_API int sqlite3_errcode(sqlite3 *db);
+SQLITE_API int sqlite3_extended_errcode(sqlite3 *db);
+SQLITE_API const char *sqlite3_errmsg(sqlite3*);
+SQLITE_API const void *sqlite3_errmsg16(sqlite3*);
+SQLITE_API const char *sqlite3_errstr(int);
/*
** CAPI3REF: Prepared Statement Object
@@ -3334,7 +3353,7 @@ typedef struct sqlite3_stmt sqlite3_stmt;
**
** New run-time limit categories may be added in future releases.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
+SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal);
/*
** CAPI3REF: Run-Time Limit Categories
@@ -3486,28 +3505,28 @@ SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
** </li>
** </ol>
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
+SQLITE_API int sqlite3_prepare(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
+SQLITE_API int sqlite3_prepare_v2(
sqlite3 *db, /* Database handle */
const char *zSql, /* SQL statement, UTF-8 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const char **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
+SQLITE_API int sqlite3_prepare16(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
sqlite3_stmt **ppStmt, /* OUT: Statement handle */
const void **pzTail /* OUT: Pointer to unused portion of zSql */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
+SQLITE_API int sqlite3_prepare16_v2(
sqlite3 *db, /* Database handle */
const void *zSql, /* SQL statement, UTF-16 encoded */
int nByte, /* Maximum length of zSql in bytes. */
@@ -3546,8 +3565,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
** is obtained from [sqlite3_malloc()] and must be freed by the application
** by passing it to [sqlite3_free()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
-SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt);
+SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt);
+SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If An SQL Statement Writes The Database
@@ -3579,7 +3598,7 @@ SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt);
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
@@ -3600,7 +3619,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
** for example, in diagnostic routines to search for prepared
** statements that are holding a transaction open.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
+SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*);
/*
** CAPI3REF: Dynamically Typed Value Object
@@ -3764,20 +3783,20 @@ typedef struct sqlite3_context sqlite3_context;
** See also: [sqlite3_bind_parameter_count()],
** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
+SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
+SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
+SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double);
+SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int);
+SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
+SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int);
+SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
+SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
+SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
void(*)(void*), unsigned char encoding);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
+SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
+SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
+SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
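/*
** A minimal sketch of binding values to a prepared statement, assuming an
** open handle `db` and an illustrative table users(name TEXT, age INTEGER);
** a successful INSERT returns SQLITE_DONE from sqlite3_step():
**
**   sqlite3_stmt *pStmt = 0;
**   int rc = sqlite3_prepare_v2(db,
**       "INSERT INTO users(name, age) VALUES(?1, ?2)", -1, &pStmt, 0);
**   if( rc==SQLITE_OK ){
**     sqlite3_bind_text(pStmt, 1, "alice", -1, SQLITE_TRANSIENT);
**     sqlite3_bind_int(pStmt, 2, 30);
**     rc = sqlite3_step(pStmt);
**   }
**   sqlite3_finalize(pStmt);
*/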
/*
** CAPI3REF: Number Of SQL Parameters
@@ -3798,7 +3817,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite
** [sqlite3_bind_parameter_name()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
+SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*);
/*
** CAPI3REF: Name Of A Host Parameter
@@ -3826,7 +3845,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_index()].
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int);
+SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int);
/*
** CAPI3REF: Index Of A Parameter With A Given Name
@@ -3843,7 +3862,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*,
** [sqlite3_bind_parameter_count()], and
** [sqlite3_bind_parameter_name()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
+SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
/*
** CAPI3REF: Reset All Bindings On A Prepared Statement
@@ -3853,7 +3872,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const
** the [sqlite3_bind_blob | bindings] on a [prepared statement].
** ^Use this routine to reset all host parameters to NULL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
+SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
/*
** CAPI3REF: Number Of Columns In A Result Set
@@ -3865,7 +3884,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
**
** See also: [sqlite3_data_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Column Names In A Result Set
@@ -3894,8 +3913,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
** then the name of the column is unspecified and may change from
** one release of SQLite to the next.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N);
+SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N);
+SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N);
/*
** CAPI3REF: Source Of Data In A Query Result
@@ -3943,12 +3962,12 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N
** for the same [prepared statement] and result column
** at the same time then the results are undefined.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int);
/*
** CAPI3REF: Declared Datatype Of A Query Result
@@ -3980,8 +3999,8 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*
** is associated with individual values, not with the containers
** used to hold those values.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int);
+SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int);
+SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int);
/*
** CAPI3REF: Evaluate An SQL Statement
@@ -4042,7 +4061,8 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** other than [SQLITE_ROW] before any subsequent invocation of
** sqlite3_step(). Failure to reset the prepared statement using
** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
-** sqlite3_step(). But after version 3.6.23.1, sqlite3_step() began
+** sqlite3_step(). But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
** calling [sqlite3_reset()] automatically in this circumstance rather
** than returning [SQLITE_MISUSE]. This is not considered a compatibility
** break because any application that ever receives an SQLITE_MISUSE error
@@ -4061,7 +4081,7 @@ SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,in
** then the more specific [error codes] are returned directly
** by sqlite3_step(). The use of the "v2" interface is recommended.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
/*
** CAPI3REF: Number of columns in a result set
@@ -4082,7 +4102,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
**
** See also: [sqlite3_column_count()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Fundamental Datatypes
@@ -4272,16 +4292,16 @@ SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
** pointer. Subsequent calls to [sqlite3_errcode()] will return
** [SQLITE_NOMEM].)^
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
+SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol);
+SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol);
+SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol);
+SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol);
+SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol);
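/*
** A minimal sketch of iterating over query results, assuming the same
** illustrative handle `db` and users table as above, and <stdio.h>:
**
**   sqlite3_stmt *pStmt = 0;
**   if( sqlite3_prepare_v2(db, "SELECT name, age FROM users", -1,
**                          &pStmt, 0)==SQLITE_OK ){
**     while( sqlite3_step(pStmt)==SQLITE_ROW ){
**       const unsigned char *zName = sqlite3_column_text(pStmt, 0);
**       int nAge = sqlite3_column_int(pStmt, 1);
**       printf("%s is %d\n", zName, nAge);
**     }
**   }
**   sqlite3_finalize(pStmt);
*/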
/*
** CAPI3REF: Destroy A Prepared Statement Object
@@ -4309,7 +4329,7 @@ SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int
** statement after it has been finalized can result in undefined and
** undesirable behavior such as segfaults and heap corruption.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Reset A Prepared Statement Object
@@ -4336,7 +4356,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
+SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Create Or Redefine SQL Functions
@@ -4436,7 +4456,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
** close the database connection nor finalize or reset the prepared
** statement in which the function is running.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
+SQLITE_API int sqlite3_create_function(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4446,7 +4466,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
+SQLITE_API int sqlite3_create_function16(
sqlite3 *db,
const void *zFunctionName,
int nArg,
@@ -4456,7 +4476,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
void (*xFinal)(sqlite3_context*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
+SQLITE_API int sqlite3_create_function_v2(
sqlite3 *db,
const char *zFunctionName,
int nArg,
@@ -4502,12 +4522,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
** these functions, we will not explain what they do.
*/
#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void);
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
+SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void);
+SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
void*,sqlite3_int64);
#endif
@@ -4557,18 +4577,18 @@ SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(voi
** These routines must be called from the same thread as
** the SQL function that supplied the [sqlite3_value*] parameters.
*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_blob(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes(sqlite3_value*);
+SQLITE_API int sqlite3_value_bytes16(sqlite3_value*);
+SQLITE_API double sqlite3_value_double(sqlite3_value*);
+SQLITE_API int sqlite3_value_int(sqlite3_value*);
+SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*);
+SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*);
+SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*);
+SQLITE_API int sqlite3_value_type(sqlite3_value*);
+SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
/*
** CAPI3REF: Finding The Subtype Of SQL Values
@@ -4584,7 +4604,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
** from the result of one [application-defined SQL function] into the
** input of another.
*/
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
/*
** CAPI3REF: Copy And Free SQL Values
@@ -4600,8 +4620,8 @@ SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
** then sqlite3_value_free(V) is a harmless no-op.
*/
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
+SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*);
+SQLITE_API void sqlite3_value_free(sqlite3_value*);
/*
** CAPI3REF: Obtain Aggregate Function Context
@@ -4646,7 +4666,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
** This routine must be called from the same thread in which
** the aggregate SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes);
+SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes);
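/*
** A minimal sketch of an aggregate implementation using the per-group
** context; the row-counting behavior and callback names are illustrative:
**
**   static void countStep(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**     sqlite3_int64 *pCnt = sqlite3_aggregate_context(ctx, sizeof(*pCnt));
**     if( pCnt ) (*pCnt)++;
**   }
**   static void countFinal(sqlite3_context *ctx){
**     sqlite3_int64 *pCnt = sqlite3_aggregate_context(ctx, 0);
**     sqlite3_result_int64(ctx, pCnt ? *pCnt : 0);
**   }
*/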
/*
** CAPI3REF: User Data For Functions
@@ -4661,7 +4681,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int
** This routine must be called from the same thread in which
** the application-defined function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
+SQLITE_API void *sqlite3_user_data(sqlite3_context*);
/*
** CAPI3REF: Database Connection For Functions
@@ -4673,7 +4693,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
** and [sqlite3_create_function16()] routines that originally
** registered the application defined function.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
+SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
/*
** CAPI3REF: Function Auxiliary Data
@@ -4727,8 +4747,8 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
** These routines must be called from the same thread in which
** the SQL function is running.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
+SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
+SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
/*
@@ -4864,27 +4884,27 @@ typedef void (*sqlite3_destructor_type)(void*);
** than the one containing the application-defined function that received
** the [sqlite3_context] pointer, the results are undefined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*,
+SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*,
sqlite3_uint64,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
+SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
+SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int);
+SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int);
+SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*);
+SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
+SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
+SQLITE_API void sqlite3_result_null(sqlite3_context*);
+SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
void(*)(void*), unsigned char encoding);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
+SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
+SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
+SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*);
+SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n);
+SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
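/*
** A minimal sketch tying the value and result interfaces together in a
** scalar function, assuming an open handle `db`; the name "half" and its
** behavior are illustrative:
**
**   static void halfFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
**     sqlite3_result_double(ctx, 0.5*sqlite3_value_double(argv[0]));
**   }
**
**   sqlite3_create_function(db, "half", 1, SQLITE_UTF8, 0, halfFunc, 0, 0);
*/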
/*
@@ -4899,7 +4919,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
+SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
/*
** CAPI3REF: Define New Collating Sequences
@@ -4981,14 +5001,14 @@ SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned
**
** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
+SQLITE_API int sqlite3_create_collation(
sqlite3*,
const char *zName,
int eTextRep,
void *pArg,
int(*xCompare)(void*,int,const void*,int,const void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
+SQLITE_API int sqlite3_create_collation_v2(
sqlite3*,
const char *zName,
int eTextRep,
@@ -4996,7 +5016,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
int(*xCompare)(void*,int,const void*,int,const void*),
void(*xDestroy)(void*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
+SQLITE_API int sqlite3_create_collation16(
sqlite3*,
const void *zName,
int eTextRep,
@@ -5031,12 +5051,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
** [sqlite3_create_collation_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
+SQLITE_API int sqlite3_collation_needed(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const char*)
);
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
+SQLITE_API int sqlite3_collation_needed16(
sqlite3*,
void*,
void(*)(void*,sqlite3*,int eTextRep,const void*)
@@ -5050,11 +5070,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_key(
+SQLITE_API int sqlite3_key(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
+SQLITE_API int sqlite3_key_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The key */
@@ -5068,11 +5088,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
** The code to implement this API is not available in the public release
** of SQLite.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey(
+SQLITE_API int sqlite3_rekey(
sqlite3 *db, /* Database to be rekeyed */
const void *pKey, int nKey /* The new key */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
+SQLITE_API int sqlite3_rekey_v2(
sqlite3 *db, /* Database to be rekeyed */
const char *zDbName, /* Name of the database */
const void *pKey, int nKey /* The new key */
@@ -5082,7 +5102,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
** Specify the activation key for a SEE database. Unless
** activated, none of the SEE routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
+SQLITE_API void sqlite3_activate_see(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5092,7 +5112,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
** Specify the activation key for a CEROD database. Unless
** activated, none of the CEROD routines will work.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
+SQLITE_API void sqlite3_activate_cerod(
const char *zPassPhrase /* Activation phrase */
);
#endif
@@ -5114,7 +5134,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
** all, then the behavior of sqlite3_sleep() may deviate from the description
** in the previous paragraphs.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
+SQLITE_API int sqlite3_sleep(int);
/*
** CAPI3REF: Name Of The Folder Holding Temporary Files
@@ -5233,7 +5253,7 @@ SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory;
** connection while this routine is running, then the return value
** is undefined.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
+SQLITE_API int sqlite3_get_autocommit(sqlite3*);
/*
** CAPI3REF: Find The Database Handle Of A Prepared Statement
@@ -5246,7 +5266,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
** create the statement in the first place.
*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
+SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
/*
** CAPI3REF: Return The Filename For A Database Connection
@@ -5263,7 +5283,7 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
** will be an absolute pathname, even if the filename used
** to open the database originally was a URI or relative pathname.
*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Determine if a database is read-only
@@ -5273,7 +5293,7 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const cha
** of connection D is read-only, 0 if it is read/write, or -1 if N is not
** the name of a database on connection D.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
+SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Find the next prepared statement
@@ -5289,7 +5309,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbNa
** [sqlite3_next_stmt(D,S)] must refer to an open database
** connection and in particular must not be a NULL pointer.
*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
+SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
/*
** CAPI3REF: Commit And Rollback Notification Callbacks
@@ -5338,8 +5358,8 @@ SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_
**
** See also the [sqlite3_update_hook()] interface.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
+SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
+SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
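/*
** A minimal sketch of installing a commit hook, assuming an open handle
** `db`; the callback name is illustrative.  A zero return lets the COMMIT
** proceed, while a non-zero return converts it into a ROLLBACK:
**
**   static int onCommit(void *pArg){
**     return 0;
**   }
**
**   sqlite3_commit_hook(db, onCommit, 0);
*/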
/*
** CAPI3REF: Data Change Notification Callbacks
@@ -5390,7 +5410,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *),
** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
** and [sqlite3_preupdate_hook()] interfaces.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
+SQLITE_API void *sqlite3_update_hook(
sqlite3*,
void(*)(void *,int ,char const *,char const *,sqlite3_int64),
void*
@@ -5405,7 +5425,8 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
** and disabled if the argument is false.)^
**
** ^Cache sharing is enabled and disabled for an entire process.
-** This is a change as of SQLite version 3.5.0. In prior versions of SQLite,
+** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
+** In prior versions of SQLite,
** sharing was enabled or disabled for each thread separately.
**
** ^(The cache sharing mode set by this interface effects all subsequent
@@ -5430,7 +5451,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
**
** See Also: [SQLite Shared-Cache Mode]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
+SQLITE_API int sqlite3_enable_shared_cache(int);
/*
** CAPI3REF: Attempt To Free Heap Memory
@@ -5446,7 +5467,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
**
** See also: [sqlite3_db_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
+SQLITE_API int sqlite3_release_memory(int);
/*
** CAPI3REF: Free Memory Used By A Database Connection
@@ -5460,7 +5481,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
**
** See also: [sqlite3_release_memory()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
+SQLITE_API int sqlite3_db_release_memory(sqlite3*);
/*
** CAPI3REF: Impose A Limit On Heap Size
@@ -5499,7 +5520,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** from the heap.
** </ul>)^
**
-** Beginning with SQLite version 3.7.3, the soft heap limit is enforced
+** Beginning with SQLite [version 3.7.3] ([dateof:3.7.3]),
+** the soft heap limit is enforced
** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT]
** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT],
** the soft heap limit is enforced on every memory allocation. Without
@@ -5512,7 +5534,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
** The circumstances under which SQLite will enforce the soft heap limit may
** change in future releases of SQLite.
*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N);
+SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N);
/*
** CAPI3REF: Deprecated Soft Heap Limit Interface
@@ -5523,7 +5545,7 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64
** only. All new applications should use the
** [sqlite3_soft_heap_limit64()] interface rather than this one.
*/
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
+SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N);
/*
@@ -5593,7 +5615,7 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
** parsed, if that has not already been done, and returns an error if
** any errors are encountered while loading the schema.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
+SQLITE_API int sqlite3_table_column_metadata(
sqlite3 *db, /* Connection handle */
const char *zDbName, /* Database name or NULL */
const char *zTableName, /* Table name */
@@ -5649,7 +5671,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
**
** See also the [load_extension() SQL function].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
+SQLITE_API int sqlite3_load_extension(
sqlite3 *db, /* Load the extension into this database connection */
const char *zFile, /* Name of the shared library containing extension */
const char *zProc, /* Entry point. Derived from zFile if 0 */
@@ -5681,7 +5703,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
** remains disabled. This will prevent SQL injections from giving attackers
** access to extension loading capabilities.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
+SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff);
/*
** CAPI3REF: Automatically Load Statically Linked Extensions
@@ -5719,7 +5741,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
** See also: [sqlite3_reset_auto_extension()]
** and [sqlite3_cancel_auto_extension()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
+SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void));
/*
** CAPI3REF: Cancel Automatic Extension Loading
@@ -5731,7 +5753,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
** unregistered and it returns 0 if X was not on the list of initialization
** routines.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
+SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
/*
** CAPI3REF: Reset Automatic Extension Loading
@@ -5739,7 +5761,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(v
** ^This interface disables all automatic extensions previously
** registered using [sqlite3_auto_extension()].
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void);
+SQLITE_API void sqlite3_reset_auto_extension(void);
/*
** The interface to the virtual-table mechanism is currently considered
@@ -5893,13 +5915,15 @@ struct sqlite3_module {
** the xUpdate method are automatically rolled back by SQLite.
**
** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
-** structure for SQLite version 3.8.2. If a virtual table extension is
+** structure for SQLite [version 3.8.2] ([dateof:3.8.2]).
+** If a virtual table extension is
** used with an SQLite version earlier than 3.8.2, the results of attempting
** to read or write the estimatedRows field are undefined (but are likely
** to include crashing the application). The estimatedRows field should
** therefore only be used if [sqlite3_libversion_number()] returns a
** value greater than or equal to 3008002. Similarly, the idxFlags field
-** was added for version 3.9.0. It may therefore only be used if
+** was added for [version 3.9.0] ([dateof:3.9.0]).
+** It may therefore only be used if
** sqlite3_libversion_number() returns a value greater than or equal to
** 3009000.
*/
@@ -5984,13 +6008,13 @@ struct sqlite3_index_info {
** interface is equivalent to sqlite3_create_module_v2() with a NULL
** destructor.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
+SQLITE_API int sqlite3_create_module(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
void *pClientData /* Client data for xCreate/xConnect */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
+SQLITE_API int sqlite3_create_module_v2(
sqlite3 *db, /* SQLite connection to register module with */
const char *zName, /* Name of the module */
const sqlite3_module *p, /* Methods for the module */
@@ -6053,7 +6077,7 @@ struct sqlite3_vtab_cursor {
** to declare the format (the names and datatypes of the columns) of
** the virtual tables they implement.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
+SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
/*
** CAPI3REF: Overload A Function For A Virtual Table
@@ -6072,7 +6096,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
** purpose is to be a placeholder function that can be overloaded
** by a [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
+SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
** The interface to the virtual-table mechanism defined above (back up
@@ -6171,7 +6195,7 @@ typedef struct sqlite3_blob sqlite3_blob;
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
+SQLITE_API int sqlite3_blob_open(
sqlite3*,
const char *zDb,
const char *zTable,
@@ -6204,7 +6228,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
**
** ^This function sets the database handle error code and message.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
+SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
/*
** CAPI3REF: Close A BLOB Handle
@@ -6227,7 +6251,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64)
** is passed a valid open blob handle, the values returned by the
** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_close(sqlite3_blob *);
/*
** CAPI3REF: Return The Size Of An Open BLOB
@@ -6243,7 +6267,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
** been closed by [sqlite3_blob_close()]. Passing any other pointer in
** to this routine results in undefined and probably undesirable behavior.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
+SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *);
/*
** CAPI3REF: Read Data From A BLOB Incrementally
@@ -6272,7 +6296,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
**
** See also: [sqlite3_blob_write()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
+SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
/*
** CAPI3REF: Write Data Into A BLOB Incrementally
@@ -6314,7 +6338,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N,
**
** See also: [sqlite3_blob_read()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
+SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
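/*
** A minimal sketch of incremental BLOB reading, assuming an open handle
** `db` and an illustrative table docs(id INTEGER PRIMARY KEY, body BLOB);
** passing a NULL handle to sqlite3_blob_close() is a harmless no-op:
**
**   sqlite3_blob *pBlob = 0;
**   if( sqlite3_blob_open(db, "main", "docs", "body", 1, 0, &pBlob)==SQLITE_OK ){
**     char zBuf[32];
**     int nBody = sqlite3_blob_bytes(pBlob);
**     int nRead = nBody<(int)sizeof(zBuf) ? nBody : (int)sizeof(zBuf);
**     sqlite3_blob_read(pBlob, zBuf, nRead, 0);
**   }
**   sqlite3_blob_close(pBlob);
*/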
/*
** CAPI3REF: Virtual File System Objects
@@ -6345,9 +6369,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z,
** ^(If the default VFS is unregistered, another VFS is chosen as
** the default. The choice for the new VFS is arbitrary.)^
*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
+SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName);
+SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
+SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
/*
** CAPI3REF: Mutexes
@@ -6463,11 +6487,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
**
** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
+SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int);
+SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*);
+SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*);
/*
** CAPI3REF: Mutex Methods Object
@@ -6577,8 +6601,8 @@ struct sqlite3_mutex_methods {
** interface should also return 1 when given a NULL pointer.
*/
#ifndef NDEBUG
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*);
+SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*);
#endif
/*
@@ -6597,7 +6621,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */
#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */
#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */
-#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */
+#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_randomness() */
#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
@@ -6618,7 +6642,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
** ^If the [threading mode] is Single-thread or Multi-thread then this
** routine returns a NULL pointer.
*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
+SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*);
/*
** CAPI3REF: Low-Level Control Of Database Files
@@ -6653,7 +6677,7 @@ SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
**
** See also: [SQLITE_FCNTL_LOCKSTATE]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
+SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
/*
** CAPI3REF: Testing Interface
@@ -6672,7 +6696,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName
** Unlike most of the SQLite API, this function is not guaranteed to
** operate consistently from one release to the next.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
+SQLITE_API int sqlite3_test_control(int op, ...);
/*
** CAPI3REF: Testing Interface Operation Codes
@@ -6701,6 +6725,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17
#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
+#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19
#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
#define SQLITE_TESTCTRL_BYTEORDER 22
@@ -6735,8 +6760,8 @@ SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
**
** See also: [sqlite3_db_status()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
+SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
+SQLITE_API int sqlite3_status64(
int op,
sqlite3_int64 *pCurrent,
sqlite3_int64 *pHighwater,
@@ -6861,7 +6886,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64(
**
** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
+SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
/*
** CAPI3REF: Status Parameters for database connections
@@ -7004,7 +7029,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
**
** See also: [sqlite3_status()] and [sqlite3_db_status()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
+SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
/*
** CAPI3REF: Status Parameters for prepared statements
@@ -7473,16 +7498,16 @@ typedef struct sqlite3_backup sqlite3_backup;
** same time as another thread is invoking sqlite3_backup_step() it is
** possible that they return invalid values.
*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
+SQLITE_API sqlite3_backup *sqlite3_backup_init(
sqlite3 *pDest, /* Destination database handle */
const char *zDestName, /* Destination database name */
sqlite3 *pSource, /* Source database handle */
const char *zSourceName /* Source database name */
);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage);
+SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p);
+SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p);
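/*
** A minimal sketch of an online backup loop, assuming open handles `pDest`
** and `pSrc` (both names are illustrative); sqlite3_backup_step() returns
** SQLITE_OK while more pages remain to be copied:
**
**   sqlite3_backup *pBackup =
**       sqlite3_backup_init(pDest, "main", pSrc, "main");
**   if( pBackup ){
**     while( sqlite3_backup_step(pBackup, 5)==SQLITE_OK ){
**       sqlite3_sleep(10);
**     }
**     sqlite3_backup_finish(pBackup);
**   }
*/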
/*
** CAPI3REF: Unlock Notification
@@ -7599,7 +7624,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
** the special "DROP TABLE/INDEX" case, the extended error code is just
** SQLITE_LOCKED.)^
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
+SQLITE_API int sqlite3_unlock_notify(
sqlite3 *pBlocked, /* Waiting connection */
void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
void *pNotifyArg /* Argument to pass to xNotify */
@@ -7614,8 +7639,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
** strings in a case-independent fashion, using the same definition of "case
** independence" that SQLite uses internally when comparing identifiers.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
+SQLITE_API int sqlite3_stricmp(const char *, const char *);
+SQLITE_API int sqlite3_strnicmp(const char *, const char *, int);
/*
** CAPI3REF: String Globbing
@@ -7632,7 +7657,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
**
** See also: [sqlite3_strlike()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);
+SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr);
/*
** CAPI3REF: String LIKE Matching
@@ -7655,7 +7680,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zSt
**
** See also: [sqlite3_strglob()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
+SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
/*
** CAPI3REF: Error Logging Interface
@@ -7678,7 +7703,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zSt
** a few hundred characters, it will be truncated to the length of the
** buffer.
*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...);
+SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...);
/*
** CAPI3REF: Write-Ahead Log Commit Hook
@@ -7714,7 +7739,7 @@ SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...)
** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
** overwrite any prior [sqlite3_wal_hook()] settings.
*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
+SQLITE_API void *sqlite3_wal_hook(
sqlite3*,
int(*)(void *,sqlite3*,const char*,int),
void*
@@ -7749,7 +7774,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
** is only necessary if the default setting is found to be suboptimal
** for a particular application.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
+SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
/*
** CAPI3REF: Checkpoint a database
@@ -7771,7 +7796,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
** start a callback but which do not need the full power (and corresponding
** complication) of [sqlite3_wal_checkpoint_v2()].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
+SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
/*
** CAPI3REF: Checkpoint a database
@@ -7865,7 +7890,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zD
** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
** from SQL.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
+SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of attached database (or NULL) */
int eMode, /* SQLITE_CHECKPOINT_* value */
@@ -7901,7 +7926,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
** may be added in the future.
*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
+SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
/*
** CAPI3REF: Virtual Table Configuration Options
@@ -7954,7 +7979,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
** of the SQL statement that triggered the call to the [xUpdate] method of the
** [virtual table].
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
+SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *);
/*
** CAPI3REF: Conflict resolution modes
@@ -8059,7 +8084,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
**
** See also: [sqlite3_stmt_scanstatus_reset()]
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
+SQLITE_API int sqlite3_stmt_scanstatus(
sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
int idx, /* Index of loop to report on */
int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
@@ -8075,7 +8100,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
** This API is only available if the library is built with pre-processor
** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
+SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
/*
** CAPI3REF: Flush caches to disk mid-transaction
@@ -8107,7 +8132,7 @@ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
** ^This function does not set the database handle error code or message
** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
+SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
/*
** CAPI3REF: The pre-update hook.
@@ -8187,7 +8212,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
**
** See also: [sqlite3_update_hook()]
*/
-SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook(
+SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
sqlite3 *db,
void(*xPreUpdate)(
void *pCtx, /* Copy of third arg to preupdate_hook() */
@@ -8200,10 +8225,10 @@ SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook(
),
void*
);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
/*
** CAPI3REF: Low-level system error code
@@ -8215,7 +8240,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3
** called to get back the underlying "errno" that caused the problem, such
** as ENOSPC, EAUTH, EISDIR, and so forth.
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_system_errno(sqlite3*);
+SQLITE_API int sqlite3_system_errno(sqlite3*);
/*
** CAPI3REF: Database Snapshot
@@ -8265,7 +8290,7 @@ typedef struct sqlite3_snapshot sqlite3_snapshot;
** The [sqlite3_snapshot_get()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get(
sqlite3 *db,
const char *zSchema,
sqlite3_snapshot **ppSnapshot
@@ -8303,7 +8328,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
** The [sqlite3_snapshot_open()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open(
sqlite3 *db,
const char *zSchema,
sqlite3_snapshot *pSnapshot
@@ -8320,7 +8345,7 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
** The [sqlite3_snapshot_free()] interface is only available when the
** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
*/
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*);
+SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*);
/*
** CAPI3REF: Compare the ages of two snapshot handles.
@@ -8344,7 +8369,7 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3
** snapshot than P2, zero if the two handles refer to the same database
** snapshot, and a positive value if P1 is a newer snapshot than P2.
*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_cmp(
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
sqlite3_snapshot *p1,
sqlite3_snapshot *p2
);
@@ -8402,7 +8427,7 @@ typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zGeom(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
+SQLITE_API int sqlite3_rtree_geometry_callback(
sqlite3 *db,
const char *zGeom,
int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
@@ -8428,7 +8453,7 @@ struct sqlite3_rtree_geometry {
**
** SELECT ... FROM <rtree> WHERE <rtree col> MATCH $zQueryFunc(... params ...)
*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
+SQLITE_API int sqlite3_rtree_query_callback(
sqlite3 *db,
const char *zQueryFunc,
int (*xQueryFunc)(sqlite3_rtree_query_info*),
@@ -8640,7 +8665,7 @@ int sqlite3session_attach(
** CAPI3REF: Set a table filter on a Session Object.
**
** The second argument (xFilter) is the "filter callback". For changes to rows
-** in tables that are not attached to the Session oject, the filter is called
+** in tables that are not attached to the Session object, the filter is called
** to determine whether changes to the table's rows should be tracked or not.
** If xFilter returns 0, changes are not tracked. Note that once a table is
** attached, xFilter will not be called again.
@@ -8906,7 +8931,7 @@ int sqlite3session_isempty(sqlite3_session *pSession);
** [sqlite3changeset_invert()] functions, all changes within the changeset
** that apply to a single table are grouped together. This means that when
** an application iterates through a changeset using an iterator created by
-** this function, all changes that relate to a single table are visted
+** this function, all changes that relate to a single table are visited
** consecutively. There is no chance that the iterator will visit a change
** that applies to table X, then one for table Y, and then later on visit
** another change for table X.
@@ -8993,7 +9018,7 @@ int sqlite3changeset_op(
** 0x01 if the corresponding column is part of the table's primary key, or
** 0x00 if it is not.
**
-** If argumet pnCol is not NULL, then *pnCol is set to the number of columns
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
** in the table.
**
** If this function is called when the iterator does not point to a valid
@@ -9210,12 +9235,12 @@ int sqlite3changeset_concat(
/*
-** Changegroup handle.
+** CAPI3REF: Changegroup Handle
*/
typedef struct sqlite3_changegroup sqlite3_changegroup;
/*
-** CAPI3REF: Combine two or more changesets into a single changeset.
+** CAPI3REF: Create A New Changegroup Object
**
** An sqlite3_changegroup object is used to combine two or more changesets
** (or patchsets) into a single changeset (or patchset). A single changegroup
@@ -9252,6 +9277,8 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
+** CAPI3REF: Add A Changeset To A Changegroup
+**
** Add all changes within the changeset (or patchset) in buffer pData (size
** nData bytes) to the changegroup.
**
@@ -9266,7 +9293,7 @@ int sqlite3changegroup_new(sqlite3_changegroup **pp);
** apply to the same row as a change already present in the changegroup if
** the two rows have the same primary key.
**
-** Changes to rows that that do not already appear in the changegroup are
+** Changes to rows that do not already appear in the changegroup are
** simply copied into it. Or, if both the new changeset and the changegroup
** contain changes that apply to a single row, the final contents of the
** changegroup depends on the type of each change, as follows:
@@ -9327,6 +9354,8 @@ int sqlite3changegroup_new(sqlite3_changegroup **pp);
int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
/*
+** CAPI3REF: Obtain A Composite Changeset From A Changegroup
+**
** Obtain a buffer containing a changeset (or patchset) representing the
** current contents of the changegroup. If the inputs to the changegroup
** were themselves changesets, the output is a changeset. Or, if the
@@ -9355,7 +9384,7 @@ int sqlite3changegroup_output(
);
/*
-** Delete a changegroup object.
+** CAPI3REF: Delete A Changegroup Object
*/
void sqlite3changegroup_delete(sqlite3_changegroup*);
@@ -10339,9 +10368,9 @@ struct fts5_api {
#endif
#endif /* _FTS5_H */
-#else // USE_LIBSQLITE3
-// If users really want to link against the system sqlite3 we
-// need to make this file a noop.
-#endif
/******** End of fts5.h *********/
+#else // USE_LIBSQLITE3
+ // If users really want to link against the system sqlite3 we
+// need to make this file a noop.
+ #endif \ No newline at end of file
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
index de9d7d2..64933f1 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
@@ -114,12 +114,14 @@ import (
"strings"
"time"
"unsafe"
+
+ "golang.org/x/net/context"
)
-// Timestamp formats understood by both this module and SQLite.
-// The first format in the slice will be used when saving time values
-// into the database. When parsing a string from a timestamp or
-// datetime column, the formats are tried in order.
+// SQLiteTimestampFormats is timestamp formats understood by both this module
+// and SQLite. The first format in the slice will be used when saving time
+// values into the database. When parsing a string from a timestamp or datetime
+// column, the formats are tried in order.
var SQLiteTimestampFormats = []string{
// By default, store timestamps with whatever timezone they come with.
// When parsed, they will be returned with the same timezone.
@@ -139,20 +141,20 @@ func init() {
}
// Version returns SQLite library version information.
-func Version() (libVersion string, libVersionNumber int, sourceId string) {
+func Version() (libVersion string, libVersionNumber int, sourceID string) {
libVersion = C.GoString(C.sqlite3_libversion())
libVersionNumber = int(C.sqlite3_libversion_number())
- sourceId = C.GoString(C.sqlite3_sourceid())
- return libVersion, libVersionNumber, sourceId
+ sourceID = C.GoString(C.sqlite3_sourceid())
+ return libVersion, libVersionNumber, sourceID
}
-// Driver struct.
+// SQLiteDriver implement sql.Driver.
type SQLiteDriver struct {
Extensions []string
ConnectHook func(*SQLiteConn) error
}
-// Conn struct.
+// SQLiteConn implement sql.Conn.
type SQLiteConn struct {
db *C.sqlite3
loc *time.Location
@@ -161,35 +163,34 @@ type SQLiteConn struct {
aggregators []*aggInfo
}
-// Tx struct.
+// SQLiteTx implement sql.Tx.
type SQLiteTx struct {
c *SQLiteConn
}
-// Stmt struct.
+// SQLiteStmt implement sql.Stmt.
type SQLiteStmt struct {
c *SQLiteConn
s *C.sqlite3_stmt
- nv int
- nn []string
t string
closed bool
cls bool
}
-// Result struct.
+// SQLiteResult implement sql.Result.
type SQLiteResult struct {
id int64
changes int64
}
-// Rows struct.
+// SQLiteRows implement sql.Rows.
type SQLiteRows struct {
s *SQLiteStmt
nc int
cols []string
decltype []string
cls bool
+ done chan struct{}
}
type functionInfo struct {
@@ -295,19 +296,19 @@ func (ai *aggInfo) Done(ctx *C.sqlite3_context) {
// Commit transaction.
func (tx *SQLiteTx) Commit() error {
- _, err := tx.c.exec("COMMIT")
+ _, err := tx.c.exec(context.Background(), "COMMIT", nil)
if err != nil && err.(Error).Code == C.SQLITE_BUSY {
// sqlite3 will leave the transaction open in this scenario.
// However, database/sql considers the transaction complete once we
// return from Commit() - we must clean up to honour its semantics.
- tx.c.exec("ROLLBACK")
+ tx.c.exec(context.Background(), "ROLLBACK", nil)
}
return err
}
// Rollback transaction.
func (tx *SQLiteTx) Rollback() error {
- _, err := tx.c.exec("ROLLBACK")
+ _, err := tx.c.exec(context.Background(), "ROLLBACK", nil)
return err
}
@@ -381,13 +382,17 @@ func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) erro
if pure {
opts |= C.SQLITE_DETERMINISTIC
}
- rv := C._sqlite3_create_function(c.db, cname, C.int(numArgs), C.int(opts), C.uintptr_t(newHandle(c, &fi)), (*[0]byte)(unsafe.Pointer(C.callbackTrampoline)), nil, nil)
+ rv := sqlite3_create_function(c.db, cname, C.int(numArgs), C.int(opts), newHandle(c, &fi), C.callbackTrampoline, nil, nil)
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
+func sqlite3_create_function(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTextRep C.int, pApp uintptr, xFunc unsafe.Pointer, xStep unsafe.Pointer, xFinal unsafe.Pointer) C.int {
+ return C._sqlite3_create_function(db, zFunctionName, nArg, eTextRep, C.uintptr_t(pApp), (*[0]byte)(unsafe.Pointer(xFunc)), (*[0]byte)(unsafe.Pointer(xStep)), (*[0]byte)(unsafe.Pointer(xFinal)))
+}
+
// AutoCommit return which currently auto commit or not.
func (c *SQLiteConn) AutoCommit() bool {
return int(C.sqlite3_get_autocommit(c.db)) != 0
@@ -401,14 +406,22 @@ func (c *SQLiteConn) lastError() Error {
}
}
-// Implements Execer
+// Exec implements Execer.
func (c *SQLiteConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if len(args) == 0 {
- return c.exec(query)
+ list := make([]namedValue, len(args))
+ for i, v := range args {
+ list[i] = namedValue{
+ Ordinal: i + 1,
+ Value: v,
+ }
}
+ return c.exec(context.Background(), query, list)
+}
+func (c *SQLiteConn) exec(ctx context.Context, query string, args []namedValue) (driver.Result, error) {
+ start := 0
for {
- s, err := c.Prepare(query)
+ s, err := c.prepare(ctx, query)
if err != nil {
return nil, err
}
@@ -418,12 +431,16 @@ func (c *SQLiteConn) Exec(query string, args []driver.Value) (driver.Result, err
if len(args) < na {
return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args))
}
- res, err = s.Exec(args[:na])
+ for i := 0; i < na; i++ {
+ args[i].Ordinal -= start
+ }
+ res, err = s.(*SQLiteStmt).exec(ctx, args[:na])
if err != nil && err != driver.ErrSkip {
s.Close()
return nil, err
}
args = args[na:]
+ start += na
}
tail := s.(*SQLiteStmt).t
s.Close()
@@ -434,10 +451,28 @@ func (c *SQLiteConn) Exec(query string, args []driver.Value) (driver.Result, err
}
}
-// Implements Queryer
+type namedValue struct {
+ Name string
+ Ordinal int
+ Value driver.Value
+}
+
+// Query implements Queryer.
func (c *SQLiteConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ list := make([]namedValue, len(args))
+ for i, v := range args {
+ list[i] = namedValue{
+ Ordinal: i + 1,
+ Value: v,
+ }
+ }
+ return c.query(context.Background(), query, list)
+}
+
+func (c *SQLiteConn) query(ctx context.Context, query string, args []namedValue) (driver.Rows, error) {
+ start := 0
for {
- s, err := c.Prepare(query)
+ s, err := c.prepare(ctx, query)
if err != nil {
return nil, err
}
@@ -446,12 +481,16 @@ func (c *SQLiteConn) Query(query string, args []driver.Value) (driver.Rows, erro
if len(args) < na {
return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args))
}
- rows, err := s.Query(args[:na])
+ for i := 0; i < na; i++ {
+ args[i].Ordinal -= start
+ }
+ rows, err := s.(*SQLiteStmt).query(ctx, args[:na])
if err != nil && err != driver.ErrSkip {
s.Close()
- return nil, err
+ return rows, err
}
args = args[na:]
+ start += na
tail := s.(*SQLiteStmt).t
if tail == "" {
return rows, nil
@@ -462,21 +501,13 @@ func (c *SQLiteConn) Query(query string, args []driver.Value) (driver.Rows, erro
}
}
-func (c *SQLiteConn) exec(cmd string) (driver.Result, error) {
- pcmd := C.CString(cmd)
- defer C.free(unsafe.Pointer(pcmd))
-
- var rowid, changes C.longlong
- rv := C._sqlite3_exec(c.db, pcmd, &rowid, &changes)
- if rv != C.SQLITE_OK {
- return nil, c.lastError()
- }
- return &SQLiteResult{int64(rowid), int64(changes)}, nil
-}
-
// Begin transaction.
func (c *SQLiteConn) Begin() (driver.Tx, error) {
- if _, err := c.exec(c.txlock); err != nil {
+ return c.begin(context.Background())
+}
+
+func (c *SQLiteConn) begin(ctx context.Context) (driver.Tx, error) {
+ if _, err := c.exec(ctx, c.txlock, nil); err != nil {
return nil, err
}
return &SQLiteTx{c}, nil
@@ -507,7 +538,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
var loc *time.Location
txlock := "BEGIN"
- busy_timeout := 5000
+ busyTimeout := 5000
pos := strings.IndexRune(dsn, '?')
if pos >= 1 {
params, err := url.ParseQuery(dsn[pos+1:])
@@ -533,7 +564,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
if err != nil {
return nil, fmt.Errorf("Invalid _busy_timeout: %v: %v", val, err)
}
- busy_timeout = int(iv)
+ busyTimeout = int(iv)
}
// _txlock
@@ -570,7 +601,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
return nil, errors.New("sqlite succeeded without returning a database")
}
- rv = C.sqlite3_busy_timeout(db, C.int(busy_timeout))
+ rv = C.sqlite3_busy_timeout(db, C.int(busyTimeout))
if rv != C.SQLITE_OK {
return nil, Error{Code: ErrNo(rv)}
}
@@ -606,6 +637,10 @@ func (c *SQLiteConn) Close() error {
// Prepare the query string. Return a new statement.
func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) {
+ return c.prepare(context.Background(), query)
+}
+
+func (c *SQLiteConn) prepare(ctx context.Context, query string) (driver.Stmt, error) {
pquery := C.CString(query)
defer C.free(unsafe.Pointer(pquery))
var s *C.sqlite3_stmt
@@ -618,15 +653,7 @@ func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) {
if tail != nil && *tail != '\000' {
t = strings.TrimSpace(C.GoString(tail))
}
- nv := int(C.sqlite3_bind_parameter_count(s))
- var nn []string
- for i := 0; i < nv; i++ {
- pn := C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1)))
- if len(pn) > 1 && pn[0] == '$' && 48 <= pn[1] && pn[1] <= 57 {
- nn = append(nn, C.GoString(C.sqlite3_bind_parameter_name(s, C.int(i+1))))
- }
- }
- ss := &SQLiteStmt{c: c, s: s, nv: nv, nn: nn, t: t}
+ ss := &SQLiteStmt{c: c, s: s, t: t}
runtime.SetFinalizer(ss, (*SQLiteStmt).Close)
return ss, nil
}
@@ -648,9 +675,9 @@ func (s *SQLiteStmt) Close() error {
return nil
}
-// Return a number of parameters.
+// NumInput return a number of parameters.
func (s *SQLiteStmt) NumInput() int {
- return s.nv
+ return int(C.sqlite3_bind_parameter_count(s.s))
}
type bindArg struct {
@@ -658,31 +685,23 @@ type bindArg struct {
v driver.Value
}
-func (s *SQLiteStmt) bind(args []driver.Value) error {
+func (s *SQLiteStmt) bind(args []namedValue) error {
rv := C.sqlite3_reset(s.s)
if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
return s.c.lastError()
}
- var vargs []bindArg
- narg := len(args)
- vargs = make([]bindArg, narg)
- if len(s.nn) > 0 {
- for i, v := range s.nn {
- if pi, err := strconv.Atoi(v[1:]); err == nil {
- vargs[i] = bindArg{pi, args[i]}
- }
- }
- } else {
- for i, v := range args {
- vargs[i] = bindArg{i + 1, v}
+ for i, v := range args {
+ if v.Name != "" {
+ cname := C.CString(":" + v.Name)
+ args[i].Ordinal = int(C.sqlite3_bind_parameter_index(s.s, cname))
+ C.free(unsafe.Pointer(cname))
}
}
- for _, varg := range vargs {
- n := C.int(varg.n)
- v := varg.v
- switch v := v.(type) {
+ for _, arg := range args {
+ n := C.int(arg.Ordinal)
+ switch v := arg.Value.(type) {
case nil:
rv = C.sqlite3_bind_null(s.s, n)
case string:
@@ -722,29 +741,81 @@ func (s *SQLiteStmt) bind(args []driver.Value) error {
// Query the statement with arguments. Return records.
func (s *SQLiteStmt) Query(args []driver.Value) (driver.Rows, error) {
+ list := make([]namedValue, len(args))
+ for i, v := range args {
+ list[i] = namedValue{
+ Ordinal: i + 1,
+ Value: v,
+ }
+ }
+ return s.query(context.Background(), list)
+}
+
+func (s *SQLiteStmt) query(ctx context.Context, args []namedValue) (driver.Rows, error) {
if err := s.bind(args); err != nil {
return nil, err
}
- return &SQLiteRows{s, int(C.sqlite3_column_count(s.s)), nil, nil, s.cls}, nil
+
+ rows := &SQLiteRows{
+ s: s,
+ nc: int(C.sqlite3_column_count(s.s)),
+ cols: nil,
+ decltype: nil,
+ cls: s.cls,
+ done: make(chan struct{}),
+ }
+
+ go func() {
+ select {
+ case <-ctx.Done():
+ C.sqlite3_interrupt(s.c.db)
+ rows.Close()
+ case <-rows.done:
+ }
+ }()
+
+ return rows, nil
}
-// Return last inserted ID.
+// LastInsertId return last inserted ID.
func (r *SQLiteResult) LastInsertId() (int64, error) {
return r.id, nil
}
-// Return how many rows affected.
+// RowsAffected return how many rows affected.
func (r *SQLiteResult) RowsAffected() (int64, error) {
return r.changes, nil
}
-// Execute the statement with arguments. Return result object.
+// Exec execute the statement with arguments. Return result object.
func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) {
+ list := make([]namedValue, len(args))
+ for i, v := range args {
+ list[i] = namedValue{
+ Ordinal: i + 1,
+ Value: v,
+ }
+ }
+ return s.exec(context.Background(), list)
+}
+
+func (s *SQLiteStmt) exec(ctx context.Context, args []namedValue) (driver.Result, error) {
if err := s.bind(args); err != nil {
C.sqlite3_reset(s.s)
C.sqlite3_clear_bindings(s.s)
return nil, err
}
+
+ done := make(chan struct{})
+ defer close(done)
+ go func() {
+ select {
+ case <-ctx.Done():
+ C.sqlite3_interrupt(s.c.db)
+ case <-done:
+ }
+ }()
+
var rowid, changes C.longlong
rv := C._sqlite3_step(s.s, &rowid, &changes)
if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
@@ -753,7 +824,8 @@ func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) {
C.sqlite3_clear_bindings(s.s)
return nil, err
}
- return &SQLiteResult{int64(rowid), int64(changes)}, nil
+
+ return &SQLiteResult{id: int64(rowid), changes: int64(changes)}, nil
}
// Close the rows.
@@ -761,6 +833,9 @@ func (rc *SQLiteRows) Close() error {
if rc.s.closed {
return nil
}
+ if rc.done != nil {
+ close(rc.done)
+ }
if rc.cls {
return rc.s.Close()
}
@@ -771,7 +846,7 @@ func (rc *SQLiteRows) Close() error {
return nil
}
-// Return column names.
+// Columns return column names.
func (rc *SQLiteRows) Columns() []string {
if rc.nc != len(rc.cols) {
rc.cols = make([]string, rc.nc)
@@ -782,7 +857,7 @@ func (rc *SQLiteRows) Columns() []string {
return rc.cols
}
-// Return column types.
+// DeclTypes return column types.
func (rc *SQLiteRows) DeclTypes() []string {
if rc.decltype == nil {
rc.decltype = make([]string, rc.nc)
@@ -793,7 +868,7 @@ func (rc *SQLiteRows) DeclTypes() []string {
return rc.decltype
}
-// Move cursor to next.
+// Next move cursor to next.
func (rc *SQLiteRows) Next(dest []driver.Value) error {
rv := C.sqlite3_step(rc.s.s)
if rv == C.SQLITE_DONE {
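A rough usage sketch of the reworked exec loop above (not part of the vendored diff; the table and values are invented for illustration): a single Exec call may carry several semicolon-separated statements, and the start/Ordinal bookkeeping hands each prepared statement only the placeholders it declares, in order.

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)`); err != nil {
		log.Fatal(err)
	}
	// Two statements, three placeholders: the first statement consumes "alice",
	// the second consumes "bob" and "carol", mirroring args[:na] / args = args[na:] above.
	if _, err := db.Exec(`INSERT INTO t (name) VALUES (?); INSERT INTO t (name) VALUES (?), (?)`,
		"alice", "bob", "carol"); err != nil {
		log.Fatal(err)
	}
}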
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go
new file mode 100644
index 0000000..f9e08e1
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go
@@ -0,0 +1,69 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+package sqlite3
+
+import (
+ "database/sql/driver"
+ "errors"
+
+ "context"
+)
+
+// Ping implement Pinger.
+func (c *SQLiteConn) Ping(ctx context.Context) error {
+ if c.db == nil {
+ return errors.New("Connection was closed")
+ }
+ return nil
+}
+
+// QueryContext implement QueryerContext.
+func (c *SQLiteConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ list := make([]namedValue, len(args))
+ for i, nv := range args {
+ list[i] = namedValue(nv)
+ }
+ return c.query(ctx, query, list)
+}
+
+// ExecContext implement ExecerContext.
+func (c *SQLiteConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ list := make([]namedValue, len(args))
+ for i, nv := range args {
+ list[i] = namedValue(nv)
+ }
+ return c.exec(ctx, query, list)
+}
+
+// PrepareContext implement ConnPrepareContext.
+func (c *SQLiteConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ return c.prepare(ctx, query)
+}
+
+// BeginTx implement ConnBeginTx.
+func (c *SQLiteConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ return c.begin(ctx)
+}
+
+// QueryContext implement StmtQueryContext.
+func (s *SQLiteStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ list := make([]namedValue, len(args))
+ for i, nv := range args {
+ list[i] = namedValue(nv)
+ }
+ return s.query(ctx, list)
+}
+
+// ExecContext implement StmtExecContext.
+func (s *SQLiteStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ list := make([]namedValue, len(args))
+ for i, nv := range args {
+ list[i] = namedValue(nv)
+ }
+ return s.exec(ctx, list)
+}
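A minimal sketch of how the Go 1.8 context plumbing added above is expected to behave (an assumption, not taken from the diff; the query is deliberately artificial): when the context passed to a *Context method is cancelled, the watcher goroutine added in sqlite3.go calls sqlite3_interrupt and the call returns early instead of running to completion.

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// A deliberately slow recursive query; once the timeout fires the driver
	// interrupts SQLite rather than blocking until the count finishes.
	var n int64
	err = db.QueryRowContext(ctx,
		`WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM c WHERE x < 100000000)
		 SELECT count(*) FROM c`).Scan(&n)
	if err != nil {
		log.Printf("query aborted: %v", err)
		return
	}
	log.Println("count:", n)
}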
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go
index 4c5492b..e960626 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_icu.go
@@ -2,7 +2,7 @@
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
-// +build icu
+// +build icu
package sqlite3
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
index e6e0801..b5bccb1 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
@@ -42,6 +42,7 @@ func (c *SQLiteConn) loadExtensions(extensions []string) error {
return nil
}
+// LoadExtension load the sqlite3 extension.
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go
new file mode 100644
index 0000000..200d071
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_type.go
@@ -0,0 +1,57 @@
+package sqlite3
+
+/*
+#ifndef USE_LIBSQLITE3
+#include <sqlite3-binding.h>
+#else
+#include <sqlite3.h>
+#endif
+*/
+import "C"
+import (
+ "reflect"
+ "time"
+)
+
+// ColumnTypeDatabaseTypeName implement RowsColumnTypeDatabaseTypeName.
+func (rc *SQLiteRows) ColumnTypeDatabaseTypeName(i int) string {
+ return C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i)))
+}
+
+/*
+func (rc *SQLiteRows) ColumnTypeLength(index int) (length int64, ok bool) {
+ return 0, false
+}
+
+func (rc *SQLiteRows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+ return 0, 0, false
+}
+*/
+
+// ColumnTypeNullable implement RowsColumnTypeNullable.
+func (rc *SQLiteRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return true, true
+}
+
+// ColumnTypeScanType implement RowsColumnTypeScanType.
+func (rc *SQLiteRows) ColumnTypeScanType(i int) reflect.Type {
+ switch C.sqlite3_column_type(rc.s.s, C.int(i)) {
+ case C.SQLITE_INTEGER:
+ switch C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))) {
+ case "timestamp", "datetime", "date":
+ return reflect.TypeOf(time.Time{})
+ case "boolean":
+ return reflect.TypeOf(false)
+ }
+ return reflect.TypeOf(int64(0))
+ case C.SQLITE_FLOAT:
+ return reflect.TypeOf(float64(0))
+ case C.SQLITE_BLOB:
+ return reflect.SliceOf(reflect.TypeOf(byte(0)))
+ case C.SQLITE_NULL:
+ return reflect.TypeOf(nil)
+ case C.SQLITE_TEXT:
+ return reflect.TypeOf("")
+ }
+ return reflect.SliceOf(reflect.TypeOf(byte(0)))
+}
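A small sketch of how the new RowsColumnType* hooks surface through database/sql on Go 1.8 (an assumption; the table and columns are illustrative only): Rows.ColumnTypes exposes the declared SQL type from sqlite3_column_decltype and the Go scan type chosen by ColumnTypeScanType above.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE t (id INTEGER, created DATETIME, payload BLOB)`); err != nil {
		log.Fatal(err)
	}
	rows, err := db.Query(`SELECT id, created, payload FROM t`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cols {
		// DatabaseTypeName comes from the declared column type; ScanType is the
		// driver's suggested Go type from the switch in ColumnTypeScanType.
		fmt.Println(c.Name(), c.DatabaseTypeName(), c.ScanType())
	}
}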
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
index 0c28610..2eb064d 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h
@@ -13,7 +13,7 @@
** This header file defines the SQLite interface for use by
** shared libraries that want to be imported as extensions into
** an SQLite instance. Shared libraries that intend to be loaded
-** as extensions by SQLite should #include this file instead of
+** as extensions by SQLite should #include this file instead of
** sqlite3.h.
*/
#ifndef SQLITE3EXT_H
@@ -544,14 +544,14 @@ typedef int (*sqlite3_loadext_entry)(
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
- /* This case when the file really is being compiled as a loadable
+ /* This case when the file really is being compiled as a loadable
** extension */
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
# define SQLITE_EXTENSION_INIT3 \
extern const sqlite3_api_routines *sqlite3_api;
#else
- /* This case when the file is being statically linked into the
+ /* This case when the file is being statically linked into the
** application */
# define SQLITE_EXTENSION_INIT1 /*no-op*/
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
@@ -562,4 +562,4 @@ typedef int (*sqlite3_loadext_entry)(
#else // USE_LIBSQLITE3
// If users really want to link against the system sqlite3 we
// need to make this file a noop.
- #endif
+ #endif \ No newline at end of file
diff --git a/vendor/github.com/mattn/go-sqlite3/tracecallback.go b/vendor/github.com/mattn/go-sqlite3/tracecallback.go
index bf222b5..93688d4 100644
--- a/vendor/github.com/mattn/go-sqlite3/tracecallback.go
+++ b/vendor/github.com/mattn/go-sqlite3/tracecallback.go
@@ -17,7 +17,7 @@ package sqlite3
void stepTrampoline(sqlite3_context*, int, sqlite3_value**);
void doneTrampoline(sqlite3_context*);
-void traceCallbackTrampoline(unsigned traceEventCode, void *ctx, void *p, void *x);
+int traceCallbackTrampoline(unsigned int traceEventCode, void *ctx, void *p, void *x);
*/
import "C"
@@ -76,7 +76,7 @@ type TraceUserCallback func(TraceInfo) int
type TraceConfig struct {
Callback TraceUserCallback
- EventMask uint
+ EventMask C.uint
WantExpandedSQL bool
}
@@ -102,13 +102,13 @@ func fillExpandedSQL(info *TraceInfo, db *C.sqlite3, pStmt unsafe.Pointer) {
//export traceCallbackTrampoline
func traceCallbackTrampoline(
- traceEventCode uint,
+ traceEventCode C.uint,
// Parameter named 'C' in SQLite docs = Context given at registration:
ctx unsafe.Pointer,
// Parameter named 'P' in SQLite docs (Primary event data?):
p unsafe.Pointer,
// Parameter named 'X' in SQLite docs (eXtra event data?):
- xValue unsafe.Pointer) int {
+ xValue unsafe.Pointer) C.int {
if ctx == nil {
panic(fmt.Sprintf("No context (ev 0x%x)", traceEventCode))
@@ -196,7 +196,7 @@ func traceCallbackTrampoline(
if traceConf.Callback != nil {
r = traceConf.Callback(info)
}
- return r
+ return C.int(r)
}
type traceMapEntry struct {
@@ -358,7 +358,7 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
if pure {
opts |= C.SQLITE_DETERMINISTIC
}
- rv := C._sqlite3_create_function(c.db, cname, C.int(stepNArgs), C.int(opts), C.uintptr_t(newHandle(c, &ai)), nil, (*[0]byte)(unsafe.Pointer(C.stepTrampoline)), (*[0]byte)(unsafe.Pointer(C.doneTrampoline)))
+ rv := sqlite3_create_function(c.db, cname, C.int(stepNArgs), C.int(opts), newHandle(c, &ai), nil, C.stepTrampoline, C.doneTrampoline)
if rv != C.SQLITE_OK {
return c.lastError()
}
@@ -396,7 +396,7 @@ func (c *SQLiteConn) SetTrace(requested *TraceConfig) error {
// The callback trampoline function does cleanup on Close event,
// regardless of the presence or absence of the user callback.
// Therefore it needs the Close event to be selected:
- actualEventMask := reqCopy.EventMask | TraceClose
+ actualEventMask := uint(reqCopy.EventMask | TraceClose)
err := c.setSQLiteTrace(actualEventMask)
return err
}
diff --git a/vendor/github.com/mattn/go-sqlite3/tracecallback_noimpl.go b/vendor/github.com/mattn/go-sqlite3/tracecallback_noimpl.go
index c0a49e9..f270415 100644
--- a/vendor/github.com/mattn/go-sqlite3/tracecallback_noimpl.go
+++ b/vendor/github.com/mattn/go-sqlite3/tracecallback_noimpl.go
@@ -4,6 +4,7 @@ package sqlite3
import "errors"
+// RegisterAggregator register the aggregator.
func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error {
return errors.New("This feature is not implemented")
}
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
index 8996b02..47e1f9e 100644
--- a/vendor/github.com/mitchellh/go-homedir/homedir.go
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -87,8 +87,8 @@ func dirUnix() (string, error) {
cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
- // If "getent" is missing, ignore it
- if err == exec.ErrNotFound {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
return "", err
}
} else {
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index b0ab89b..b0ab9a3 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -1,5 +1,5 @@
// The mapstructure package exposes functionality to convert an
-// abitrary map[string]interface{} into a native Go structure.
+// arbitrary map[string]interface{} into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
@@ -546,7 +546,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er
// into that. Then set the value of the pointer to this type.
valType := val.Type()
valElemType := valType.Elem()
- realVal := reflect.New(valElemType)
+
+ realVal := val
+ if realVal.IsNil() || d.config.ZeroFields {
+ realVal = reflect.New(valElemType)
+ }
+
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
return err
}
@@ -562,20 +567,24 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
valElemType := valType.Elem()
sliceType := reflect.SliceOf(valElemType)
- // Check input type
- if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- // Accept empty map instead of array/slice in weakly typed mode
- if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
- val.Set(reflect.MakeSlice(sliceType, 0, 0))
- return nil
- } else {
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ valSlice := val
+ if valSlice.IsNil() || d.config.ZeroFields {
+
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ // Accept empty map instead of array/slice in weakly typed mode
+ if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ } else {
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
}
- }
- // Make a new slice to hold our result, same size as the original data.
- valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+ }
// Accumulate any errors
errors := make([]string, 0)
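A minimal sketch of the behavioural change above (an assumption; the conf type and values are invented for illustration): with ZeroFields left false, Decode now writes through a pointer that is already non-nil instead of allocating a fresh one, so existing targets are reused.

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type conf struct {
	Tags []string
	Port *int
}

func main() {
	existing := 8080
	out := conf{
		Tags: []string{"prefilled"},
		Port: &existing,
	}

	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		ZeroFields: false, // keep and reuse the values already present in out
		Result:     &out,
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(map[string]interface{}{"port": 9090}); err != nil {
		log.Fatal(err)
	}
	// The original pointer target is written through rather than replaced, so
	// "existing" now holds 9090; Tags is untouched because the input had no "tags" key.
	fmt.Println(existing, *out.Port, out.Tags)
}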
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
index b67664f..d62ca5f 100644
--- a/vendor/github.com/pelletier/go-toml/keysparsing.go
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -4,6 +4,7 @@ package toml
import (
"bytes"
+ "errors"
"fmt"
"unicode"
)
@@ -47,7 +48,7 @@ func parseKey(key string) ([]string, error) {
} else {
if !wasInQuotes {
if buffer.Len() == 0 {
- return nil, fmt.Errorf("empty key group")
+ return nil, errors.New("empty table key")
}
groups = append(groups, buffer.String())
buffer.Reset()
@@ -67,23 +68,23 @@ func parseKey(key string) ([]string, error) {
return nil, fmt.Errorf("invalid bare character: %c", char)
}
if !inQuotes && expectDot {
- return nil, fmt.Errorf("what?")
+ return nil, errors.New("what?")
}
buffer.WriteRune(char)
expectDot = false
}
}
if inQuotes {
- return nil, fmt.Errorf("mismatched quotes")
+ return nil, errors.New("mismatched quotes")
}
if escapeNext {
- return nil, fmt.Errorf("unfinished escape sequence")
+ return nil, errors.New("unfinished escape sequence")
}
if buffer.Len() > 0 {
groups = append(groups, buffer.String())
}
if len(groups) == 0 {
- return nil, fmt.Errorf("empty key")
+ return nil, errors.New("empty key")
}
return groups, nil
}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index eb4d999..4b378d4 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -129,7 +129,7 @@ func (l *tomlLexer) lexVoid() tomlLexStateFn {
next := l.peek()
switch next {
case '[':
- return l.lexKeyGroup
+ return l.lexTableKey
case '#':
return l.lexComment
case '=':
@@ -516,21 +516,21 @@ func (l *tomlLexer) lexString() tomlLexStateFn {
return l.lexRvalue
}
-func (l *tomlLexer) lexKeyGroup() tomlLexStateFn {
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
l.next()
if l.peek() == '[' {
- // token '[[' signifies an array of anonymous key groups
+ // token '[[' signifies an array of tables
l.next()
l.emit(tokenDoubleLeftBracket)
- return l.lexInsideKeyGroupArray
+ return l.lexInsideTableArrayKey
}
- // vanilla key group
+ // vanilla table key
l.emit(tokenLeftBracket)
- return l.lexInsideKeyGroup
+ return l.lexInsideTableKey
}
-func (l *tomlLexer) lexInsideKeyGroupArray() tomlLexStateFn {
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
@@ -545,15 +545,15 @@ func (l *tomlLexer) lexInsideKeyGroupArray() tomlLexStateFn {
l.emit(tokenDoubleRightBracket)
return l.lexVoid
case '[':
- return l.errorf("group name cannot contain ']'")
+ return l.errorf("table array key cannot contain ']'")
default:
l.next()
}
}
- return l.errorf("unclosed key group array")
+ return l.errorf("unclosed table array key")
}
-func (l *tomlLexer) lexInsideKeyGroup() tomlLexStateFn {
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
@@ -564,12 +564,12 @@ func (l *tomlLexer) lexInsideKeyGroup() tomlLexStateFn {
l.emit(tokenRightBracket)
return l.lexVoid
case '[':
- return l.errorf("group name cannot contain ']'")
+ return l.errorf("table key cannot contain ']'")
default:
l.next()
}
}
- return l.errorf("unclosed key group")
+ return l.errorf("unclosed table key")
}
func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index 25932d7..20e90a3 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -3,6 +3,7 @@
package toml
import (
+ "errors"
"fmt"
"reflect"
"regexp"
@@ -15,8 +16,8 @@ type tomlParser struct {
flow chan token
tree *TomlTree
tokensBuffer []token
- currentGroup []string
- seenGroupKeys []string
+ currentTable []string
+ seenTableKeys []string
}
type tomlParserStateFn func() tomlParserStateFn
@@ -95,13 +96,13 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
startToken := p.getToken() // discard the [[
key := p.getToken()
if key.typ != tokenKeyGroupArray {
- p.raiseError(key, "unexpected token %s, was expecting a key group array", key)
+ p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
}
- // get or create group array element at the indicated part in the path
+ // get or create table array element at the indicated part in the path
keys, err := parseKey(key.val)
if err != nil {
- p.raiseError(key, "invalid group array key: %s", err)
+ p.raiseError(key, "invalid table array key: %s", err)
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys)
@@ -111,32 +112,32 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
} else if target, ok := destTree.([]*TomlTree); ok && target != nil {
array = destTree.([]*TomlTree)
} else {
- p.raiseError(key, "key %s is already assigned and not of type group array", key)
+ p.raiseError(key, "key %s is already assigned and not of type table array", key)
}
- p.currentGroup = keys
+ p.currentTable = keys
- // add a new tree to the end of the group array
+ // add a new tree to the end of the table array
newTree := newTomlTree()
newTree.position = startToken.Position
array = append(array, newTree)
- p.tree.SetPath(p.currentGroup, array)
+ p.tree.SetPath(p.currentTable, array)
- // remove all keys that were children of this group array
+ // remove all keys that were children of this table array
prefix := key.val + "."
found := false
- for ii := 0; ii < len(p.seenGroupKeys); {
- groupKey := p.seenGroupKeys[ii]
- if strings.HasPrefix(groupKey, prefix) {
- p.seenGroupKeys = append(p.seenGroupKeys[:ii], p.seenGroupKeys[ii+1:]...)
+ for ii := 0; ii < len(p.seenTableKeys); {
+ tableKey := p.seenTableKeys[ii]
+ if strings.HasPrefix(tableKey, prefix) {
+ p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
} else {
- found = (groupKey == key.val)
+ found = (tableKey == key.val)
ii++
}
}
// keep this key name from use by other kinds of assignments
if !found {
- p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
}
// move to next parser state
@@ -148,24 +149,24 @@ func (p *tomlParser) parseGroup() tomlParserStateFn {
startToken := p.getToken() // discard the [
key := p.getToken()
if key.typ != tokenKeyGroup {
- p.raiseError(key, "unexpected token %s, was expecting a key group", key)
+ p.raiseError(key, "unexpected token %s, was expecting a table key", key)
}
- for _, item := range p.seenGroupKeys {
+ for _, item := range p.seenTableKeys {
if item == key.val {
p.raiseError(key, "duplicated tables")
}
}
- p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+ p.seenTableKeys = append(p.seenTableKeys, key.val)
keys, err := parseKey(key.val)
if err != nil {
- p.raiseError(key, "invalid group array key: %s", err)
+ p.raiseError(key, "invalid table array key: %s", err)
}
if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
p.raiseError(key, "%s", err)
}
p.assume(tokenRightBracket)
- p.currentGroup = keys
+ p.currentTable = keys
return p.parseStart
}
@@ -174,26 +175,26 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
p.assume(tokenEqual)
value := p.parseRvalue()
- var groupKey []string
- if len(p.currentGroup) > 0 {
- groupKey = p.currentGroup
+ var tableKey []string
+ if len(p.currentTable) > 0 {
+ tableKey = p.currentTable
} else {
- groupKey = []string{}
+ tableKey = []string{}
}
- // find the group to assign, looking out for arrays of groups
+ // find the table to assign, looking out for arrays of tables
var targetNode *TomlTree
- switch node := p.tree.GetPath(groupKey).(type) {
+ switch node := p.tree.GetPath(tableKey).(type) {
case []*TomlTree:
targetNode = node[len(node)-1]
case *TomlTree:
targetNode = node
default:
- p.raiseError(key, "Unknown group type for path: %s",
- strings.Join(groupKey, "."))
+ p.raiseError(key, "Unknown table type for path: %s",
+ strings.Join(tableKey, "."))
}
- // assign value to the found group
+ // assign value to the found table
keyVals, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "%s", err)
@@ -203,7 +204,7 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
}
keyVal := keyVals[0]
localKey := []string{keyVal}
- finalKey := append(groupKey, keyVal)
+ finalKey := append(tableKey, keyVal)
if targetNode.GetPath(localKey) != nil {
p.raiseError(key, "The following key was defined twice: %s",
strings.Join(finalKey, "."))
@@ -211,7 +212,7 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
var toInsert interface{}
switch value.(type) {
- case *TomlTree:
+ case *TomlTree, []*TomlTree:
toInsert = value
default:
toInsert = &tomlValue{value, key.Position}
@@ -224,7 +225,7 @@ var numberUnderscoreInvalidRegexp *regexp.Regexp
func cleanupNumberToken(value string) (string, error) {
if numberUnderscoreInvalidRegexp.MatchString(value) {
- return "", fmt.Errorf("invalid use of _ in number")
+ return "", errors.New("invalid use of _ in number")
}
cleanedVal := strings.Replace(value, "_", "", -1)
return cleanedVal, nil
@@ -380,8 +381,8 @@ func parseToml(flow chan token) *TomlTree {
flow: flow,
tree: result,
tokensBuffer: make([]token, 0),
- currentGroup: make([]string, 0),
- seenGroupKeys: make([]string, 0),
+ currentTable: make([]string, 0),
+ seenTableKeys: make([]string, 0),
}
parser.run()
return result
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
index bf9321b..db3da0d 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
@@ -79,11 +79,11 @@ func toTomlValue(item interface{}, indent int) string {
case time.Time:
return tab + value.Format(time.RFC3339)
case []interface{}:
- result := tab + "[\n"
+ values := []string{}
for _, item := range value {
- result += toTomlValue(item, indent+2) + ",\n"
+ values = append(values, toTomlValue(item, 0))
}
- return result + tab + "]"
+ return "[" + strings.Join(values, ",") + "]"
case nil:
return ""
default:
@@ -92,57 +92,66 @@ func toTomlValue(item interface{}, indent int) string {
}
// Recursive support function for ToString()
-// Outputs a tree, using the provided keyspace to prefix group names
+// Outputs a tree, using the provided keyspace to prefix table names
func (t *TomlTree) toToml(indent, keyspace string) string {
- result := ""
+ resultChunks := []string{}
for k, v := range t.values {
// figure out the keyspace
combinedKey := k
if keyspace != "" {
combinedKey = keyspace + "." + combinedKey
}
+ resultChunk := ""
// output based on type
switch node := v.(type) {
case []*TomlTree:
for _, item := range node {
if len(item.Keys()) > 0 {
- result += fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
+ resultChunk += fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
}
- result += item.toToml(indent+" ", combinedKey)
+ resultChunk += item.toToml(indent+" ", combinedKey)
}
+ resultChunks = append(resultChunks, resultChunk)
case *TomlTree:
if len(node.Keys()) > 0 {
- result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
+ resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
}
- result += node.toToml(indent+" ", combinedKey)
+ resultChunk += node.toToml(indent+" ", combinedKey)
+ resultChunks = append(resultChunks, resultChunk)
case map[string]interface{}:
sub := TreeFromMap(node)
if len(sub.Keys()) > 0 {
- result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
+ resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
}
- result += sub.toToml(indent+" ", combinedKey)
+ resultChunk += sub.toToml(indent+" ", combinedKey)
+ resultChunks = append(resultChunks, resultChunk)
case map[string]string:
sub := TreeFromMap(convertMapStringString(node))
if len(sub.Keys()) > 0 {
- result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
+ resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
}
- result += sub.toToml(indent+" ", combinedKey)
+ resultChunk += sub.toToml(indent+" ", combinedKey)
+ resultChunks = append(resultChunks, resultChunk)
case map[interface{}]interface{}:
sub := TreeFromMap(convertMapInterfaceInterface(node))
if len(sub.Keys()) > 0 {
- result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
+ resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
}
- result += sub.toToml(indent+" ", combinedKey)
+ resultChunk += sub.toToml(indent+" ", combinedKey)
+ resultChunks = append(resultChunks, resultChunk)
case *tomlValue:
- result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0))
+ resultChunk = fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0))
+ resultChunks = append([]string{resultChunk}, resultChunks...)
default:
- result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0))
+ resultChunk = fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0))
+ resultChunks = append([]string{resultChunk}, resultChunks...)
}
+
}
- return result
+ return strings.Join(resultChunks, "")
}
func convertMapStringString(in map[string]string) map[string]interface{} {
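A small sketch of the rendering change above (an assumption that the package's Load and ToString helpers behave as their names suggest; the TOML content is made up): simple key/value chunks are now prepended ahead of sub-table chunks and arrays are emitted inline, so re-serialising a tree keeps top-level values above [table] headers.

package main

import (
	"fmt"
	"log"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
ports = [8001, 8002]

[server]
host = "127.0.0.1"
`)
	if err != nil {
		log.Fatal(err)
	}
	// With the chunk ordering above, "ports" is printed before the [server]
	// table, and the array value is rendered on a single line.
	fmt.Print(tree.ToString())
}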
diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS
deleted file mode 100644
index 7eff823..0000000
--- a/vendor/github.com/pkg/sftp/CONTRIBUTORS
+++ /dev/null
@@ -1,2 +0,0 @@
-Dave Cheney <dave@cheney.net>
-Saulius Gurklys <s4uliu5@gmail.com>
diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE
deleted file mode 100644
index b7b5392..0000000
--- a/vendor/github.com/pkg/sftp/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) 2013, Dave Cheney
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md
deleted file mode 100644
index e406b1e..0000000
--- a/vendor/github.com/pkg/sftp/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-sftp
-----
-
-The `sftp` package provides support for file system operations on remote ssh servers using the SFTP subsystem.
-
-[![UNIX Build Status](https://travis-ci.org/pkg/sftp.svg?branch=master)](https://travis-ci.org/pkg/sftp) [![GoDoc](http://godoc.org/github.com/pkg/sftp?status.svg)](http://godoc.org/github.com/pkg/sftp)
-
-usage and examples
-------------------
-
-See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for examples and usage.
-
-The basic operation of the package mirrors the facilities of the [os](http://golang.org/pkg/os) package.
-
-The Walker interface for directory traversal is heavily inspired by Keith Rarick's [fs](http://godoc.org/github.com/kr/fs) package.
-
-roadmap
--------
-
- * There is way too much duplication in the Client methods. If there was an unmarshal(interface{}) method this would reduce a heap of the duplication.
-
-contributing
-------------
-
-We welcome pull requests, bug fixes and issue reports.
-
-Before proposing a large change, first please discuss your change by raising an issue.
diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go
deleted file mode 100644
index 3e4c291..0000000
--- a/vendor/github.com/pkg/sftp/attrs.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package sftp
-
-// ssh_FXP_ATTRS support
-// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
-
-import (
- "os"
- "syscall"
- "time"
-)
-
-const (
- ssh_FILEXFER_ATTR_SIZE = 0x00000001
- ssh_FILEXFER_ATTR_UIDGID = 0x00000002
- ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004
- ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008
- ssh_FILEXFER_ATTR_EXTENDED = 0x80000000
-)
-
-// fileInfo is an artificial type designed to satisfy os.FileInfo.
-type fileInfo struct {
- name string
- size int64
- mode os.FileMode
- mtime time.Time
- sys interface{}
-}
-
-// Name returns the base name of the file.
-func (fi *fileInfo) Name() string { return fi.name }
-
-// Size returns the length in bytes for regular files; system-dependent for others.
-func (fi *fileInfo) Size() int64 { return fi.size }
-
-// Mode returns file mode bits.
-func (fi *fileInfo) Mode() os.FileMode { return fi.mode }
-
-// ModTime returns the last modification time of the file.
-func (fi *fileInfo) ModTime() time.Time { return fi.mtime }
-
-// IsDir returns true if the file is a directory.
-func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() }
-
-func (fi *fileInfo) Sys() interface{} { return fi.sys }
-
-// FileStat holds the original unmarshalled values from a call to READDIR or *STAT.
-// It is exported for the purposes of accessing the raw values via os.FileInfo.Sys()
-type FileStat struct {
- Size uint64
- Mode uint32
- Mtime uint32
- Atime uint32
- UID uint32
- GID uint32
- Extended []StatExtended
-}
-
-// StatExtended contains additional, extended information for a FileStat.
-type StatExtended struct {
- ExtType string
- ExtData string
-}
-
-func fileInfoFromStat(st *FileStat, name string) os.FileInfo {
- fs := &fileInfo{
- name: name,
- size: int64(st.Size),
- mode: toFileMode(st.Mode),
- mtime: time.Unix(int64(st.Mtime), 0),
- sys: st,
- }
- return fs
-}
-
-func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) {
- mtime := fi.ModTime().Unix()
- atime := mtime
- var flags uint32 = ssh_FILEXFER_ATTR_SIZE |
- ssh_FILEXFER_ATTR_PERMISSIONS |
- ssh_FILEXFER_ATTR_ACMODTIME
-
- fileStat := FileStat{
- Size: uint64(fi.Size()),
- Mode: fromFileMode(fi.Mode()),
- Mtime: uint32(mtime),
- Atime: uint32(atime),
- }
-
- // os specific file stat decoding
- fileStatFromInfoOs(fi, &flags, &fileStat)
-
- return flags, fileStat
-}
-
-func unmarshalAttrs(b []byte) (*FileStat, []byte) {
- flags, b := unmarshalUint32(b)
- var fs FileStat
- if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE {
- fs.Size, b = unmarshalUint64(b)
- }
- if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID {
- fs.UID, b = unmarshalUint32(b)
- }
- if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID {
- fs.GID, b = unmarshalUint32(b)
- }
- if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS {
- fs.Mode, b = unmarshalUint32(b)
- }
- if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME {
- fs.Atime, b = unmarshalUint32(b)
- fs.Mtime, b = unmarshalUint32(b)
- }
- if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED {
- var count uint32
- count, b = unmarshalUint32(b)
- ext := make([]StatExtended, count, count)
- for i := uint32(0); i < count; i++ {
- var typ string
- var data string
- typ, b = unmarshalString(b)
- data, b = unmarshalString(b)
- ext[i] = StatExtended{typ, data}
- }
- fs.Extended = ext
- }
- return &fs, b
-}
-
-func marshalFileInfo(b []byte, fi os.FileInfo) []byte {
- // attributes variable struct, and also variable per protocol version
- // spec version 3 attributes:
- // uint32 flags
- // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE
- // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID
- // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID
- // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS
- // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME
- // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME
- // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED
- // string extended_type
- // string extended_data
- // ... more extended data (extended_type - extended_data pairs),
- // so that number of pairs equals extended_count
-
- flags, fileStat := fileStatFromInfo(fi)
-
- b = marshalUint32(b, flags)
- if flags&ssh_FILEXFER_ATTR_SIZE != 0 {
- b = marshalUint64(b, fileStat.Size)
- }
- if flags&ssh_FILEXFER_ATTR_UIDGID != 0 {
- b = marshalUint32(b, fileStat.UID)
- b = marshalUint32(b, fileStat.GID)
- }
- if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 {
- b = marshalUint32(b, fileStat.Mode)
- }
- if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 {
- b = marshalUint32(b, fileStat.Atime)
- b = marshalUint32(b, fileStat.Mtime)
- }
-
- return b
-}
-
-// toFileMode converts sftp filemode bits to the os.FileMode specification
-func toFileMode(mode uint32) os.FileMode {
- var fm = os.FileMode(mode & 0777)
- switch mode & syscall.S_IFMT {
- case syscall.S_IFBLK:
- fm |= os.ModeDevice
- case syscall.S_IFCHR:
- fm |= os.ModeDevice | os.ModeCharDevice
- case syscall.S_IFDIR:
- fm |= os.ModeDir
- case syscall.S_IFIFO:
- fm |= os.ModeNamedPipe
- case syscall.S_IFLNK:
- fm |= os.ModeSymlink
- case syscall.S_IFREG:
- // nothing to do
- case syscall.S_IFSOCK:
- fm |= os.ModeSocket
- }
- if mode&syscall.S_ISGID != 0 {
- fm |= os.ModeSetgid
- }
- if mode&syscall.S_ISUID != 0 {
- fm |= os.ModeSetuid
- }
- if mode&syscall.S_ISVTX != 0 {
- fm |= os.ModeSticky
- }
- return fm
-}
-
-// fromFileMode converts from the os.FileMode specification to sftp filemode bits
-func fromFileMode(mode os.FileMode) uint32 {
- ret := uint32(0)
-
- if mode&os.ModeDevice != 0 {
- if mode&os.ModeCharDevice != 0 {
- ret |= syscall.S_IFCHR
- } else {
- ret |= syscall.S_IFBLK
- }
- }
- if mode&os.ModeDir != 0 {
- ret |= syscall.S_IFDIR
- }
- if mode&os.ModeSymlink != 0 {
- ret |= syscall.S_IFLNK
- }
- if mode&os.ModeNamedPipe != 0 {
- ret |= syscall.S_IFIFO
- }
- if mode&os.ModeSetgid != 0 {
- ret |= syscall.S_ISGID
- }
- if mode&os.ModeSetuid != 0 {
- ret |= syscall.S_ISUID
- }
- if mode&os.ModeSticky != 0 {
- ret |= syscall.S_ISVTX
- }
- if mode&os.ModeSocket != 0 {
- ret |= syscall.S_IFSOCK
- }
-
- if mode&os.ModeType == 0 {
- ret |= syscall.S_IFREG
- }
- ret |= uint32(mode & os.ModePerm)
-
- return ret
-}
diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go
deleted file mode 100644
index 81cf3ea..0000000
--- a/vendor/github.com/pkg/sftp/attrs_stubs.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !cgo,!plan9 windows android
-
-package sftp
-
-import (
- "os"
-)
-
-func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
- // todo
-}
diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go
deleted file mode 100644
index ab6ecde..0000000
--- a/vendor/github.com/pkg/sftp/attrs_unix.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
-// +build cgo
-
-package sftp
-
-import (
- "os"
- "syscall"
-)
-
-func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) {
- if statt, ok := fi.Sys().(*syscall.Stat_t); ok {
- *flags |= ssh_FILEXFER_ATTR_UIDGID
- fileStat.UID = statt.Uid
- fileStat.GID = statt.Gid
- }
-}
diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go
deleted file mode 100644
index e95bbab..0000000
--- a/vendor/github.com/pkg/sftp/client.go
+++ /dev/null
@@ -1,1128 +0,0 @@
-package sftp
-
-import (
- "bytes"
- "encoding/binary"
- "io"
- "os"
- "path"
- "sync/atomic"
- "time"
-
- "github.com/kr/fs"
- "github.com/pkg/errors"
- "golang.org/x/crypto/ssh"
-)
-
-// MaxPacket sets the maximum size of the payload.
-func MaxPacket(size int) func(*Client) error {
- return func(c *Client) error {
- if size < 1<<15 {
- return errors.Errorf("size must be greater or equal to 32k")
- }
- c.maxPacket = size
- return nil
- }
-}
-
-// NewClient creates a new SFTP client on conn, using zero or more option
-// functions.
-func NewClient(conn *ssh.Client, opts ...func(*Client) error) (*Client, error) {
- s, err := conn.NewSession()
- if err != nil {
- return nil, err
- }
- if err := s.RequestSubsystem("sftp"); err != nil {
- return nil, err
- }
- pw, err := s.StdinPipe()
- if err != nil {
- return nil, err
- }
- pr, err := s.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- return NewClientPipe(pr, pw, opts...)
-}
-
-// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser.
-// This can be used for connecting to an SFTP server over TCP/TLS or by using
-// the system's ssh client program (e.g. via exec.Command).
-func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...func(*Client) error) (*Client, error) {
- sftp := &Client{
- clientConn: clientConn{
- conn: conn{
- Reader: rd,
- WriteCloser: wr,
- },
- inflight: make(map[uint32]chan<- result),
- },
- maxPacket: 1 << 15,
- }
- if err := sftp.applyOptions(opts...); err != nil {
- wr.Close()
- return nil, err
- }
- if err := sftp.sendInit(); err != nil {
- wr.Close()
- return nil, err
- }
- if err := sftp.recvVersion(); err != nil {
- wr.Close()
- return nil, err
- }
- sftp.clientConn.wg.Add(1)
- go sftp.loop()
- return sftp, nil
-}
-
-// Client represents an SFTP session on a *ssh.ClientConn SSH connection.
-// Multiple Clients can be active on a single SSH connection, and a Client
-// may be called concurrently from multiple Goroutines.
-//
-// Client implements the github.com/kr/fs.FileSystem interface.
-type Client struct {
- clientConn
-
- maxPacket int // max packet size read or written.
- nextid uint32
-}
-
-// Create creates the named file mode 0666 (before umask), truncating it if
-// it already exists. If successful, methods on the returned File can be
-// used for I/O; the associated file descriptor has mode O_RDWR.
-func (c *Client) Create(path string) (*File, error) {
- return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
-}
-
-const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
-
-func (c *Client) sendInit() error {
- return c.clientConn.conn.sendPacket(sshFxInitPacket{
- Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
- })
-}
-
-// returns the next value of c.nextid
-func (c *Client) nextID() uint32 {
- return atomic.AddUint32(&c.nextid, 1)
-}
-
-func (c *Client) recvVersion() error {
- typ, data, err := c.recvPacket()
- if err != nil {
- return err
- }
- if typ != ssh_FXP_VERSION {
- return &unexpectedPacketErr{ssh_FXP_VERSION, typ}
- }
-
- version, _ := unmarshalUint32(data)
- if version != sftpProtocolVersion {
- return &unexpectedVersionErr{sftpProtocolVersion, version}
- }
-
- return nil
-}
-
-// Walk returns a new Walker rooted at root.
-func (c *Client) Walk(root string) *fs.Walker {
- return fs.WalkFS(root, c)
-}
-
-// ReadDir reads the directory named by dirname and returns a list of
-// directory entries.
-func (c *Client) ReadDir(p string) ([]os.FileInfo, error) {
- handle, err := c.opendir(p)
- if err != nil {
- return nil, err
- }
- defer c.close(handle) // this has to defer earlier than the lock below
- var attrs []os.FileInfo
- var done = false
- for !done {
- id := c.nextID()
- typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{
- ID: id,
- Handle: handle,
- })
- if err1 != nil {
- err = err1
- done = true
- break
- }
- switch typ {
- case ssh_FXP_NAME:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return nil, &unexpectedIDErr{id, sid}
- }
- count, data := unmarshalUint32(data)
- for i := uint32(0); i < count; i++ {
- var filename string
- filename, data = unmarshalString(data)
- _, data = unmarshalString(data) // discard longname
- var attr *FileStat
- attr, data = unmarshalAttrs(data)
- if filename == "." || filename == ".." {
- continue
- }
- attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename)))
- }
- case ssh_FXP_STATUS:
- // TODO(dfc) scope warning!
- err = normaliseError(unmarshalStatus(id, data))
- done = true
- default:
- return nil, unimplementedPacketErr(typ)
- }
- }
- if err == io.EOF {
- err = nil
- }
- return attrs, err
-}
-
-func (c *Client) opendir(path string) (string, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpOpendirPacket{
- ID: id,
- Path: path,
- })
- if err != nil {
- return "", err
- }
- switch typ {
- case ssh_FXP_HANDLE:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return "", &unexpectedIDErr{id, sid}
- }
- handle, _ := unmarshalString(data)
- return handle, nil
- case ssh_FXP_STATUS:
- return "", unmarshalStatus(id, data)
- default:
- return "", unimplementedPacketErr(typ)
- }
-}
-
-// Stat returns a FileInfo structure describing the file specified by path 'p'.
-// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file.
-func (c *Client) Stat(p string) (os.FileInfo, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpStatPacket{
- ID: id,
- Path: p,
- })
- if err != nil {
- return nil, err
- }
- switch typ {
- case ssh_FXP_ATTRS:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return nil, &unexpectedIDErr{id, sid}
- }
- attr, _ := unmarshalAttrs(data)
- return fileInfoFromStat(attr, path.Base(p)), nil
- case ssh_FXP_STATUS:
- return nil, normaliseError(unmarshalStatus(id, data))
- default:
- return nil, unimplementedPacketErr(typ)
- }
-}
-
-// Lstat returns a FileInfo structure describing the file specified by path 'p'.
-// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link.
-func (c *Client) Lstat(p string) (os.FileInfo, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpLstatPacket{
- ID: id,
- Path: p,
- })
- if err != nil {
- return nil, err
- }
- switch typ {
- case ssh_FXP_ATTRS:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return nil, &unexpectedIDErr{id, sid}
- }
- attr, _ := unmarshalAttrs(data)
- return fileInfoFromStat(attr, path.Base(p)), nil
- case ssh_FXP_STATUS:
- return nil, normaliseError(unmarshalStatus(id, data))
- default:
- return nil, unimplementedPacketErr(typ)
- }
-}
-
-// ReadLink reads the target of a symbolic link.
-func (c *Client) ReadLink(p string) (string, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpReadlinkPacket{
- ID: id,
- Path: p,
- })
- if err != nil {
- return "", err
- }
- switch typ {
- case ssh_FXP_NAME:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return "", &unexpectedIDErr{id, sid}
- }
- count, data := unmarshalUint32(data)
- if count != 1 {
- return "", unexpectedCount(1, count)
- }
- filename, _ := unmarshalString(data) // ignore dummy attributes
- return filename, nil
- case ssh_FXP_STATUS:
- return "", unmarshalStatus(id, data)
- default:
- return "", unimplementedPacketErr(typ)
- }
-}
-
-// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'
-func (c *Client) Symlink(oldname, newname string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpSymlinkPacket{
- ID: id,
- Linkpath: newname,
- Targetpath: oldname,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-// setstat is a convenience wrapper for changing various attributes of the named file.
-func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpSetstatPacket{
- ID: id,
- Path: path,
- Flags: flags,
- Attrs: attrs,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-// Chtimes changes the access and modification times of the named file.
-func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
- type times struct {
- Atime uint32
- Mtime uint32
- }
- attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
- return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs)
-}
-
-// Chown changes the user and group owners of the named file.
-func (c *Client) Chown(path string, uid, gid int) error {
- type owner struct {
- UID uint32
- GID uint32
- }
- attrs := owner{uint32(uid), uint32(gid)}
- return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs)
-}
-
-// Chmod changes the permissions of the named file.
-func (c *Client) Chmod(path string, mode os.FileMode) error {
- return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode))
-}
-
-// Truncate sets the size of the named file. Although it may be safely assumed
-// that if the size is less than its current size it will be truncated to fit,
-// the SFTP protocol does not specify how the server should behave when setting
-// a size greater than the current size.
-func (c *Client) Truncate(path string, size int64) error {
- return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size))
-}
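Chtimes, Chown, Chmod and Truncate all funnel through setstat with a different ssh_FILEXFER_ATTR_* flag; a hedged usage sketch (the path is hypothetical):

    if err := client.Chmod("/tmp/report.txt", 0640); err != nil {
        log.Fatal(err)
    }
    now := time.Now()
    if err := client.Chtimes("/tmp/report.txt", now, now); err != nil {
        log.Fatal(err)
    }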
-
-// Open opens the named file for reading. If successful, methods on the
-// returned file can be used for reading; the associated file descriptor
-// has mode O_RDONLY.
-func (c *Client) Open(path string) (*File, error) {
- return c.open(path, flags(os.O_RDONLY))
-}
-
-// OpenFile is the generalized open call; most users will use Open or
-// Create instead. It opens the named file with specified flag (O_RDONLY
-// etc.). If successful, methods on the returned File can be used for I/O.
-func (c *Client) OpenFile(path string, f int) (*File, error) {
- return c.open(path, flags(f))
-}
-
-func (c *Client) open(path string, pflags uint32) (*File, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpOpenPacket{
- ID: id,
- Path: path,
- Pflags: pflags,
- })
- if err != nil {
- return nil, err
- }
- switch typ {
- case ssh_FXP_HANDLE:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return nil, &unexpectedIDErr{id, sid}
- }
- handle, _ := unmarshalString(data)
- return &File{c: c, path: path, handle: handle}, nil
- case ssh_FXP_STATUS:
- return nil, normaliseError(unmarshalStatus(id, data))
- default:
- return nil, unimplementedPacketErr(typ)
- }
-}
-
-// close closes a handle previously returned in the response
-// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
-// immediately after this request has been sent.
-func (c *Client) close(handle string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpClosePacket{
- ID: id,
- Handle: handle,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-func (c *Client) fstat(handle string) (*FileStat, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpFstatPacket{
- ID: id,
- Handle: handle,
- })
- if err != nil {
- return nil, err
- }
- switch typ {
- case ssh_FXP_ATTRS:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return nil, &unexpectedIDErr{id, sid}
- }
- attr, _ := unmarshalAttrs(data)
- return attr, nil
- case ssh_FXP_STATUS:
- return nil, unmarshalStatus(id, data)
- default:
- return nil, unimplementedPacketErr(typ)
- }
-}
-
-// StatVFS retrieves VFS statistics from a remote host.
-//
-// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
-// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
-func (c *Client) StatVFS(path string) (*StatVFS, error) {
- // send the StatVFS packet to the server
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpStatvfsPacket{
- ID: id,
- Path: path,
- })
- if err != nil {
- return nil, err
- }
-
- switch typ {
- // server responded with valid data
- case ssh_FXP_EXTENDED_REPLY:
- var response StatVFS
- err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
- if err != nil {
- return nil, errors.New("can not parse reply")
- }
-
- return &response, nil
-
- // the request failed
- case ssh_FXP_STATUS:
- return nil, errors.New(fxp(ssh_FXP_STATUS).String())
-
- default:
- return nil, unimplementedPacketErr(typ)
- }
-}
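On servers that support the statvfs@openssh.com extension, StatVFS combines with the TotalSpace and FreeSpace helpers defined later in this diff; a minimal sketch:

    st, err := client.StatVFS("/")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("total %d bytes, free %d bytes\n", st.TotalSpace(), st.FreeSpace())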
-
-// Join joins any number of path elements into a single path, adding a
-// separating slash if necessary. The result is Cleaned; in particular, all
-// empty strings are ignored.
-func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
-
-// Remove removes the specified file or directory. An error will be returned if no
-// file or directory with the specified path exists, or if the specified directory
-// is not empty.
-func (c *Client) Remove(path string) error {
- err := c.removeFile(path)
- if err, ok := err.(*StatusError); ok {
- switch err.Code {
- // some servers, *cough* osx *cough*, return EPERM, not ENODIR.
- // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY
- case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY:
- return c.removeDirectory(path)
- }
- }
- return err
-}
-
-func (c *Client) removeFile(path string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpRemovePacket{
- ID: id,
- Filename: path,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-func (c *Client) removeDirectory(path string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpRmdirPacket{
- ID: id,
- Path: path,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-// Rename renames a file.
-func (c *Client) Rename(oldname, newname string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpRenamePacket{
- ID: id,
- Oldpath: oldname,
- Newpath: newname,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-func (c *Client) realpath(path string) (string, error) {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpRealpathPacket{
- ID: id,
- Path: path,
- })
- if err != nil {
- return "", err
- }
- switch typ {
- case ssh_FXP_NAME:
- sid, data := unmarshalUint32(data)
- if sid != id {
- return "", &unexpectedIDErr{id, sid}
- }
- count, data := unmarshalUint32(data)
- if count != 1 {
- return "", unexpectedCount(1, count)
- }
- filename, _ := unmarshalString(data) // ignore attributes
- return filename, nil
- case ssh_FXP_STATUS:
- return "", normaliseError(unmarshalStatus(id, data))
- default:
- return "", unimplementedPacketErr(typ)
- }
-}
-
-// Getwd returns the current working directory of the server. Operations
-// involving relative paths will be based at this location.
-func (c *Client) Getwd() (string, error) {
- return c.realpath(".")
-}
-
-// Mkdir creates the specified directory. An error will be returned if a file or
-// directory with the specified path already exists, or if the directory's
-// parent folder does not exist (the method cannot create complete paths).
-func (c *Client) Mkdir(path string) error {
- id := c.nextID()
- typ, data, err := c.sendPacket(sshFxpMkdirPacket{
- ID: id,
- Path: path,
- })
- if err != nil {
- return err
- }
- switch typ {
- case ssh_FXP_STATUS:
- return normaliseError(unmarshalStatus(id, data))
- default:
- return unimplementedPacketErr(typ)
- }
-}
-
-// applyOptions applies options functions to the Client.
-// If an error is encountered, option processing ceases.
-func (c *Client) applyOptions(opts ...func(*Client) error) error {
- for _, f := range opts {
- if err := f(c); err != nil {
- return err
- }
- }
- return nil
-}
-
-// File represents a remote file.
-type File struct {
- c *Client
- path string
- handle string
- offset uint64 // current offset within remote file
-}
-
-// Close closes the File, rendering it unusable for I/O. It returns an
-// error, if any.
-func (f *File) Close() error {
- return f.c.close(f.handle)
-}
-
-// Name returns the name of the file as presented to Open or Create.
-func (f *File) Name() string {
- return f.path
-}
-
-const maxConcurrentRequests = 64
-
-// Read reads up to len(b) bytes from the File. It returns the number of
-// bytes read and an error, if any. EOF is signaled by a zero count with
-// err set to io.EOF.
-func (f *File) Read(b []byte) (int, error) {
- // Split the read into multiple maxPacket sized concurrent reads
- // bounded by maxConcurrentRequests. This allows reads with a suitably
- // large buffer to transfer data at a much faster rate due to
- // overlapping round trip times.
- inFlight := 0
- desiredInFlight := 1
- offset := f.offset
- ch := make(chan result, 1)
- type inflightRead struct {
- b []byte
- offset uint64
- }
- reqs := map[uint32]inflightRead{}
- type offsetErr struct {
- offset uint64
- err error
- }
- var firstErr offsetErr
-
- sendReq := func(b []byte, offset uint64) {
- reqID := f.c.nextID()
- f.c.dispatchRequest(ch, sshFxpReadPacket{
- ID: reqID,
- Handle: f.handle,
- Offset: offset,
- Len: uint32(len(b)),
- })
- inFlight++
- reqs[reqID] = inflightRead{b: b, offset: offset}
- }
-
- var read int
- for len(b) > 0 || inFlight > 0 {
- for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil {
- l := min(len(b), f.c.maxPacket)
- rb := b[:l]
- sendReq(rb, offset)
- offset += uint64(l)
- b = b[l:]
- }
-
- if inFlight == 0 {
- break
- }
- select {
- case res := <-ch:
- inFlight--
- if res.err != nil {
- firstErr = offsetErr{offset: 0, err: res.err}
- break
- }
- reqID, data := unmarshalUint32(res.data)
- req, ok := reqs[reqID]
- if !ok {
- firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
- break
- }
- delete(reqs, reqID)
- switch res.typ {
- case ssh_FXP_STATUS:
- if firstErr.err == nil || req.offset < firstErr.offset {
- firstErr = offsetErr{
- offset: req.offset,
- err: normaliseError(unmarshalStatus(reqID, res.data)),
- }
- break
- }
- case ssh_FXP_DATA:
- l, data := unmarshalUint32(data)
- n := copy(req.b, data[:l])
- read += n
- if n < len(req.b) {
- sendReq(req.b[l:], req.offset+uint64(l))
- }
- if desiredInFlight < maxConcurrentRequests {
- desiredInFlight++
- }
- default:
- firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
- break
- }
- }
- }
- // If the error is anything other than EOF, then there
- // may be gaps in the data copied to the buffer so it's
- // best to return 0 so the caller can't make any
- // incorrect assumptions about the state of the buffer.
- if firstErr.err != nil && firstErr.err != io.EOF {
- read = 0
- }
- f.offset += uint64(read)
- return read, firstErr.err
-}
-
-// WriteTo writes the file to w. The return value is the number of bytes
-// written. Any error encountered during the write is also returned.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
- fi, err := f.Stat()
- if err != nil {
- return 0, err
- }
- inFlight := 0
- desiredInFlight := 1
- offset := f.offset
- writeOffset := offset
- fileSize := uint64(fi.Size())
- ch := make(chan result, 1)
- type inflightRead struct {
- b []byte
- offset uint64
- }
- reqs := map[uint32]inflightRead{}
- pendingWrites := map[uint64][]byte{}
- type offsetErr struct {
- offset uint64
- err error
- }
- var firstErr offsetErr
-
- sendReq := func(b []byte, offset uint64) {
- reqID := f.c.nextID()
- f.c.dispatchRequest(ch, sshFxpReadPacket{
- ID: reqID,
- Handle: f.handle,
- Offset: offset,
- Len: uint32(len(b)),
- })
- inFlight++
- reqs[reqID] = inflightRead{b: b, offset: offset}
- }
-
- var copied int64
- for firstErr.err == nil || inFlight > 0 {
- for inFlight < desiredInFlight && firstErr.err == nil {
- b := make([]byte, f.c.maxPacket)
- sendReq(b, offset)
- offset += uint64(f.c.maxPacket)
- if offset > fileSize {
- desiredInFlight = 1
- }
- }
-
- if inFlight == 0 {
- break
- }
- select {
- case res := <-ch:
- inFlight--
- if res.err != nil {
- firstErr = offsetErr{offset: 0, err: res.err}
- break
- }
- reqID, data := unmarshalUint32(res.data)
- req, ok := reqs[reqID]
- if !ok {
- firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
- break
- }
- delete(reqs, reqID)
- switch res.typ {
- case ssh_FXP_STATUS:
- if firstErr.err == nil || req.offset < firstErr.offset {
- firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))}
- break
- }
- case ssh_FXP_DATA:
- l, data := unmarshalUint32(data)
- if req.offset == writeOffset {
- nbytes, err := w.Write(data)
- copied += int64(nbytes)
- if err != nil {
- firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err}
- break
- }
- if nbytes < int(l) {
- firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite}
- break
- }
- switch {
- case offset > fileSize:
- desiredInFlight = 1
- case desiredInFlight < maxConcurrentRequests:
- desiredInFlight++
- }
- writeOffset += uint64(nbytes)
- for pendingData, ok := pendingWrites[writeOffset]; ok; pendingData, ok = pendingWrites[writeOffset] {
- nbytes, err := w.Write(pendingData)
- if err != nil {
- firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err}
- break
- }
- if nbytes < len(pendingData) {
- firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite}
- break
- }
- writeOffset += uint64(nbytes)
- inFlight--
- }
- } else {
- // Don't write the data yet because
- // this response came in out of order
- // and we need to wait for responses
- // for earlier segments of the file.
- inFlight++ // Pending writes should still be considered inFlight.
- pendingWrites[req.offset] = data
- }
- default:
- firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
- break
- }
- }
- }
- if firstErr.err != io.EOF {
- return copied, firstErr.err
- }
- return copied, nil
-}
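Because File implements io.WriterTo, io.Copy automatically takes this pipelined path when downloading; a sketch with hypothetical file names:

    remote, err := client.Open("/remote/data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer remote.Close()
    local, err := os.Create("data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer local.Close()
    if _, err := io.Copy(local, remote); err != nil { // io.Copy calls remote.WriteTo(local)
        log.Fatal(err)
    }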
-
-// Stat returns the FileInfo structure describing the file. It returns an
-// error, if any.
-func (f *File) Stat() (os.FileInfo, error) {
- fs, err := f.c.fstat(f.handle)
- if err != nil {
- return nil, err
- }
- return fileInfoFromStat(fs, path.Base(f.path)), nil
-}
-
-// Write writes len(b) bytes to the File. It returns the number of bytes
-// written and an error, if any. Write returns a non-nil error when n !=
-// len(b).
-func (f *File) Write(b []byte) (int, error) {
- // Split the write into multiple maxPacket sized concurrent writes
- // bounded by maxConcurrentRequests. This allows writes with a suitably
- // large buffer to transfer data at a much faster rate due to
- // overlapping round trip times.
- inFlight := 0
- desiredInFlight := 1
- offset := f.offset
- ch := make(chan result, 1)
- var firstErr error
- written := len(b)
- for len(b) > 0 || inFlight > 0 {
- for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil {
- l := min(len(b), f.c.maxPacket)
- rb := b[:l]
- f.c.dispatchRequest(ch, sshFxpWritePacket{
- ID: f.c.nextID(),
- Handle: f.handle,
- Offset: offset,
- Length: uint32(len(rb)),
- Data: rb,
- })
- inFlight++
- offset += uint64(l)
- b = b[l:]
- }
-
- if inFlight == 0 {
- break
- }
- select {
- case res := <-ch:
- inFlight--
- if res.err != nil {
- firstErr = res.err
- break
- }
- switch res.typ {
- case ssh_FXP_STATUS:
- id, _ := unmarshalUint32(res.data)
- err := normaliseError(unmarshalStatus(id, res.data))
- if err != nil && firstErr == nil {
- firstErr = err
- break
- }
- if desiredInFlight < maxConcurrentRequests {
- desiredInFlight++
- }
- default:
- firstErr = unimplementedPacketErr(res.typ)
- break
- }
- }
- }
- // If error is non-nil, then there may be gaps in the data written to
- // the file so it's best to return 0 so the caller can't make any
- // incorrect assumptions about the state of the file.
- if firstErr != nil {
- written = 0
- }
- f.offset += uint64(written)
- return written, firstErr
-}
-
-// ReadFrom reads data from r until EOF and writes it to the file. The return
-// value is the number of bytes read. Any error except io.EOF encountered
-// during the read is also returned.
-func (f *File) ReadFrom(r io.Reader) (int64, error) {
- inFlight := 0
- desiredInFlight := 1
- offset := f.offset
- ch := make(chan result, 1)
- var firstErr error
- read := int64(0)
- b := make([]byte, f.c.maxPacket)
- for inFlight > 0 || firstErr == nil {
- for inFlight < desiredInFlight && firstErr == nil {
- n, err := r.Read(b)
- if err != nil {
- firstErr = err
- }
- f.c.dispatchRequest(ch, sshFxpWritePacket{
- ID: f.c.nextID(),
- Handle: f.handle,
- Offset: offset,
- Length: uint32(n),
- Data: b[:n],
- })
- inFlight++
- offset += uint64(n)
- read += int64(n)
- }
-
- if inFlight == 0 {
- break
- }
- select {
- case res := <-ch:
- inFlight--
- if res.err != nil {
- firstErr = res.err
- break
- }
- switch res.typ {
- case ssh_FXP_STATUS:
- id, _ := unmarshalUint32(res.data)
- err := normaliseError(unmarshalStatus(id, res.data))
- if err != nil && firstErr == nil {
- firstErr = err
- break
- }
- if desiredInFlight < maxConcurrentRequests {
- desiredInFlight++
- }
- default:
- firstErr = unimplementedPacketErr(res.typ)
- break
- }
- }
- }
- if firstErr == io.EOF {
- firstErr = nil
- }
- // If error is non-nil, then there may be gaps in the data written to
- // the file so it's best to return 0 so the caller can't make any
- // incorrect assumptions about the state of the file.
- if firstErr != nil {
- read = 0
- }
- f.offset += uint64(read)
- return read, firstErr
-}
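Symmetrically, ReadFrom lets an upload pipeline its writes; a sketch with hypothetical file names that calls it directly:

    local, err := os.Open("data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer local.Close()
    remote, err := client.Create("/remote/data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer remote.Close()
    if _, err := remote.ReadFrom(local); err != nil { // reads local until EOF, writes concurrently
        log.Fatal(err)
    }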
-
-// Seek implements io.Seeker by setting the client offset for the next Read or
-// Write. It returns the resulting offset. Seeking before or after the end of
-// the file is undefined. Seeking relative to the end calls Stat.
-func (f *File) Seek(offset int64, whence int) (int64, error) {
- switch whence {
- case os.SEEK_SET:
- f.offset = uint64(offset)
- case os.SEEK_CUR:
- f.offset = uint64(int64(f.offset) + offset)
- case os.SEEK_END:
- fi, err := f.Stat()
- if err != nil {
- return int64(f.offset), err
- }
- f.offset = uint64(fi.Size() + offset)
- default:
- return int64(f.offset), unimplementedSeekWhence(whence)
- }
- return int64(f.offset), nil
-}
-
-// Chown changes the uid/gid of the current file.
-func (f *File) Chown(uid, gid int) error {
- return f.c.Chown(f.path, uid, gid)
-}
-
-// Chmod changes the permissions of the current file.
-func (f *File) Chmod(mode os.FileMode) error {
- return f.c.Chmod(f.path, mode)
-}
-
-// Truncate sets the size of the current file. Although it may be safely assumed
-// that if the size is less than its current size it will be truncated to fit,
-// the SFTP protocol does not specify what behavior the server should do when setting
-// size greater than the current size.
-func (f *File) Truncate(size int64) error {
- return f.c.Truncate(f.path, size)
-}
-
-func min(a, b int) int {
- if a > b {
- return b
- }
- return a
-}
-
-// normaliseError normalises an error into a more standard form that can be
-// checked against stdlib errors like io.EOF or os.ErrNotExist.
-func normaliseError(err error) error {
- switch err := err.(type) {
- case *StatusError:
- switch err.Code {
- case ssh_FX_EOF:
- return io.EOF
- case ssh_FX_NO_SUCH_FILE:
- return os.ErrNotExist
- case ssh_FX_OK:
- return nil
- default:
- return err
- }
- default:
- return err
- }
-}
-
-func unmarshalStatus(id uint32, data []byte) error {
- sid, data := unmarshalUint32(data)
- if sid != id {
- return &unexpectedIDErr{id, sid}
- }
- code, data := unmarshalUint32(data)
- msg, data, err := unmarshalStringSafe(data)
- if err != nil {
- return err
- }
- lang, _, _ := unmarshalStringSafe(data)
- return &StatusError{
- Code: code,
- msg: msg,
- lang: lang,
- }
-}
-
-func marshalStatus(b []byte, err StatusError) []byte {
- b = marshalUint32(b, err.Code)
- b = marshalString(b, err.msg)
- b = marshalString(b, err.lang)
- return b
-}
-
-// flags converts the flags passed to OpenFile into ssh flags.
-// Unsupported flags are ignored.
-func flags(f int) uint32 {
- var out uint32
- switch f & os.O_WRONLY {
- case os.O_WRONLY:
- out |= ssh_FXF_WRITE
- case os.O_RDONLY:
- out |= ssh_FXF_READ
- }
- if f&os.O_RDWR == os.O_RDWR {
- out |= ssh_FXF_READ | ssh_FXF_WRITE
- }
- if f&os.O_APPEND == os.O_APPEND {
- out |= ssh_FXF_APPEND
- }
- if f&os.O_CREATE == os.O_CREATE {
- out |= ssh_FXF_CREAT
- }
- if f&os.O_TRUNC == os.O_TRUNC {
- out |= ssh_FXF_TRUNC
- }
- if f&os.O_EXCL == os.O_EXCL {
- out |= ssh_FXF_EXCL
- }
- return out
-}
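As a worked example of the mapping above (not part of the original source), the flag combination used by Create translates as follows:

    pflags := flags(os.O_RDWR | os.O_CREATE | os.O_TRUNC)
    // pflags == ssh_FXF_READ | ssh_FXF_WRITE | ssh_FXF_CREAT | ssh_FXF_TRUNC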
diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go
deleted file mode 100644
index 9b03d11..0000000
--- a/vendor/github.com/pkg/sftp/conn.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package sftp
-
-import (
- "encoding"
- "io"
- "sync"
-
- "github.com/pkg/errors"
-)
-
-// conn implements a bidirectional channel on which client and server
-// connections are multiplexed.
-type conn struct {
- io.Reader
- io.WriteCloser
- sync.Mutex // used to serialise writes to sendPacket
-}
-
-func (c *conn) recvPacket() (uint8, []byte, error) {
- return recvPacket(c)
-}
-
-func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
- c.Lock()
- defer c.Unlock()
- return sendPacket(c, m)
-}
-
-type clientConn struct {
- conn
- wg sync.WaitGroup
- sync.Mutex // protects inflight
- inflight map[uint32]chan<- result // outstanding requests
-}
-
-// Close closes the SFTP session.
-func (c *clientConn) Close() error {
- defer c.wg.Wait()
- return c.conn.Close()
-}
-
-func (c *clientConn) loop() {
- defer c.wg.Done()
- err := c.recv()
- if err != nil {
- c.broadcastErr(err)
- }
-}
-
-// recv continuously reads from the server and forwards responses to the
-// appropriate channel.
-func (c *clientConn) recv() error {
- defer c.conn.Close()
- for {
- typ, data, err := c.recvPacket()
- if err != nil {
- return err
- }
- sid, _ := unmarshalUint32(data)
- c.Lock()
- ch, ok := c.inflight[sid]
- delete(c.inflight, sid)
- c.Unlock()
- if !ok {
- // This is an unexpected occurrence. Send the error
- // back to all listeners so that they terminate
- // gracefully.
- return errors.Errorf("sid: %v not found", sid)
- }
- ch <- result{typ: typ, data: data}
- }
-}
-
-// result captures the result of receiving a packet from the server
-type result struct {
- typ byte
- data []byte
- err error
-}
-
-type idmarshaler interface {
- id() uint32
- encoding.BinaryMarshaler
-}
-
-func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) {
- ch := make(chan result, 1)
- c.dispatchRequest(ch, p)
- s := <-ch
- return s.typ, s.data, s.err
-}
-
-func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
- c.Lock()
- c.inflight[p.id()] = ch
- if err := c.conn.sendPacket(p); err != nil {
- delete(c.inflight, p.id())
- ch <- result{err: err}
- }
- c.Unlock()
-}
-
-// broadcastErr sends an error to all goroutines waiting for a response.
-func (c *clientConn) broadcastErr(err error) {
- c.Lock()
- listeners := make([]chan<- result, 0, len(c.inflight))
- for _, ch := range c.inflight {
- listeners = append(listeners, ch)
- }
- c.Unlock()
- for _, ch := range listeners {
- ch <- result{err: err}
- }
-}
-
-type serverConn struct {
- conn
-}
-
-func (s *serverConn) sendError(p id, err error) error {
- return s.sendPacket(statusFromError(p, err))
-}
diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go
deleted file mode 100644
index 3e264ab..0000000
--- a/vendor/github.com/pkg/sftp/debug.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build debug
-
-package sftp
-
-import "log"
-
-func debug(fmt string, args ...interface{}) {
- log.Printf(fmt, args...)
-}
diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go
deleted file mode 100644
index fba4159..0000000
--- a/vendor/github.com/pkg/sftp/packet.go
+++ /dev/null
@@ -1,901 +0,0 @@
-package sftp
-
-import (
- "bytes"
- "encoding"
- "encoding/binary"
- "fmt"
- "io"
- "os"
- "reflect"
-
- "github.com/pkg/errors"
-)
-
-var (
- errShortPacket = errors.New("packet too short")
- errUnknownExtendedPacket = errors.New("unknown extended packet")
-)
-
-const (
- debugDumpTxPacket = false
- debugDumpRxPacket = false
- debugDumpTxPacketBytes = false
- debugDumpRxPacketBytes = false
-)
-
-func marshalUint32(b []byte, v uint32) []byte {
- return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
-}
-
-func marshalUint64(b []byte, v uint64) []byte {
- return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
-}
-
-func marshalString(b []byte, v string) []byte {
- return append(marshalUint32(b, uint32(len(v))), v...)
-}
-
-func marshal(b []byte, v interface{}) []byte {
- if v == nil {
- return b
- }
- switch v := v.(type) {
- case uint8:
- return append(b, v)
- case uint32:
- return marshalUint32(b, v)
- case uint64:
- return marshalUint64(b, v)
- case string:
- return marshalString(b, v)
- case os.FileInfo:
- return marshalFileInfo(b, v)
- default:
- switch d := reflect.ValueOf(v); d.Kind() {
- case reflect.Struct:
- for i, n := 0, d.NumField(); i < n; i++ {
- b = marshal(b, d.Field(i).Interface())
- }
- return b
- case reflect.Slice:
- for i, n := 0, d.Len(); i < n; i++ {
- b = marshal(b, d.Index(i).Interface())
- }
- return b
- default:
- panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v))
- }
- }
-}
-
-func unmarshalUint32(b []byte) (uint32, []byte) {
- v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
- return v, b[4:]
-}
-
-func unmarshalUint32Safe(b []byte) (uint32, []byte, error) {
- var v uint32
- if len(b) < 4 {
- return 0, nil, errShortPacket
- }
- v, b = unmarshalUint32(b)
- return v, b, nil
-}
-
-func unmarshalUint64(b []byte) (uint64, []byte) {
- h, b := unmarshalUint32(b)
- l, b := unmarshalUint32(b)
- return uint64(h)<<32 | uint64(l), b
-}
-
-func unmarshalUint64Safe(b []byte) (uint64, []byte, error) {
- var v uint64
- if len(b) < 8 {
- return 0, nil, errShortPacket
- }
- v, b = unmarshalUint64(b)
- return v, b, nil
-}
-
-func unmarshalString(b []byte) (string, []byte) {
- n, b := unmarshalUint32(b)
- return string(b[:n]), b[n:]
-}
-
-func unmarshalStringSafe(b []byte) (string, []byte, error) {
- n, b, err := unmarshalUint32Safe(b)
- if err != nil {
- return "", nil, err
- }
- if int64(n) > int64(len(b)) {
- return "", nil, errShortPacket
- }
- return string(b[:n]), b[n:], nil
-}
-
-// sendPacket marshals m and writes it to w as a length-prefixed SFTP packet.
-func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error {
- bb, err := m.MarshalBinary()
- if err != nil {
- return errors.Errorf("binary marshaller failed: %v", err)
- }
- if debugDumpTxPacketBytes {
- debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:])
- } else if debugDumpTxPacket {
- debug("send packet: %s %d bytes", fxp(bb[0]), len(bb))
- }
- l := uint32(len(bb))
- hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}
- _, err = w.Write(hdr)
- if err != nil {
- return errors.Errorf("failed to send packet header: %v", err)
- }
- _, err = w.Write(bb)
- if err != nil {
- return errors.Errorf("failed to send packet body: %v", err)
- }
- return nil
-}
-
-func recvPacket(r io.Reader) (uint8, []byte, error) {
- var b = []byte{0, 0, 0, 0}
- if _, err := io.ReadFull(r, b); err != nil {
- return 0, nil, err
- }
- l, _ := unmarshalUint32(b)
- b = make([]byte, l)
- if _, err := io.ReadFull(r, b); err != nil {
- debug("recv packet %d bytes: err %v", l, err)
- return 0, nil, err
- }
- if debugDumpRxPacketBytes {
- debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:])
- } else if debugDumpRxPacket {
- debug("recv packet: %s %d bytes", fxp(b[0]), l)
- }
- return b[0], b[1:], nil
-}
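Putting sendPacket and recvPacket together, the wire format is a 4-byte big-endian length, a type byte, then the payload. For an init packet at protocol version 3 this works out to 9 bytes (a worked example, assuming ssh_FXP_INIT == 1 as in the filexfer draft):

    var buf bytes.Buffer
    _ = sendPacket(&buf, sshFxInitPacket{Version: 3})
    // buf.Bytes() == []byte{0x00, 0x00, 0x00, 0x05,  // length = 5
    //                       0x01,                    // ssh_FXP_INIT
    //                       0x00, 0x00, 0x00, 0x03}  // version = 3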
-
-type extensionPair struct {
- Name string
- Data string
-}
-
-func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) {
- var ep extensionPair
- var err error
- ep.Name, b, err = unmarshalStringSafe(b)
- if err != nil {
- return ep, b, err
- }
- ep.Data, b, err = unmarshalStringSafe(b)
- if err != nil {
- return ep, b, err
- }
- return ep, b, err
-}
-
-// Here starts the definition of packets along with their MarshalBinary
-// implementations.
-// Writing the marshalling logic by hand, rather than relying on reflection,
-// saves a lot of time and allocations.
-
-type sshFxInitPacket struct {
- Version uint32
- Extensions []extensionPair
-}
-
-func (p sshFxInitPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 // byte + uint32
- for _, e := range p.Extensions {
- l += 4 + len(e.Name) + 4 + len(e.Data)
- }
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_INIT)
- b = marshalUint32(b, p.Version)
- for _, e := range p.Extensions {
- b = marshalString(b, e.Name)
- b = marshalString(b, e.Data)
- }
- return b, nil
-}
-
-func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.Version, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- for len(b) > 0 {
- var ep extensionPair
- ep, b, err = unmarshalExtensionPair(b)
- if err != nil {
- return err
- }
- p.Extensions = append(p.Extensions, ep)
- }
- return nil
-}
-
-type sshFxVersionPacket struct {
- Version uint32
- Extensions []struct {
- Name, Data string
- }
-}
-
-func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 // byte + uint32
- for _, e := range p.Extensions {
- l += 4 + len(e.Name) + 4 + len(e.Data)
- }
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_VERSION)
- b = marshalUint32(b, p.Version)
- for _, e := range p.Extensions {
- b = marshalString(b, e.Name)
- b = marshalString(b, e.Data)
- }
- return b, nil
-}
-
-func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(str)
-
- b := make([]byte, 0, l)
- b = append(b, packetType)
- b = marshalUint32(b, id)
- b = marshalString(b, str)
- return b, nil
-}
-
-func unmarshalIDString(b []byte, id *uint32, str *string) error {
- var err error
- *id, b, err = unmarshalUint32Safe(b)
- if err != nil {
- return err
- }
- *str, b, err = unmarshalStringSafe(b)
- return err
-}
-
-type sshFxpReaddirPacket struct {
- ID uint32
- Handle string
-}
-
-func (p sshFxpReaddirPacket) id() uint32 { return p.ID }
-
-func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle)
-}
-
-func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Handle)
-}
-
-type sshFxpOpendirPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpOpendirPacket) id() uint32 { return p.ID }
-
-func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path)
-}
-
-func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpLstatPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpLstatPacket) id() uint32 { return p.ID }
-
-func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path)
-}
-
-func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpStatPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpStatPacket) id() uint32 { return p.ID }
-
-func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_STAT, p.ID, p.Path)
-}
-
-func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpFstatPacket struct {
- ID uint32
- Handle string
-}
-
-func (p sshFxpFstatPacket) id() uint32 { return p.ID }
-
-func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle)
-}
-
-func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Handle)
-}
-
-type sshFxpClosePacket struct {
- ID uint32
- Handle string
-}
-
-func (p sshFxpClosePacket) id() uint32 { return p.ID }
-
-func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle)
-}
-
-func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Handle)
-}
-
-type sshFxpRemovePacket struct {
- ID uint32
- Filename string
-}
-
-func (p sshFxpRemovePacket) id() uint32 { return p.ID }
-
-func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename)
-}
-
-func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Filename)
-}
-
-type sshFxpRmdirPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpRmdirPacket) id() uint32 { return p.ID }
-
-func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path)
-}
-
-func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpSymlinkPacket struct {
- ID uint32
- Targetpath string
- Linkpath string
-}
-
-func (p sshFxpSymlinkPacket) id() uint32 { return p.ID }
-
-func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Targetpath) +
- 4 + len(p.Linkpath)
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_SYMLINK)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Targetpath)
- b = marshalString(b, p.Linkpath)
- return b, nil
-}
-
-func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Linkpath, b, err = unmarshalStringSafe(b); err != nil {
- return err
- }
- return nil
-}
-
-type sshFxpReadlinkPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpReadlinkPacket) id() uint32 { return p.ID }
-
-func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path)
-}
-
-func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpRealpathPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpRealpathPacket) id() uint32 { return p.ID }
-
-func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) {
- return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path)
-}
-
-func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error {
- return unmarshalIDString(b, &p.ID, &p.Path)
-}
-
-type sshFxpNameAttr struct {
- Name string
- LongName string
- Attrs []interface{}
-}
-
-func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) {
- b := []byte{}
- b = marshalString(b, p.Name)
- b = marshalString(b, p.LongName)
- for _, attr := range p.Attrs {
- b = marshal(b, attr)
- }
- return b, nil
-}
-
-type sshFxpNamePacket struct {
- ID uint32
- NameAttrs []sshFxpNameAttr
-}
-
-func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) {
- b := []byte{}
- b = append(b, ssh_FXP_NAME)
- b = marshalUint32(b, p.ID)
- b = marshalUint32(b, uint32(len(p.NameAttrs)))
- for _, na := range p.NameAttrs {
- ab, err := na.MarshalBinary()
- if err != nil {
- return nil, err
- }
-
- b = append(b, ab...)
- }
- return b, nil
-}
-
-type sshFxpOpenPacket struct {
- ID uint32
- Path string
- Pflags uint32
- Flags uint32 // ignored
-}
-
-func (p sshFxpOpenPacket) id() uint32 { return p.ID }
-
-func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 +
- 4 + len(p.Path) +
- 4 + 4
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_OPEN)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Path)
- b = marshalUint32(b, p.Pflags)
- b = marshalUint32(b, p.Flags)
- return b, nil
-}
-
-func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- return nil
-}
-
-type sshFxpReadPacket struct {
- ID uint32
- Handle string
- Offset uint64
- Len uint32
-}
-
-func (p sshFxpReadPacket) id() uint32 { return p.ID }
-
-func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Handle) +
- 8 + 4 // uint64 + uint32
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_READ)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Handle)
- b = marshalUint64(b, p.Offset)
- b = marshalUint32(b, p.Len)
- return b, nil
-}
-
-func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil {
- return err
- } else if p.Len, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- return nil
-}
-
-type sshFxpRenamePacket struct {
- ID uint32
- Oldpath string
- Newpath string
-}
-
-func (p sshFxpRenamePacket) id() uint32 { return p.ID }
-
-func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Oldpath) +
- 4 + len(p.Newpath)
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_RENAME)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Oldpath)
- b = marshalString(b, p.Newpath)
- return b, nil
-}
-
-func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Newpath, b, err = unmarshalStringSafe(b); err != nil {
- return err
- }
- return nil
-}
-
-type sshFxpWritePacket struct {
- ID uint32
- Handle string
- Offset uint64
- Length uint32
- Data []byte
-}
-
-func (p sshFxpWritePacket) id() uint32 { return p.ID }
-
-func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Handle) +
- 8 + 4 + // uint64 + uint32
- len(p.Data)
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_WRITE)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Handle)
- b = marshalUint64(b, p.Offset)
- b = marshalUint32(b, p.Length)
- b = append(b, p.Data...)
- return b, nil
-}
-
-func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil {
- return err
- } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if uint32(len(b)) < p.Length {
- return errShortPacket
- }
-
- p.Data = append([]byte{}, b[:p.Length]...)
- return nil
-}
-
-type sshFxpMkdirPacket struct {
- ID uint32
- Path string
- Flags uint32 // ignored
-}
-
-func (p sshFxpMkdirPacket) id() uint32 { return p.ID }
-
-func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Path) +
- 4 // uint32
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_MKDIR)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Path)
- b = marshalUint32(b, p.Flags)
- return b, nil
-}
-
-func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- return nil
-}
-
-type sshFxpSetstatPacket struct {
- ID uint32
- Path string
- Flags uint32
- Attrs interface{}
-}
-
-type sshFxpFsetstatPacket struct {
- ID uint32
- Handle string
- Flags uint32
- Attrs interface{}
-}
-
-func (p sshFxpSetstatPacket) id() uint32 { return p.ID }
-func (p sshFxpFsetstatPacket) id() uint32 { return p.ID }
-
-func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Path) +
- 4 // uint32 (flags); attrs are appended by marshal below
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_SETSTAT)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Path)
- b = marshalUint32(b, p.Flags)
- b = marshal(b, p.Attrs)
- return b, nil
-}
-
-func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- 4 + len(p.Handle) +
- 4 // uint32 (flags); attrs are appended by marshal below
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_FSETSTAT)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Handle)
- b = marshalUint32(b, p.Flags)
- b = marshal(b, p.Attrs)
- return b, nil
-}
-
-func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- p.Attrs = b
- return nil
-}
-
-func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- }
- p.Attrs = b
- return nil
-}
-
-type sshFxpHandlePacket struct {
- ID uint32
- Handle string
-}
-
-func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) {
- b := []byte{ssh_FXP_HANDLE}
- b = marshalUint32(b, p.ID)
- b = marshalString(b, p.Handle)
- return b, nil
-}
-
-type sshFxpStatusPacket struct {
- ID uint32
- StatusError
-}
-
-func (p sshFxpStatusPacket) MarshalBinary() ([]byte, error) {
- b := []byte{ssh_FXP_STATUS}
- b = marshalUint32(b, p.ID)
- b = marshalStatus(b, p.StatusError)
- return b, nil
-}
-
-type sshFxpDataPacket struct {
- ID uint32
- Length uint32
- Data []byte
-}
-
-func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) {
- b := []byte{ssh_FXP_DATA}
- b = marshalUint32(b, p.ID)
- b = marshalUint32(b, p.Length)
- b = append(b, p.Data[:p.Length]...)
- return b, nil
-}
-
-func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if uint32(len(b)) < p.Length {
- return errors.New("truncated packet")
- }
-
- p.Data = make([]byte, p.Length)
- copy(p.Data, b)
- return nil
-}
-
-type sshFxpStatvfsPacket struct {
- ID uint32
- Path string
-}
-
-func (p sshFxpStatvfsPacket) id() uint32 { return p.ID }
-
-func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) {
- l := 1 + 4 + // type(byte) + uint32
- len(p.Path) +
- len("statvfs@openssh.com")
-
- b := make([]byte, 0, l)
- b = append(b, ssh_FXP_EXTENDED)
- b = marshalUint32(b, p.ID)
- b = marshalString(b, "statvfs@openssh.com")
- b = marshalString(b, p.Path)
- return b, nil
-}
-
-// A StatVFS contains statistics about a filesystem.
-type StatVFS struct {
- ID uint32
- Bsize uint64 /* file system block size */
- Frsize uint64 /* fundamental fs block size */
- Blocks uint64 /* number of blocks (unit f_frsize) */
- Bfree uint64 /* free blocks in file system */
- Bavail uint64 /* free blocks for non-root */
- Files uint64 /* total file inodes */
- Ffree uint64 /* free file inodes */
- Favail uint64 /* free file inodes for non-root */
- Fsid uint64 /* file system id */
- Flag uint64 /* bit mask of f_flag values */
- Namemax uint64 /* maximum filename length */
-}
-
-// TotalSpace calculates the amount of total space in a filesystem.
-func (p *StatVFS) TotalSpace() uint64 {
- return p.Frsize * p.Blocks
-}
-
-// FreeSpace calculates the amount of free space in a filesystem.
-func (p *StatVFS) FreeSpace() uint64 {
- return p.Frsize * p.Bfree
-}
-
-// MarshalBinary encodes p in the ssh_FXP_EXTENDED_REPLY packet binary format.
-func (p *StatVFS) MarshalBinary() ([]byte, error) {
- var buf bytes.Buffer
- buf.Write([]byte{ssh_FXP_EXTENDED_REPLY})
- err := binary.Write(&buf, binary.BigEndian, p)
- return buf.Bytes(), err
-}
-
-type sshFxpExtendedPacket struct {
- ID uint32
- ExtendedRequest string
- SpecificPacket interface {
- serverRespondablePacket
- readonly() bool
- }
-}
-
-func (p sshFxpExtendedPacket) id() uint32 { return p.ID }
-func (p sshFxpExtendedPacket) readonly() bool { return p.SpecificPacket.readonly() }
-
-func (p sshFxpExtendedPacket) respond(svr *Server) error {
- return p.SpecificPacket.respond(svr)
-}
-
-func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error {
- var err error
- bOrig := b
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
- return err
- }
-
- // specific unmarshalling
- switch p.ExtendedRequest {
- case "statvfs@openssh.com":
- p.SpecificPacket = &sshFxpExtendedPacketStatVFS{}
- default:
- return errUnknownExtendedPacket
- }
-
- return p.SpecificPacket.UnmarshalBinary(bOrig)
-}
-
-type sshFxpExtendedPacketStatVFS struct {
- ID uint32
- ExtendedRequest string
- Path string
-}
-
-func (p sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID }
-func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true }
-func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error {
- var err error
- if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
- return err
- } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
- return err
- } else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go
deleted file mode 100644
index b695528..0000000
--- a/vendor/github.com/pkg/sftp/release.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build !debug
-
-package sftp
-
-func debug(fmt string, args ...interface{}) {}
diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go
deleted file mode 100644
index e097bda..0000000
--- a/vendor/github.com/pkg/sftp/server.go
+++ /dev/null
@@ -1,607 +0,0 @@
-package sftp
-
-// sftp server counterpart
-
-import (
- "encoding"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "sync"
- "syscall"
- "time"
-
- "github.com/pkg/errors"
-)
-
-const (
- sftpServerWorkerCount = 8
-)
-
-// Server is an SSH File Transfer Protocol (sftp) server.
-// This is intended to provide the sftp subsystem to an ssh server daemon.
-// This implementation currently supports most of sftp server protocol version 3,
-// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
-type Server struct {
- serverConn
- debugStream io.Writer
- readOnly bool
- pktChan chan rxPacket
- openFiles map[string]*os.File
- openFilesLock sync.RWMutex
- handleCount int
- maxTxPacket uint32
-}
-
-func (svr *Server) nextHandle(f *os.File) string {
- svr.openFilesLock.Lock()
- defer svr.openFilesLock.Unlock()
- svr.handleCount++
- handle := strconv.Itoa(svr.handleCount)
- svr.openFiles[handle] = f
- return handle
-}
-
-func (svr *Server) closeHandle(handle string) error {
- svr.openFilesLock.Lock()
- defer svr.openFilesLock.Unlock()
- if f, ok := svr.openFiles[handle]; ok {
- delete(svr.openFiles, handle)
- return f.Close()
- }
-
- return syscall.EBADF
-}
-
-func (svr *Server) getHandle(handle string) (*os.File, bool) {
- svr.openFilesLock.RLock()
- defer svr.openFilesLock.RUnlock()
- f, ok := svr.openFiles[handle]
- return f, ok
-}
-
-type serverRespondablePacket interface {
- encoding.BinaryUnmarshaler
- id() uint32
- respond(svr *Server) error
-}
-
-// NewServer creates a new Server instance around the provided streams, serving
-// content from the root of the filesystem. Optionally, ServerOption
-// functions may be specified to further configure the Server.
-//
-// A subsequent call to Serve() is required to begin serving files over SFTP.
-func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) {
- s := &Server{
- serverConn: serverConn{
- conn: conn{
- Reader: rwc,
- WriteCloser: rwc,
- },
- },
- debugStream: ioutil.Discard,
- pktChan: make(chan rxPacket, sftpServerWorkerCount),
- openFiles: make(map[string]*os.File),
- maxTxPacket: 1 << 15,
- }
-
- for _, o := range options {
- if err := o(s); err != nil {
- return nil, err
- }
- }
-
- return s, nil
-}
-
-// A ServerOption is a function which applies configuration to a Server.
-type ServerOption func(*Server) error
-
-// WithDebug enables Server debugging output to the supplied io.Writer.
-func WithDebug(w io.Writer) ServerOption {
- return func(s *Server) error {
- s.debugStream = w
- return nil
- }
-}
-
-// ReadOnly configures a Server to serve files in read-only mode.
-func ReadOnly() ServerOption {
- return func(s *Server) error {
- s.readOnly = true
- return nil
- }
-}
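A hedged sketch of serving the subsystem on one accepted SSH channel; the channel value (any io.ReadWriteCloser) is an assumption and error handling is abbreviated:

    srv, err := sftp.NewServer(channel, sftp.ReadOnly(), sftp.WithDebug(os.Stderr))
    if err != nil {
        log.Fatal(err)
    }
    if err := srv.Serve(); err != nil && err != io.EOF {
        log.Print(err)
    }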
-
-type rxPacket struct {
- pktType fxp
- pktBytes []byte
-}
-
-// sftpServerWorker processes packets from pktChan; up to sftpServerWorkerCount workers run in parallel.
-func (svr *Server) sftpServerWorker() error {
- for p := range svr.pktChan {
- var pkt interface {
- encoding.BinaryUnmarshaler
- id() uint32
- }
- var readonly = true
- switch p.pktType {
- case ssh_FXP_INIT:
- pkt = &sshFxInitPacket{}
- case ssh_FXP_LSTAT:
- pkt = &sshFxpLstatPacket{}
- case ssh_FXP_OPEN:
- pkt = &sshFxpOpenPacket{}
- // readonly handled specially below
- case ssh_FXP_CLOSE:
- pkt = &sshFxpClosePacket{}
- case ssh_FXP_READ:
- pkt = &sshFxpReadPacket{}
- case ssh_FXP_WRITE:
- pkt = &sshFxpWritePacket{}
- readonly = false
- case ssh_FXP_FSTAT:
- pkt = &sshFxpFstatPacket{}
- case ssh_FXP_SETSTAT:
- pkt = &sshFxpSetstatPacket{}
- readonly = false
- case ssh_FXP_FSETSTAT:
- pkt = &sshFxpFsetstatPacket{}
- readonly = false
- case ssh_FXP_OPENDIR:
- pkt = &sshFxpOpendirPacket{}
- case ssh_FXP_READDIR:
- pkt = &sshFxpReaddirPacket{}
- case ssh_FXP_REMOVE:
- pkt = &sshFxpRemovePacket{}
- readonly = false
- case ssh_FXP_MKDIR:
- pkt = &sshFxpMkdirPacket{}
- readonly = false
- case ssh_FXP_RMDIR:
- pkt = &sshFxpRmdirPacket{}
- readonly = false
- case ssh_FXP_REALPATH:
- pkt = &sshFxpRealpathPacket{}
- case ssh_FXP_STAT:
- pkt = &sshFxpStatPacket{}
- case ssh_FXP_RENAME:
- pkt = &sshFxpRenamePacket{}
- readonly = false
- case ssh_FXP_READLINK:
- pkt = &sshFxpReadlinkPacket{}
- case ssh_FXP_SYMLINK:
- pkt = &sshFxpSymlinkPacket{}
- readonly = false
- case ssh_FXP_EXTENDED:
- pkt = &sshFxpExtendedPacket{}
- default:
- return errors.Errorf("unhandled packet type: %s", p.pktType)
- }
- if err := pkt.UnmarshalBinary(p.pktBytes); err != nil {
- return err
- }
-
- // FXP_OPEN and FXP_EXTENDED determine their read-only status from the packet contents
- switch pkt := pkt.(type) {
- case *sshFxpOpenPacket:
- readonly = pkt.readonly()
- case *sshFxpExtendedPacket:
- readonly = pkt.SpecificPacket.readonly()
- }
-
- // If server is operating read-only and a write operation is requested,
- // return permission denied
- if !readonly && svr.readOnly {
- if err := svr.sendError(pkt, syscall.EPERM); err != nil {
- return errors.Wrap(err, "failed to send read only packet response")
- }
- continue
- }
-
- if err := handlePacket(svr, pkt); err != nil {
- return err
- }
- }
- return nil
-}
-
-func handlePacket(s *Server, p interface{}) error {
- switch p := p.(type) {
- case *sshFxInitPacket:
- return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil})
- case *sshFxpStatPacket:
- // stat the requested file
- info, err := os.Stat(p.Path)
- if err != nil {
- return s.sendError(p, err)
- }
- return s.sendPacket(sshFxpStatResponse{
- ID: p.ID,
- info: info,
- })
- case *sshFxpLstatPacket:
- // stat the requested file
- info, err := os.Lstat(p.Path)
- if err != nil {
- return s.sendError(p, err)
- }
- return s.sendPacket(sshFxpStatResponse{
- ID: p.ID,
- info: info,
- })
- case *sshFxpFstatPacket:
- f, ok := s.getHandle(p.Handle)
- if !ok {
- return s.sendError(p, syscall.EBADF)
- }
-
- info, err := f.Stat()
- if err != nil {
- return s.sendError(p, err)
- }
-
- return s.sendPacket(sshFxpStatResponse{
- ID: p.ID,
- info: info,
- })
- case *sshFxpMkdirPacket:
- // TODO FIXME: ignore flags field
- err := os.Mkdir(p.Path, 0755)
- return s.sendError(p, err)
- case *sshFxpRmdirPacket:
- err := os.Remove(p.Path)
- return s.sendError(p, err)
- case *sshFxpRemovePacket:
- err := os.Remove(p.Filename)
- return s.sendError(p, err)
- case *sshFxpRenamePacket:
- err := os.Rename(p.Oldpath, p.Newpath)
- return s.sendError(p, err)
- case *sshFxpSymlinkPacket:
- err := os.Symlink(p.Targetpath, p.Linkpath)
- return s.sendError(p, err)
- case *sshFxpClosePacket:
- return s.sendError(p, s.closeHandle(p.Handle))
- case *sshFxpReadlinkPacket:
- f, err := os.Readlink(p.Path)
- if err != nil {
- return s.sendError(p, err)
- }
-
- return s.sendPacket(sshFxpNamePacket{
- ID: p.ID,
- NameAttrs: []sshFxpNameAttr{{
- Name: f,
- LongName: f,
- Attrs: emptyFileStat,
- }},
- })
-
- case *sshFxpRealpathPacket:
- f, err := filepath.Abs(p.Path)
- if err != nil {
- return s.sendError(p, err)
- }
- f = filepath.Clean(f)
- return s.sendPacket(sshFxpNamePacket{
- ID: p.ID,
- NameAttrs: []sshFxpNameAttr{{
- Name: f,
- LongName: f,
- Attrs: emptyFileStat,
- }},
- })
- case *sshFxpOpendirPacket:
- return sshFxpOpenPacket{
- ID: p.ID,
- Path: p.Path,
- Pflags: ssh_FXF_READ,
- }.respond(s)
- case *sshFxpReadPacket:
- f, ok := s.getHandle(p.Handle)
- if !ok {
- return s.sendError(p, syscall.EBADF)
- }
-
- data := make([]byte, clamp(p.Len, s.maxTxPacket))
- n, err := f.ReadAt(data, int64(p.Offset))
- if err != nil && (err != io.EOF || n == 0) {
- return s.sendError(p, err)
- }
- return s.sendPacket(sshFxpDataPacket{
- ID: p.ID,
- Length: uint32(n),
- Data: data[:n],
- })
- case *sshFxpWritePacket:
- f, ok := s.getHandle(p.Handle)
- if !ok {
- return s.sendError(p, syscall.EBADF)
- }
-
- _, err := f.WriteAt(p.Data, int64(p.Offset))
- return s.sendError(p, err)
- case serverRespondablePacket:
- err := p.respond(s)
- return errors.Wrap(err, "pkt.respond failed")
- default:
- return errors.Errorf("unexpected packet type %T", p)
- }
-}
-
-// Serve serves SFTP connections until the streams stop or the SFTP subsystem
-// is stopped.
-func (svr *Server) Serve() error {
- var wg sync.WaitGroup
- wg.Add(sftpServerWorkerCount)
- for i := 0; i < sftpServerWorkerCount; i++ {
- go func() {
- defer wg.Done()
- if err := svr.sftpServerWorker(); err != nil {
- svr.conn.Close() // shuts down recvPacket
- }
- }()
- }
-
- var err error
- var pktType uint8
- var pktBytes []byte
- for {
- pktType, pktBytes, err = svr.recvPacket()
- if err != nil {
- break
- }
- svr.pktChan <- rxPacket{fxp(pktType), pktBytes}
- }
-
- close(svr.pktChan) // shuts down sftpServerWorkers
- wg.Wait() // wait for all workers to exit
-
- // close any still-open files
- for handle, file := range svr.openFiles {
- fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name())
- file.Close()
- }
- return err // error from recvPacket
-}
-
-type id interface {
- id() uint32
-}
-
-// The init packet has no ID, so we just return a zero-value ID
-func (p sshFxInitPacket) id() uint32 { return 0 }
-
-type sshFxpStatResponse struct {
- ID uint32
- info os.FileInfo
-}
-
-func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) {
- b := []byte{ssh_FXP_ATTRS}
- b = marshalUint32(b, p.ID)
- b = marshalFileInfo(b, p.info)
- return b, nil
-}
-
-var emptyFileStat = []interface{}{uint32(0)}
-
-func (p sshFxpOpenPacket) readonly() bool {
- return !p.hasPflags(ssh_FXF_WRITE)
-}
-
-func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool {
- for _, f := range flags {
- if p.Pflags&f == 0 {
- return false
- }
- }
- return true
-}
-
-func (p sshFxpOpenPacket) respond(svr *Server) error {
- var osFlags int
- if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) {
- osFlags |= os.O_RDWR
- } else if p.hasPflags(ssh_FXF_WRITE) {
- osFlags |= os.O_WRONLY
- } else if p.hasPflags(ssh_FXF_READ) {
- osFlags |= os.O_RDONLY
- } else {
- // how are they opening?
- return svr.sendError(p, syscall.EINVAL)
- }
-
- if p.hasPflags(ssh_FXF_APPEND) {
- osFlags |= os.O_APPEND
- }
- if p.hasPflags(ssh_FXF_CREAT) {
- osFlags |= os.O_CREATE
- }
- if p.hasPflags(ssh_FXF_TRUNC) {
- osFlags |= os.O_TRUNC
- }
- if p.hasPflags(ssh_FXF_EXCL) {
- osFlags |= os.O_EXCL
- }
-
- f, err := os.OpenFile(p.Path, osFlags, 0644)
- if err != nil {
- return svr.sendError(p, err)
- }
-
- handle := svr.nextHandle(f)
- return svr.sendPacket(sshFxpHandlePacket{p.ID, handle})
-}
-
-func (p sshFxpReaddirPacket) respond(svr *Server) error {
- f, ok := svr.getHandle(p.Handle)
- if !ok {
- return svr.sendError(p, syscall.EBADF)
- }
-
- dirname := f.Name()
- dirents, err := f.Readdir(128)
- if err != nil {
- return svr.sendError(p, err)
- }
-
- ret := sshFxpNamePacket{ID: p.ID}
- for _, dirent := range dirents {
- ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{
- Name: dirent.Name(),
- LongName: runLs(dirname, dirent),
- Attrs: []interface{}{dirent},
- })
- }
- return svr.sendPacket(ret)
-}
-
-func (p sshFxpSetstatPacket) respond(svr *Server) error {
- // additional unmarshalling is required for each possibility here
- b := p.Attrs.([]byte)
- var err error
-
- debug("setstat name \"%s\"", p.Path)
- if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 {
- var size uint64
- if size, b, err = unmarshalUint64Safe(b); err == nil {
- err = os.Truncate(p.Path, int64(size))
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 {
- var mode uint32
- if mode, b, err = unmarshalUint32Safe(b); err == nil {
- err = os.Chmod(p.Path, os.FileMode(mode))
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 {
- var atime uint32
- var mtime uint32
- if atime, b, err = unmarshalUint32Safe(b); err != nil {
- } else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
- } else {
- atimeT := time.Unix(int64(atime), 0)
- mtimeT := time.Unix(int64(mtime), 0)
- err = os.Chtimes(p.Path, atimeT, mtimeT)
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 {
- var uid uint32
- var gid uint32
- if uid, b, err = unmarshalUint32Safe(b); err != nil {
- } else if gid, b, err = unmarshalUint32Safe(b); err != nil {
- } else {
- err = os.Chown(p.Path, int(uid), int(gid))
- }
- }
-
- return svr.sendError(p, err)
-}
-
-func (p sshFxpFsetstatPacket) respond(svr *Server) error {
- f, ok := svr.getHandle(p.Handle)
- if !ok {
- return svr.sendError(p, syscall.EBADF)
- }
-
- // additional unmarshalling is required for each possibility here
- b := p.Attrs.([]byte)
- var err error
-
- debug("fsetstat name \"%s\"", f.Name())
- if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 {
- var size uint64
- if size, b, err = unmarshalUint64Safe(b); err == nil {
- err = f.Truncate(int64(size))
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 {
- var mode uint32
- if mode, b, err = unmarshalUint32Safe(b); err == nil {
- err = f.Chmod(os.FileMode(mode))
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 {
- var atime uint32
- var mtime uint32
- if atime, b, err = unmarshalUint32Safe(b); err != nil {
- } else if mtime, b, err = unmarshalUint32Safe(b); err != nil {
- } else {
- atimeT := time.Unix(int64(atime), 0)
- mtimeT := time.Unix(int64(mtime), 0)
- err = os.Chtimes(f.Name(), atimeT, mtimeT)
- }
- }
- if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 {
- var uid uint32
- var gid uint32
- if uid, b, err = unmarshalUint32Safe(b); err != nil {
- } else if gid, b, err = unmarshalUint32Safe(b); err != nil {
- } else {
- err = f.Chown(int(uid), int(gid))
- }
- }
-
- return svr.sendError(p, err)
-}
-
-// translateErrno translates a syscall error number to a SFTP error code.
-func translateErrno(errno syscall.Errno) uint32 {
- switch errno {
- case 0:
- return ssh_FX_OK
- case syscall.ENOENT:
- return ssh_FX_NO_SUCH_FILE
- case syscall.EPERM:
- return ssh_FX_PERMISSION_DENIED
- }
-
- return ssh_FX_FAILURE
-}
-
-func statusFromError(p id, err error) sshFxpStatusPacket {
- ret := sshFxpStatusPacket{
- ID: p.id(),
- StatusError: StatusError{
- // ssh_FX_OK = 0
- // ssh_FX_EOF = 1
- // ssh_FX_NO_SUCH_FILE = 2 ENOENT
- // ssh_FX_PERMISSION_DENIED = 3
- // ssh_FX_FAILURE = 4
- // ssh_FX_BAD_MESSAGE = 5
- // ssh_FX_NO_CONNECTION = 6
- // ssh_FX_CONNECTION_LOST = 7
- // ssh_FX_OP_UNSUPPORTED = 8
- Code: ssh_FX_OK,
- },
- }
- if err != nil {
- debug("statusFromError: error is %T %#v", err, err)
- ret.StatusError.Code = ssh_FX_FAILURE
- ret.StatusError.msg = err.Error()
- if err == io.EOF {
- ret.StatusError.Code = ssh_FX_EOF
- } else if errno, ok := err.(syscall.Errno); ok {
- ret.StatusError.Code = translateErrno(errno)
- } else if pathError, ok := err.(*os.PathError); ok {
- debug("statusFromError: error is %T %#v", pathError.Err, pathError.Err)
- if errno, ok := pathError.Err.(syscall.Errno); ok {
- ret.StatusError.Code = translateErrno(errno)
- }
- }
- }
- return ret
-}
-
-func clamp(v, max uint32) uint32 {
- if v > max {
- return max
- }
- return v
-}
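
The file removed above contains the whole request-dispatch loop: Serve fans packets out to worker goroutines, handlePacket maps each ssh_FXP_* request onto an os call, and statusFromError translates the resulting errno into an SSH_FX_* status. For orientation, here is a minimal sketch of driving this server from an accepted SSH channel, mirroring the NewServer(channel, channel, debugStream, debugLevel, readOnly, rootDir) call that appears in the afero test removed later in this diff; newer pkg/sftp releases use functional options instead, so the positional signature is specific to this vendored version.

```go
// Package sftpserve is a sketch, not part of this repository.
package sftpserve

import (
	"os"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// ServeSFTP runs an SFTP server over an accepted SSH "session" channel,
// using the positional NewServer signature shown in this vendored copy.
func ServeSFTP(channel ssh.Channel) error {
	server, err := sftp.NewServer(channel, channel, os.Stderr, 1, false, "/")
	if err != nil {
		return err
	}
	// Serve dispatches request packets (stat, open, read, write, ...)
	// until the connection ends, then closes any handles left open.
	return server.Serve()
}
```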
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
deleted file mode 100644
index 8c01dac..0000000
--- a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package sftp
-
-import (
- "syscall"
-)
-
-func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
- return &StatVFS{
- Bsize: uint64(stat.Bsize),
- Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here
- Blocks: stat.Blocks,
- Bfree: stat.Bfree,
- Bavail: stat.Bavail,
- Files: stat.Files,
- Ffree: stat.Ffree,
- Favail: stat.Ffree, // not sure how to calculate Favail
- Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness?
- Flag: uint64(stat.Flags), // assuming POSIX?
- Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024
- }, nil
-}
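
The darwin variant above packs the two 32-bit words of Statfs_t.Fsid into a single uint64 with Val[1] in the high half, and the inline comment questions the byte order. A tiny worked example of that packing (the values are arbitrary):

```go
package main

import "fmt"

func main() {
	// Mirrors the deleted expression: uint64(Val[1])<<32 | uint64(Val[0]).
	val0, val1 := uint64(0x11), uint64(0x22)
	fmt.Printf("%#x\n", val1<<32|val0) // 0x2200000011
}
```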
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go
deleted file mode 100644
index c26870e..0000000
--- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build darwin linux,!gccgo
-
-// fill in statvfs structure with OS specific values
-// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance)
-
-package sftp
-
-import (
- "syscall"
-)
-
-func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error {
- stat := &syscall.Statfs_t{}
- if err := syscall.Statfs(p.Path, stat); err != nil {
- return svr.sendPacket(statusFromError(p, err))
- }
-
- retPkt, err := statvfsFromStatfst(stat)
- if err != nil {
- return svr.sendPacket(statusFromError(p, err))
- }
- retPkt.ID = p.ID
-
- return svr.sendPacket(retPkt)
-}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go
deleted file mode 100644
index 43478e8..0000000
--- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !gccgo,linux
-
-package sftp
-
-import (
- "syscall"
-)
-
-func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
- return &StatVFS{
- Bsize: uint64(stat.Bsize),
- Frsize: uint64(stat.Frsize),
- Blocks: stat.Blocks,
- Bfree: stat.Bfree,
- Bavail: stat.Bavail,
- Files: stat.Files,
- Ffree: stat.Ffree,
- Favail: stat.Ffree, // not sure how to calculate Favail
- Flag: uint64(stat.Flags), // assuming POSIX?
- Namemax: uint64(stat.Namelen),
- }, nil
-}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go
deleted file mode 100644
index 1512a13..0000000
--- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !darwin,!linux gccgo
-
-package sftp
-
-import (
- "syscall"
-)
-
-func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error {
- return syscall.ENOTSUP
-}
diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go
deleted file mode 100644
index 3b1ddbd..0000000
--- a/vendor/github.com/pkg/sftp/server_stubs.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !cgo,!plan9 windows android
-
-package sftp
-
-import (
- "os"
- "path"
-)
-
-func runLs(dirname string, dirent os.FileInfo) string {
- return path.Join(dirname, dirent.Name())
-}
diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go
deleted file mode 100644
index 8c3f0b4..0000000
--- a/vendor/github.com/pkg/sftp/server_unix.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris
-// +build cgo
-
-package sftp
-
-import (
- "fmt"
- "os"
- "path"
- "syscall"
- "time"
-)
-
-func runLsTypeWord(dirent os.FileInfo) string {
- // find first character, the type char
- // b Block special file.
- // c Character special file.
- // d Directory.
- // l Symbolic link.
- // s Socket link.
- // p FIFO.
- // - Regular file.
- tc := '-'
- mode := dirent.Mode()
- if (mode & os.ModeDir) != 0 {
- tc = 'd'
- } else if (mode & os.ModeDevice) != 0 {
- tc = 'b'
- if (mode & os.ModeCharDevice) != 0 {
- tc = 'c'
- }
- } else if (mode & os.ModeSymlink) != 0 {
- tc = 'l'
- } else if (mode & os.ModeSocket) != 0 {
- tc = 's'
- } else if (mode & os.ModeNamedPipe) != 0 {
- tc = 'p'
- }
-
- // owner
- orc := '-'
- if (mode & 0400) != 0 {
- orc = 'r'
- }
- owc := '-'
- if (mode & 0200) != 0 {
- owc = 'w'
- }
- oxc := '-'
- ox := (mode & 0100) != 0
- setuid := (mode & os.ModeSetuid) != 0
- if ox && setuid {
- oxc = 's'
- } else if setuid {
- oxc = 'S'
- } else if ox {
- oxc = 'x'
- }
-
- // group
- grc := '-'
- if (mode & 040) != 0 {
- grc = 'r'
- }
- gwc := '-'
- if (mode & 020) != 0 {
- gwc = 'w'
- }
- gxc := '-'
- gx := (mode & 010) != 0
- setgid := (mode & os.ModeSetgid) != 0
- if gx && setgid {
- gxc = 's'
- } else if setgid {
- gxc = 'S'
- } else if gx {
- gxc = 'x'
- }
-
- // all / others
- arc := '-'
- if (mode & 04) != 0 {
- arc = 'r'
- }
- awc := '-'
- if (mode & 02) != 0 {
- awc = 'w'
- }
- axc := '-'
- ax := (mode & 01) != 0
- sticky := (mode & os.ModeSticky) != 0
- if ax && sticky {
- axc = 't'
- } else if sticky {
- axc = 'T'
- } else if ax {
- axc = 'x'
- }
-
- return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc)
-}
-
-func runLsStatt(dirname string, dirent os.FileInfo, statt *syscall.Stat_t) string {
- // example from openssh sftp server:
- // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd
- // format:
- // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name
-
- typeword := runLsTypeWord(dirent)
- numLinks := statt.Nlink
- uid := statt.Uid
- gid := statt.Gid
- username := fmt.Sprintf("%d", uid)
- groupname := fmt.Sprintf("%d", gid)
- // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output
-
- mtime := dirent.ModTime()
- monthStr := mtime.Month().String()[0:3]
- day := mtime.Day()
- year := mtime.Year()
- now := time.Now()
- isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2))
-
- yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute())
- if isOld {
- yearOrTime = fmt.Sprintf("%d", year)
- }
-
- return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name())
-}
-
-// ls -l style output for a file, which is in the 'long output' section of a readdir response packet
-// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases
-func runLs(dirname string, dirent os.FileInfo) string {
- dsys := dirent.Sys()
- if dsys == nil {
- } else if statt, ok := dsys.(*syscall.Stat_t); !ok {
- } else {
- return runLsStatt(dirname, dirent, statt)
- }
-
- return path.Join(dirname, dirent.Name())
-}
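
runLsTypeWord above hand-builds the ls(1)-style type-and-permission word that ends up in the long name of readdir replies. For the common cases the standard library produces the same rwx triplets, which is a quick way to sanity-check the mapping (note that os.FileMode.String renders setuid/setgid/sticky as prefix letters rather than folding them into the execute columns as ls and the code above do):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println((os.ModeDir | 0755).String())     // drwxr-xr-x
	fmt.Println(os.FileMode(0644).String())       // -rw-r--r--
	fmt.Println((os.ModeSymlink | 0777).String()) // Lrwxrwxrwx ('L' here, 'l' in ls)
}
```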
diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go
deleted file mode 100644
index 22184af..0000000
--- a/vendor/github.com/pkg/sftp/sftp.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Package sftp implements the SSH File Transfer Protocol as described in
-// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
-package sftp
-
-import (
- "fmt"
-
- "github.com/pkg/errors"
-)
-
-const (
- ssh_FXP_INIT = 1
- ssh_FXP_VERSION = 2
- ssh_FXP_OPEN = 3
- ssh_FXP_CLOSE = 4
- ssh_FXP_READ = 5
- ssh_FXP_WRITE = 6
- ssh_FXP_LSTAT = 7
- ssh_FXP_FSTAT = 8
- ssh_FXP_SETSTAT = 9
- ssh_FXP_FSETSTAT = 10
- ssh_FXP_OPENDIR = 11
- ssh_FXP_READDIR = 12
- ssh_FXP_REMOVE = 13
- ssh_FXP_MKDIR = 14
- ssh_FXP_RMDIR = 15
- ssh_FXP_REALPATH = 16
- ssh_FXP_STAT = 17
- ssh_FXP_RENAME = 18
- ssh_FXP_READLINK = 19
- ssh_FXP_SYMLINK = 20
- ssh_FXP_STATUS = 101
- ssh_FXP_HANDLE = 102
- ssh_FXP_DATA = 103
- ssh_FXP_NAME = 104
- ssh_FXP_ATTRS = 105
- ssh_FXP_EXTENDED = 200
- ssh_FXP_EXTENDED_REPLY = 201
-)
-
-const (
- ssh_FX_OK = 0
- ssh_FX_EOF = 1
- ssh_FX_NO_SUCH_FILE = 2
- ssh_FX_PERMISSION_DENIED = 3
- ssh_FX_FAILURE = 4
- ssh_FX_BAD_MESSAGE = 5
- ssh_FX_NO_CONNECTION = 6
- ssh_FX_CONNECTION_LOST = 7
- ssh_FX_OP_UNSUPPORTED = 8
-
- // see draft-ietf-secsh-filexfer-13
- // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1
- ssh_FX_INVALID_HANDLE = 9
- ssh_FX_NO_SUCH_PATH = 10
- ssh_FX_FILE_ALREADY_EXISTS = 11
- ssh_FX_WRITE_PROTECT = 12
- ssh_FX_NO_MEDIA = 13
- ssh_FX_NO_SPACE_ON_FILESYSTEM = 14
- ssh_FX_QUOTA_EXCEEDED = 15
- ssh_FX_UNKNOWN_PRINCIPAL = 16
- ssh_FX_LOCK_CONFLICT = 17
- ssh_FX_DIR_NOT_EMPTY = 18
- ssh_FX_NOT_A_DIRECTORY = 19
- ssh_FX_INVALID_FILENAME = 20
- ssh_FX_LINK_LOOP = 21
- ssh_FX_CANNOT_DELETE = 22
- ssh_FX_INVALID_PARAMETER = 23
- ssh_FX_FILE_IS_A_DIRECTORY = 24
- ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25
- ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26
- ssh_FX_DELETE_PENDING = 27
- ssh_FX_FILE_CORRUPT = 28
- ssh_FX_OWNER_INVALID = 29
- ssh_FX_GROUP_INVALID = 30
- ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31
-)
-
-const (
- ssh_FXF_READ = 0x00000001
- ssh_FXF_WRITE = 0x00000002
- ssh_FXF_APPEND = 0x00000004
- ssh_FXF_CREAT = 0x00000008
- ssh_FXF_TRUNC = 0x00000010
- ssh_FXF_EXCL = 0x00000020
-)
-
-type fxp uint8
-
-func (f fxp) String() string {
- switch f {
- case ssh_FXP_INIT:
- return "SSH_FXP_INIT"
- case ssh_FXP_VERSION:
- return "SSH_FXP_VERSION"
- case ssh_FXP_OPEN:
- return "SSH_FXP_OPEN"
- case ssh_FXP_CLOSE:
- return "SSH_FXP_CLOSE"
- case ssh_FXP_READ:
- return "SSH_FXP_READ"
- case ssh_FXP_WRITE:
- return "SSH_FXP_WRITE"
- case ssh_FXP_LSTAT:
- return "SSH_FXP_LSTAT"
- case ssh_FXP_FSTAT:
- return "SSH_FXP_FSTAT"
- case ssh_FXP_SETSTAT:
- return "SSH_FXP_SETSTAT"
- case ssh_FXP_FSETSTAT:
- return "SSH_FXP_FSETSTAT"
- case ssh_FXP_OPENDIR:
- return "SSH_FXP_OPENDIR"
- case ssh_FXP_READDIR:
- return "SSH_FXP_READDIR"
- case ssh_FXP_REMOVE:
- return "SSH_FXP_REMOVE"
- case ssh_FXP_MKDIR:
- return "SSH_FXP_MKDIR"
- case ssh_FXP_RMDIR:
- return "SSH_FXP_RMDIR"
- case ssh_FXP_REALPATH:
- return "SSH_FXP_REALPATH"
- case ssh_FXP_STAT:
- return "SSH_FXP_STAT"
- case ssh_FXP_RENAME:
- return "SSH_FXP_RENAME"
- case ssh_FXP_READLINK:
- return "SSH_FXP_READLINK"
- case ssh_FXP_SYMLINK:
- return "SSH_FXP_SYMLINK"
- case ssh_FXP_STATUS:
- return "SSH_FXP_STATUS"
- case ssh_FXP_HANDLE:
- return "SSH_FXP_HANDLE"
- case ssh_FXP_DATA:
- return "SSH_FXP_DATA"
- case ssh_FXP_NAME:
- return "SSH_FXP_NAME"
- case ssh_FXP_ATTRS:
- return "SSH_FXP_ATTRS"
- case ssh_FXP_EXTENDED:
- return "SSH_FXP_EXTENDED"
- case ssh_FXP_EXTENDED_REPLY:
- return "SSH_FXP_EXTENDED_REPLY"
- default:
- return "unknown"
- }
-}
-
-type fx uint8
-
-func (f fx) String() string {
- switch f {
- case ssh_FX_OK:
- return "SSH_FX_OK"
- case ssh_FX_EOF:
- return "SSH_FX_EOF"
- case ssh_FX_NO_SUCH_FILE:
- return "SSH_FX_NO_SUCH_FILE"
- case ssh_FX_PERMISSION_DENIED:
- return "SSH_FX_PERMISSION_DENIED"
- case ssh_FX_FAILURE:
- return "SSH_FX_FAILURE"
- case ssh_FX_BAD_MESSAGE:
- return "SSH_FX_BAD_MESSAGE"
- case ssh_FX_NO_CONNECTION:
- return "SSH_FX_NO_CONNECTION"
- case ssh_FX_CONNECTION_LOST:
- return "SSH_FX_CONNECTION_LOST"
- case ssh_FX_OP_UNSUPPORTED:
- return "SSH_FX_OP_UNSUPPORTED"
- default:
- return "unknown"
- }
-}
-
-type unexpectedPacketErr struct {
- want, got uint8
-}
-
-func (u *unexpectedPacketErr) Error() string {
- return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got))
-}
-
-func unimplementedPacketErr(u uint8) error {
- return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u))
-}
-
-type unexpectedIDErr struct{ want, got uint32 }
-
-func (u *unexpectedIDErr) Error() string {
- return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got)
-}
-
-func unimplementedSeekWhence(whence int) error {
- return errors.Errorf("sftp: unimplemented seek whence %v", whence)
-}
-
-func unexpectedCount(want, got uint32) error {
- return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got)
-}
-
-type unexpectedVersionErr struct{ want, got uint32 }
-
-func (u *unexpectedVersionErr) Error() string {
- return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got)
-}
-
-// A StatusError is returned when an SFTP operation fails, and provides
-// additional information about the failure.
-type StatusError struct {
- Code uint32
- msg, lang string
-}
-
-func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) }
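
StatusError above is what callers see when a server answers with SSH_FXP_STATUS; its Code field carries one of the ssh_FX_* values listed in the constants. A hedged sketch of inspecting it on the client side (it assumes, as in this vendored version, that failed operations surface a *sftp.StatusError):

```go
package main

import (
	"fmt"

	"github.com/pkg/sftp"
)

// describeSFTPError reports the raw SFTP status code when the error is a
// *sftp.StatusError, and falls back to the plain error text otherwise.
func describeSFTPError(err error) string {
	if se, ok := err.(*sftp.StatusError); ok {
		// se.Code holds one of the ssh_FX_* values, e.g. 3 for
		// SSH_FX_PERMISSION_DENIED or 4 for SSH_FX_FAILURE.
		return fmt.Sprintf("sftp status %d: %v", se.Code, se)
	}
	return err.Error()
}

func main() {
	fmt.Println(describeSFTPError(fmt.Errorf("plain error")))
}
```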
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
index db20817..d9e3327 100644
--- a/vendor/github.com/spf13/afero/README.md
+++ b/vendor/github.com/spf13/afero/README.md
@@ -198,17 +198,17 @@ independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
- appFS = afero.NewMemMapFs()
+ appFS := afero.NewMemMapFs()
// create test files and directories
- appFS.MkdirAll("src/a", 0755))
+ appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
- _, err := appFS.Stat("src/c")
+ name := "src/c"
+ _, err := appFS.Stat(name)
if os.IsNotExist(err) {
- t.Errorf("file \"%s\" does not exist.\n", name)
+ t.Errorf("file \"%s\" does not exist.\n", name)
}
}
-
```
# Available Backends
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
index e3df3f4..f5b5e12 100644
--- a/vendor/github.com/spf13/afero/afero.go
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -77,7 +77,7 @@ type Fs interface {
// happens.
Remove(name string) error
- // RemoveAll removes a directory path and all any children it contains. It
+ // RemoveAll removes a directory path and any children it contains. It
// does not fail if the path does not exist (return nil).
RemoveAll(path string) error
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
index d742425..e54a4f8 100644
--- a/vendor/github.com/spf13/afero/cacheOnReadFs.go
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -32,9 +32,8 @@ func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
type cacheState int
const (
- cacheUnknown cacheState = iota
// not present in the overlay, unknown if it exists in the base:
- cacheMiss
+ cacheMiss cacheState = iota
// present in the overlay and in base, base file is newer:
cacheStale
// present in the overlay - with cache time == 0 it may exist in the base,
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 2e259b8..494ba54 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -35,8 +35,6 @@ func NewMemMapFs() Fs {
return &MemMapFs{}
}
-var memfsInit sync.Once
-
func (m *MemMapFs) getData() map[string]*mem.FileData {
m.init.Do(func() {
m.data = make(map[string]*mem.FileData)
@@ -130,13 +128,16 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
m.mu.RUnlock()
if ok {
return &os.PathError{"mkdir", name, ErrFileExists}
- } else {
- m.mu.Lock()
- item := mem.CreateDir(name)
- m.getData()[name] = item
- m.registerWithParent(item)
- m.mu.Unlock()
}
+
+ m.mu.Lock()
+ item := mem.CreateDir(name)
+ m.getData()[name] = item
+ m.registerWithParent(item)
+ m.mu.Unlock()
+
+ m.Chmod(name, perm)
+
return nil
}
@@ -205,9 +206,11 @@ func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+ chmod := false
file, err := m.openWrite(name)
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
+ chmod = true
}
if err != nil {
return nil, err
@@ -229,6 +232,9 @@ func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, erro
return nil, err
}
}
+ if chmod {
+ m.Chmod(name, perm)
+ }
return file, nil
}
@@ -309,7 +315,10 @@ func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
name = normalizePath(name)
+
+ m.mu.RLock()
f, ok := m.getData()[name]
+ m.mu.RUnlock()
if !ok {
return &os.PathError{"chmod", name, ErrFileNotFound}
}
@@ -323,7 +332,10 @@ func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
+
+ m.mu.RLock()
f, ok := m.getData()[name]
+ m.mu.RUnlock()
if !ok {
return &os.PathError{"chtimes", name, ErrFileNotFound}
}
@@ -342,8 +354,8 @@ func (m *MemMapFs) List() {
}
}
-func debugMemMapList(fs Fs) {
- if x, ok := fs.(*MemMapFs); ok {
- x.List()
- }
-}
+// func debugMemMapList(fs Fs) {
+// if x, ok := fs.(*MemMapFs); ok {
+// x.List()
+// }
+// }
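
The memmap.go changes above make Mkdir and OpenFile-with-O_CREATE apply the caller's permission bits via Chmod, and wrap the map lookups in Chmod/Chtimes with the read lock. A small sketch of the now-honoured permissions, using only the public afero API (whether the directory bit is preserved in the reported mode depends on the mem backend):

```go
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	if err := fs.Mkdir("secret", 0700); err != nil {
		fmt.Println("mkdir:", err)
		return
	}
	info, err := fs.Stat("secret")
	if err != nil {
		fmt.Println("stat:", err)
		return
	}
	// With this change the permission bits reflect the requested 0700
	// rather than a hard-coded default.
	fmt.Println(info.Mode())
}
```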
diff --git a/vendor/github.com/spf13/afero/sftp.go b/vendor/github.com/spf13/afero/sftp.go
deleted file mode 100644
index a095a54..0000000
--- a/vendor/github.com/spf13/afero/sftp.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package afero
-
-import (
- "os"
- "time"
-
- "github.com/spf13/afero/sftp"
-
- "github.com/pkg/sftp"
-)
-
-// SftpFs is a Fs implementation that uses functions provided by the sftp package.
-//
-// For details in any method, check the documentation of the sftp package
-// (github.com/pkg/sftp).
-type SftpFs struct{
- SftpClient *sftp.Client
-}
-
-func (s SftpFs) Name() string { return "SftpFs" }
-
-func (s SftpFs) Create(name string) (File, error) {
- f, err := sftpfs.FileCreate(s.SftpClient, name)
- return f, err
-}
-
-func (s SftpFs) Mkdir(name string, perm os.FileMode) error {
- err := s.SftpClient.Mkdir(name)
- if err != nil {
- return err
- }
- return s.SftpClient.Chmod(name, perm)
-}
-
-func (s SftpFs) MkdirAll(path string, perm os.FileMode) error {
- // Fast path: if we can tell whether path is a directory or file, stop with success or error.
- dir, err := s.Stat(path)
- if err == nil {
- if dir.IsDir() {
- return nil
- }
- return err
- }
-
- // Slow path: make sure parent exists and then call Mkdir for path.
- i := len(path)
- for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
- i--
- }
-
- j := i
- for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
- j--
- }
-
- if j > 1 {
- // Create parent
- err = s.MkdirAll(path[0:j-1], perm)
- if err != nil {
- return err
- }
- }
-
- // Parent now exists; invoke Mkdir and use its result.
- err = s.Mkdir(path, perm)
- if err != nil {
- // Handle arguments like "foo/." by
- // double-checking that directory doesn't exist.
- dir, err1 := s.Lstat(path)
- if err1 == nil && dir.IsDir() {
- return nil
- }
- return err
- }
- return nil
-}
-
-func (s SftpFs) Open(name string) (File, error) {
- f, err := sftpfs.FileOpen(s.SftpClient, name)
- return f, err
-}
-
-func (s SftpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
- return nil,nil
-}
-
-func (s SftpFs) Remove(name string) error {
- return s.SftpClient.Remove(name)
-}
-
-func (s SftpFs) RemoveAll(path string) error {
- // TODO have a look at os.RemoveAll
- // https://github.com/golang/go/blob/master/src/os/path.go#L66
- return nil
-}
-
-func (s SftpFs) Rename(oldname, newname string) error {
- return s.SftpClient.Rename(oldname, newname)
-}
-
-func (s SftpFs) Stat(name string) (os.FileInfo, error) {
- return s.SftpClient.Stat(name)
-}
-
-func (s SftpFs) Lstat(p string) (os.FileInfo, error) {
- return s.SftpClient.Lstat(p)
-}
-
-func (s SftpFs) Chmod(name string, mode os.FileMode) error {
- return s.SftpClient.Chmod(name, mode)
-}
-
-func (s SftpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
- return s.SftpClient.Chtimes(name, atime, mtime)
-}
diff --git a/vendor/github.com/spf13/afero/sftp/file.go b/vendor/github.com/spf13/afero/sftp/file.go
deleted file mode 100644
index bab4abc..0000000
--- a/vendor/github.com/spf13/afero/sftp/file.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sftpfs
-
-import (
- "os"
- "github.com/pkg/sftp"
-)
-
-type File struct {
- fd *sftp.File
-}
-
-func FileOpen(s *sftp.Client, name string) (*File, error) {
- fd, err := s.Open(name)
- if err != nil {
- return &File{}, err
- }
- return &File{fd: fd}, nil
-}
-
-func FileCreate(s *sftp.Client, name string) (*File, error) {
- fd, err := s.Create(name)
- if err != nil {
- return &File{}, err
- }
- return &File{fd: fd}, nil
-}
-
-func (f *File) Close() error {
- return f.fd.Close()
-}
-
-func (f *File) Name() string {
- return f.fd.Name()
-}
-
-func (f *File) Stat() (os.FileInfo, error) {
- return f.fd.Stat()
-}
-
-func (f *File) Sync() error {
- return nil
-}
-
-func (f *File) Truncate(size int64) error {
- return f.fd.Truncate(size)
-}
-
-func (f *File) Read(b []byte) (n int, err error) {
- return f.fd.Read(b)
-}
-
-// TODO
-func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
- return 0,nil
-}
-
-// TODO
-func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
- return nil,nil
-}
-
-// TODO
-func (f *File) Readdirnames(n int) (names []string, err error) {
- return nil,nil
-}
-
-func (f *File) Seek(offset int64, whence int) (int64, error) {
- return f.fd.Seek(offset, whence)
-}
-
-func (f *File) Write(b []byte) (n int, err error) {
- return f.fd.Write(b)
-}
-
-// TODO
-func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
- return 0,nil
-}
-
-func (f *File) WriteString(s string) (ret int, err error) {
- return f.fd.Write([]byte(s))
-}
diff --git a/vendor/github.com/spf13/afero/sftp_test_go b/vendor/github.com/spf13/afero/sftp_test_go
deleted file mode 100644
index bb00535..0000000
--- a/vendor/github.com/spf13/afero/sftp_test_go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright © 2015 Jerry Jacobs <jerry.jacobs@xor-gate.org>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package afero
-
-import (
- "testing"
- "os"
- "log"
- "fmt"
- "net"
- "flag"
- "time"
- "io/ioutil"
- "crypto/rsa"
- _rand "crypto/rand"
- "encoding/pem"
- "crypto/x509"
-
- "golang.org/x/crypto/ssh"
- "github.com/pkg/sftp"
-)
-
-type SftpFsContext struct {
- sshc *ssh.Client
- sshcfg *ssh.ClientConfig
- sftpc *sftp.Client
-}
-
-// TODO we only connect with hardcoded user+pass for now
-// it should be possible to use $HOME/.ssh/id_rsa to login into the stub sftp server
-func SftpConnect(user, password, host string) (*SftpFsContext, error) {
-/*
- pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa")
- if err != nil {
- return nil,err
- }
-
- signer, err := ssh.ParsePrivateKey(pemBytes)
- if err != nil {
- return nil,err
- }
-
- sshcfg := &ssh.ClientConfig{
- User: user,
- Auth: []ssh.AuthMethod{
- ssh.Password(password),
- ssh.PublicKeys(signer),
- },
- }
-*/
-
- sshcfg := &ssh.ClientConfig{
- User: user,
- Auth: []ssh.AuthMethod{
- ssh.Password(password),
- },
- }
-
- sshc, err := ssh.Dial("tcp", host, sshcfg)
- if err != nil {
- return nil,err
- }
-
- sftpc, err := sftp.NewClient(sshc)
- if err != nil {
- return nil,err
- }
-
- ctx := &SftpFsContext{
- sshc: sshc,
- sshcfg: sshcfg,
- sftpc: sftpc,
- }
-
- return ctx,nil
-}
-
-func (ctx *SftpFsContext) Disconnect() error {
- ctx.sftpc.Close()
- ctx.sshc.Close()
- return nil
-}
-
-// TODO for such a weird reason rootpath is "." when writing "file1" with afero sftp backend
-func RunSftpServer(rootpath string) {
- var (
- readOnly bool
- debugLevelStr string
- debugLevel int
- debugStderr bool
- rootDir string
- )
-
- flag.BoolVar(&readOnly, "R", false, "read-only server")
- flag.BoolVar(&debugStderr, "e", true, "debug to stderr")
- flag.StringVar(&debugLevelStr, "l", "none", "debug level")
- flag.StringVar(&rootDir, "root", rootpath, "root directory")
- flag.Parse()
-
- debugStream := ioutil.Discard
- if debugStderr {
- debugStream = os.Stderr
- debugLevel = 1
- }
-
- // An SSH server is represented by a ServerConfig, which holds
- // certificate details and handles authentication of ServerConns.
- config := &ssh.ServerConfig{
- PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
- // Should use constant-time compare (or better, salt+hash) in
- // a production setting.
- fmt.Fprintf(debugStream, "Login: %s\n", c.User())
- if c.User() == "test" && string(pass) == "test" {
- return nil, nil
- }
- return nil, fmt.Errorf("password rejected for %q", c.User())
- },
- }
-
- privateBytes, err := ioutil.ReadFile("./test/id_rsa")
- if err != nil {
- log.Fatal("Failed to load private key", err)
- }
-
- private, err := ssh.ParsePrivateKey(privateBytes)
- if err != nil {
- log.Fatal("Failed to parse private key", err)
- }
-
- config.AddHostKey(private)
-
- // Once a ServerConfig has been configured, connections can be
- // accepted.
- listener, err := net.Listen("tcp", "0.0.0.0:2022")
- if err != nil {
- log.Fatal("failed to listen for connection", err)
- }
- fmt.Printf("Listening on %v\n", listener.Addr())
-
- nConn, err := listener.Accept()
- if err != nil {
- log.Fatal("failed to accept incoming connection", err)
- }
-
- // Before use, a handshake must be performed on the incoming
- // net.Conn.
- _, chans, reqs, err := ssh.NewServerConn(nConn, config)
- if err != nil {
- log.Fatal("failed to handshake", err)
- }
- fmt.Fprintf(debugStream, "SSH server established\n")
-
- // The incoming Request channel must be serviced.
- go ssh.DiscardRequests(reqs)
-
- // Service the incoming Channel channel.
- for newChannel := range chans {
- // Channels have a type, depending on the application level
- // protocol intended. In the case of an SFTP session, this is "subsystem"
- // with a payload string of "<length=4>sftp"
- fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType())
- if newChannel.ChannelType() != "session" {
- newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
- fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType())
- continue
- }
- channel, requests, err := newChannel.Accept()
- if err != nil {
- log.Fatal("could not accept channel.", err)
- }
- fmt.Fprintf(debugStream, "Channel accepted\n")
-
- // Sessions have out-of-band requests such as "shell",
- // "pty-req" and "env". Here we handle only the
- // "subsystem" request.
- go func(in <-chan *ssh.Request) {
- for req := range in {
- fmt.Fprintf(debugStream, "Request: %v\n", req.Type)
- ok := false
- switch req.Type {
- case "subsystem":
- fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:])
- if string(req.Payload[4:]) == "sftp" {
- ok = true
- }
- }
- fmt.Fprintf(debugStream, " - accepted: %v\n", ok)
- req.Reply(ok, nil)
- }
- }(requests)
-
- server, err := sftp.NewServer(channel, channel, debugStream, debugLevel, readOnly, rootpath)
- if err != nil {
- log.Fatal(err)
- }
- if err := server.Serve(); err != nil {
- log.Fatal("sftp server completed with error:", err)
- }
- }
-}
-
-// MakeSSHKeyPair makes a pair of public and private keys for SSH access.
-// The public key is encoded in the format for inclusion in an OpenSSH authorized_keys file.
-// The generated private key is PEM encoded.
-func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error {
- privateKey, err := rsa.GenerateKey(_rand.Reader, bits)
- if err != nil {
- return err
- }
-
- // generate and write private key as PEM
- privateKeyFile, err := os.Create(privateKeyPath)
- defer privateKeyFile.Close()
- if err != nil {
- return err
- }
-
- privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
- if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
- return err
- }
-
- // generate and write public key
- pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
- if err != nil {
- return err
- }
-
- return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655)
-}
-
-func TestSftpCreate(t *testing.T) {
- os.Mkdir("./test", 0777)
- MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa")
-
- go RunSftpServer("./test/")
- time.Sleep(5 * time.Second)
-
- ctx, err := SftpConnect("test", "test", "localhost:2022")
- if err != nil {
- t.Fatal(err)
- }
- defer ctx.Disconnect()
-
- var AppFs Fs = SftpFs{
- SftpClient: ctx.sftpc,
- }
-
- AppFs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0777))
- AppFs.Mkdir("test/foo", os.FileMode(0000))
- AppFs.Chmod("test/foo", os.FileMode(0700))
- AppFs.Mkdir("test/bar", os.FileMode(0777))
-
- file, err := AppFs.Create("file1")
- if err != nil {
- t.Error(err)
- }
- defer file.Close()
-
- file.Write([]byte("hello\t"))
- file.WriteString("world!\n")
-
- f1, err := AppFs.Open("file1")
- if err != nil {
- log.Fatalf("open: %v", err)
- }
- defer f1.Close()
-
- b := make([]byte, 100)
-
- _, err = f1.Read(b)
- fmt.Println(string(b))
-
- // TODO check here if "hello\tworld\n" is in buffer b
-}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index 23f59a1..1aaa1ff 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -18,15 +18,21 @@ import (
func ToTimeE(i interface{}) (tim time.Time, err error) {
i = indirect(i)
- switch s := i.(type) {
+ switch v := i.(type) {
case time.Time:
- return s, nil
+ return v, nil
case string:
- d, e := StringToDate(s)
+ d, e := StringToDate(v)
if e == nil {
return d, nil
}
return time.Time{}, fmt.Errorf("Could not parse Date/Time format: %v\n", e)
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
default:
return time.Time{}, fmt.Errorf("Unable to Cast %#v to Time\n", i)
}
@@ -513,6 +519,7 @@ func StringToDate(s string) (time.Time, error) {
"02 Jan 2006",
"2006-01-02 15:04:05 -07:00",
"2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05",
})
}
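
The cast change above adds integer inputs to ToTimeE, interpreted as Unix timestamps, and a bare "2006-01-02 15:04:05" layout to StringToDate. A quick sketch of both:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// int, int32 and int64 are now treated as seconds since the Unix epoch.
	t1, err := cast.ToTimeE(1136239445)
	fmt.Println(t1.UTC(), err)

	// The added layout accepts date-times without a zone offset.
	t2, err := cast.ToTimeE("2006-01-02 15:04:05")
	fmt.Println(t2, err)
}
```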
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
index 08ad945..93d6d77 100644
--- a/vendor/github.com/spf13/pflag/README.md
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -106,9 +106,9 @@ that give one-letter shorthands for flags. You can use these by appending
var ip = flag.IntP("flagname", "f", 1234, "help message")
var flagvar bool
func init() {
- flag.BoolVarP("boolname", "b", true, "help message")
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
}
-flag.VarP(&flagVar, "varname", "v", 1234, "help message")
+flag.VarP(&flagVal, "varname", "v", "help message")
```
Shorthand letters can be used with single dashes on the command line.
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index 4ed2d40..2603c78 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -1102,15 +1102,15 @@ func (v *Viper) ReadInConfig() error {
func MergeInConfig() error { return v.MergeInConfig() }
func (v *Viper) MergeInConfig() error {
jww.INFO.Println("Attempting to merge in config file")
- if !stringInSlice(v.getConfigType(), SupportedExts) {
- return UnsupportedConfigError(v.getConfigType())
- }
-
filename, err := v.getConfigFile()
if err != nil {
return err
}
+ if !stringInSlice(v.getConfigType(), SupportedExts) {
+ return UnsupportedConfigError(v.getConfigType())
+ }
+
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 29b71d1..aa4311f 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -43,6 +43,9 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
// a.Equal(123, 123, "123 and 123 should be equal")
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
@@ -262,6 +265,9 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index 835084f..d1552e5 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -18,10 +18,6 @@ import (
"github.com/pmezard/go-difflib/difflib"
)
-func init() {
- spew.Config.SortKeys = true
-}
-
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Errorf(format string, args ...interface{})
@@ -157,7 +153,7 @@ func getWhitespaceString() string {
parts := strings.Split(file, "/")
file = parts[len(parts)-1]
- return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+ return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
}
@@ -174,22 +170,18 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
return ""
}
-// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's
-// test printing (see inner comment for specifics)
-func indentMessageLines(message string, tabs int) string {
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer)
for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
if i != 0 {
- outBuf.WriteRune('\n')
- }
- for ii := 0; ii < tabs; ii++ {
- outBuf.WriteRune('\t')
- // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
- // by 1 prematurely.
- if ii == 0 && i > 0 {
- ii++
- }
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen +1) + "\t")
}
outBuf.WriteString(scanner.Text())
}
@@ -221,29 +213,49 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool
// Fail reports a failure through
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")},
+ {"Error", failureMessage},
+ }
message := messageFromMsgAndArgs(msgAndArgs...)
-
- errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
if len(message) > 0 {
- t.Errorf("\r%s\r\tError Trace:\t%s\n"+
- "\r\tError:%s\n"+
- "\r\tMessages:\t%s\n\r",
- getWhitespaceString(),
- errorTrace,
- indentMessageLines(failureMessage, 2),
- message)
- } else {
- t.Errorf("\r%s\r\tError Trace:\t%s\n"+
- "\r\tError:%s\n\r",
- getWhitespaceString(),
- errorTrace,
- indentMessageLines(failureMessage, 2))
+ content = append(content, labeledContent{"Messages", message})
}
+ t.Errorf("\r" + getWhitespaceString() + labeledOutput(content...))
+
return false
}
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \r\t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
+
// Implements asserts that an object is implemented by the specified interface.
//
// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
@@ -274,6 +286,9 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
// assert.Equal(t, 123, 123, "123 and 123 should be equal")
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if !ObjectsAreEqual(expected, actual) {
@@ -295,31 +310,15 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
// with the type name, and the value will be enclosed in parenthesis similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
- aType := reflect.TypeOf(expected)
- bType := reflect.TypeOf(actual)
-
- if aType != bType && isNumericType(aType) && isNumericType(bType) {
- return fmt.Sprintf("%v(%#v)", aType, expected),
- fmt.Sprintf("%v(%#v)", bType, actual)
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%#v)", expected, expected),
+ fmt.Sprintf("%T(%#v)", actual, actual)
}
return fmt.Sprintf("%#v", expected),
fmt.Sprintf("%#v", actual)
}
-func isNumericType(t reflect.Type) bool {
- switch t.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return true
- case reflect.Float32, reflect.Float64:
- return true
- }
-
- return false
-}
-
-// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
@@ -560,6 +559,9 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
//
// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if ObjectsAreEqual(expected, actual) {
@@ -1043,8 +1045,8 @@ func diff(expected interface{}, actual interface{}) string {
return ""
}
- e := spew.Sdump(expected)
- a := spew.Sdump(actual)
+ e := spewConfig.Sdump(expected)
+ a := spewConfig.Sdump(actual)
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
A: difflib.SplitLines(e),
@@ -1058,3 +1060,10 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff
}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+}
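
labeledOutput above pads every label to the longest one so the content columns line up, and indentMessageLines keeps continuation lines in the same column. A standalone sketch of the same padding rule (re-implemented here because the helper is unexported, and with the carriage returns dropped so the result is readable on stdout):

```go
package main

import (
	"fmt"
	"strings"
)

// align pads each label to the longest one, mimicking labeledOutput's
// "\r\t{{label}}:{{align_spaces}}\t{{content}}\n" layout.
func align(labels, contents []string) string {
	longest := 0
	for _, l := range labels {
		if len(l) > longest {
			longest = len(l)
		}
	}
	var b strings.Builder
	for i, l := range labels {
		b.WriteString("\t" + l + ":" + strings.Repeat(" ", longest-len(l)) + "\t" + contents[i] + "\n")
	}
	return b.String()
}

func main() {
	fmt.Print(align(
		[]string{"Error Trace", "Error", "Messages"},
		[]string{"foo_test.go:42", "expected 1, got 2", "values should match"},
	))
}
```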
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index f2fc9b6..21f7d0d 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -10,10 +10,13 @@ import (
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
+ "crypto/md5"
"crypto/rsa"
+ "crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
+ "encoding/hex"
"encoding/pem"
"errors"
"fmt"
@@ -878,3 +881,25 @@ func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
copy(pk, pk1.Priv)
return &pk, nil
}
+
+// FingerprintLegacyMD5 returns the user presentation of the key's
+// fingerprint as described by RFC 4716 section 4.
+func FingerprintLegacyMD5(pubKey PublicKey) string {
+ md5sum := md5.Sum(pubKey.Marshal())
+ hexarray := make([]string, len(md5sum))
+ for i, c := range md5sum {
+ hexarray[i] = hex.EncodeToString([]byte{c})
+ }
+ return strings.Join(hexarray, ":")
+}
+
+// FingerprintSHA256 returns the user presentation of the key's
+// fingerprint as unpadded base64 encoded sha256 hash.
+// This format was introduced from OpenSSH 6.8.
+// https://www.openssh.com/txt/release-6.8
+// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
+func FingerprintSHA256(pubKey PublicKey) string {
+ sha256sum := sha256.Sum256(pubKey.Marshal())
+ hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
+ return "SHA256:" + hash
+}
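
The two helpers added above work on any ssh.PublicKey. A minimal sketch of using them on a parsed authorized_keys entry (the key literal is a placeholder, not a real key):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Placeholder: substitute a real authorized_keys line.
	authorizedKey := []byte("ssh-ed25519 AAAA... user@example.com")

	pub, _, _, _, err := ssh.ParseAuthorizedKey(authorizedKey)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(ssh.FingerprintSHA256(pub))    // "SHA256:..." (OpenSSH >= 6.8 style)
	fmt.Println(ssh.FingerprintLegacyMD5(pub)) // colon-separated MD5 hex pairs
}
```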
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 37df1b3..9037470 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -242,7 +242,7 @@ func checkSourceAddress(addr net.Addr, sourceAddr string) error {
}
if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
- if bytes.Equal(allowedIP, tcpAddr.IP) {
+ if allowedIP.Equal(tcpAddr.IP) {
return nil
}
} else {
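
The one-line change in checkSourceAddress matters because net.IP stores the same address as either a 4-byte or a 16-byte slice; bytes.Equal compares the representations, while net.IP.Equal compares the addresses. A quick illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"net"
)

func main() {
	a := net.ParseIP("192.0.2.1")     // 16-byte, IPv4-in-IPv6 form
	b := net.IPv4(192, 0, 2, 1).To4() // 4-byte form

	fmt.Println(bytes.Equal(a, b)) // false: different lengths
	fmt.Println(a.Equal(b))        // true: same address
}
```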
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index d1ed420..134654c 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -36,6 +36,103 @@
// Contexts.
package context // import "golang.org/x/net/context"
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stores using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
@@ -52,3 +149,8 @@ func Background() Context {
func TODO() Context {
return todo
}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/net/context/go18.go b/vendor/golang.org/x/net/context/go18.go
deleted file mode 100644
index 35360c5..0000000
--- a/vendor/golang.org/x/net/context/go18.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.8
-
-package context
-
-import (
- "context" // standard library's context, as of Go 1.7
-)
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context => context.Context
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc => context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go18.go b/vendor/golang.org/x/net/context/pre_go18.go
deleted file mode 100644
index 41bd8ba..0000000
--- a/vendor/golang.org/x/net/context/pre_go18.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.8
-
-package context
-
-import "time"
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- //
- // WithCancel arranges for Done to be closed when cancel is called;
- // WithDeadline arranges for Done to be closed when the deadline
- // expires; WithTimeout arranges for Done to be closed when the timeout
- // elapses.
- //
- // Done is provided for use in select statements:
- //
- // // Stream generates values with DoSomething and sends them to out
- // // until DoSomething returns an error or ctx.Done is closed.
- // func Stream(ctx context.Context, out chan<- Value) error {
- // for {
- // v, err := DoSomething(ctx)
- // if err != nil {
- // return err
- // }
- // select {
- // case <-ctx.Done():
- // return ctx.Err()
- // case out <- v:
- // }
- // }
- // }
- //
- // See http://blog.golang.org/pipelines for more examples of how to use
- // a Done channel for cancelation.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- //
- // A key identifies a specific value in a Context. Functions that wish
- // to store values in Context typically allocate a key in a global
- // variable then use that key as the argument to context.WithValue and
- // Context.Value. A key can be any type that supports equality;
- // packages should define keys as an unexported type to avoid
- // collisions.
- //
- // Packages that define a Context key should provide type-safe accessors
- // for the values stores using that key:
- //
- // // Package user defines a User type that's stored in Contexts.
- // package user
- //
- // import "golang.org/x/net/context"
- //
- // // User is the type of value stored in the Contexts.
- // type User struct {...}
- //
- // // key is an unexported type for keys defined in this package.
- // // This prevents collisions with keys defined in other packages.
- // type key int
- //
- // // userKey is the key for user.User values in Contexts. It is
- // // unexported; clients use user.NewContext and user.FromContext
- // // instead of using this key directly.
- // var userKey key = 0
- //
- // // NewContext returns a new Context that carries value u.
- // func NewContext(ctx context.Context, u *User) context.Context {
- // return context.WithValue(ctx, userKey, u)
- // }
- //
- // // FromContext returns the User value stored in ctx, if any.
- // func FromContext(ctx context.Context) (*User, bool) {
- // u, ok := ctx.Value(userKey).(*User)
- // return u, ok
- // }
- Value(key interface{}) interface{}
-}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index b0c79b0..358833f 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -317,10 +317,12 @@ type Framer struct {
// non-Continuation or Continuation on a different stream is
// attempted to be written.
- logReads bool
+ logReads, logWrites bool
- debugFramer *Framer // only use for logging written writes
- debugFramerBuf *bytes.Buffer
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
}
func (fr *Framer) maxHeaderListSize() uint32 {
@@ -355,7 +357,7 @@ func (f *Framer) endWrite() error {
byte(length>>16),
byte(length>>8),
byte(length))
- if logFrameWrites {
+ if f.logWrites {
f.logWrite()
}
@@ -378,10 +380,10 @@ func (f *Framer) logWrite() {
f.debugFramerBuf.Write(f.wbuf)
fr, err := f.debugFramer.ReadFrame()
if err != nil {
- log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
return
}
- log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
}
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
@@ -399,9 +401,12 @@ const (
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
- w: w,
- r: r,
- logReads: logFrameReads,
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ logWrites: logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
}
fr.getReadBuf = func(size uint32) []byte {
if cap(fr.readBuf) >= int(size) {
@@ -483,7 +488,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return nil, err
}
if fr.logReads {
- log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
return fr.readMetaFrame(f.(*HeadersFrame))
@@ -1419,8 +1424,8 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
hdec.SetEmitEnabled(true)
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
- if VerboseLogs && logFrameReads {
- log.Printf("http2: decoded hpack field %+v", hf)
+ if VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
}
if !httplex.ValidHeaderFieldValue(hf.Value) {
invalid = headerFieldValueError(hf.Value)
diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go
index e000203..633202c 100644
--- a/vendor/golang.org/x/net/http2/go18.go
+++ b/vendor/golang.org/x/net/http2/go18.go
@@ -8,6 +8,7 @@ package http2
import (
"crypto/tls"
+ "io"
"net/http"
)
@@ -35,3 +36,15 @@ func configureServer18(h1 *http.Server, h2 *Server) error {
}
return nil
}
+
+func shouldLogPanic(panicValue interface{}) bool {
+ return panicValue != nil && panicValue != http.ErrAbortHandler
+}
+
+func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
+ return req.GetBody
+}
+
+func reqBodyIsNoBody(body io.ReadCloser) bool {
+ return body == http.NoBody
+}
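
The shouldLogPanic hook added above lets the http2 server honor Go 1.8's http.ErrAbortHandler: a handler that panics with that sentinel value aborts the response without the usual stack-trace log. A minimal sketch of a handler using it (route and port are placeholders):

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/abort", func(w http.ResponseWriter, r *http.Request) {
		// Panicking with http.ErrAbortHandler aborts the response;
		// with the change above, the http2 server (like net/http)
		// suppresses the stack trace for this specific value.
		panic(http.ErrAbortHandler)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
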
diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go
index c1fa591..efbf83c 100644
--- a/vendor/golang.org/x/net/http2/not_go18.go
+++ b/vendor/golang.org/x/net/http2/not_go18.go
@@ -6,9 +6,22 @@
package http2
-import "net/http"
+import (
+ "io"
+ "net/http"
+)
func configureServer18(h1 *http.Server, h2 *Server) error {
// No IdleTimeout to sync prior to Go 1.8.
return nil
}
+
+func shouldLogPanic(panicValue interface{}) bool {
+ return panicValue != nil
+}
+
+func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
+ return nil
+}
+
+func reqBodyIsNoBody(io.ReadCloser) bool { return false }
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 370e42e..0431ab0 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -129,10 +129,6 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams
}
-// List of funcs for ConfigureServer to run. Both h1 and h2 are guaranteed
-// to be non-nil.
-var configServerFuncs []func(h1 *http.Server, h2 *Server) error
-
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -192,9 +188,6 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if !haveNPN {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
}
- // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
- // to switch to "h2".
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
@@ -209,17 +202,9 @@ func ConfigureServer(s *http.Server, conf *Server) error {
})
}
s.TLSNextProto[NextProtoTLS] = protoHandler
- s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
return nil
}
-// h1ServerShutdownChan if non-nil provides a func to return a channel
-// that will be closed when the provided *http.Server wants to shut
-// down. This is initialized via an init func in net/http (via its
-// mangled name from x/tools/cmd/bundle). This is only used when http2
-// is bundled into std for now.
-var h1ServerShutdownChan func(*http.Server) <-chan struct{}
-
// ServeConnOpts are options for the Server.ServeConn method.
type ServeConnOpts struct {
// BaseConfig optionally sets the base configuration
@@ -401,8 +386,8 @@ type serverConn struct {
advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
curClientStreams uint32 // number of open streams initiated by the client
curPushedStreams uint32 // number of open streams initiated by server push
- maxStreamID uint32 // max ever seen from client
- maxPushPromiseID uint32 // ID of the last push promise, or 0 if there have been no pushes
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
streams map[uint32]*stream
initialWindowSize int32
maxFrameSize int32
@@ -438,6 +423,11 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
return uint32(n + typicalHeaders*perFieldOverhead)
}
+func (sc *serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
@@ -463,8 +453,7 @@ type stream struct {
numTrailerValues int64
weight uint8
state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detacted from streams map
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
gotTrailerHeader bool // HEADER frame for trailers was seen
wroteHeaders bool // whether we wrote headers (not status 100)
reqBuf []byte // if non-nil, body pipe buffer to return later at EOF
@@ -492,8 +481,14 @@ func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
// a client sends a HEADERS frame on stream 7 without ever sending a
// frame on stream 5, then stream 5 transitions to the "closed"
// state when the first frame for stream 7 is sent or received."
- if streamID <= sc.maxStreamID {
- return stateClosed, nil
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return stateClosed, nil
+ }
}
return stateIdle, nil
}
@@ -718,7 +713,7 @@ func (sc *serverConn) serve() {
}
var gracefulShutdownCh <-chan struct{}
- if sc.hs != nil && h1ServerShutdownChan != nil {
+ if sc.hs != nil {
gracefulShutdownCh = h1ServerShutdownChan(sc.hs)
}
@@ -751,7 +746,7 @@ func (sc *serverConn) serve() {
return
case <-gracefulShutdownCh:
gracefulShutdownCh = nil
- sc.goAwayIn(ErrCodeNo, 0)
+ sc.startGracefulShutdown()
case <-sc.shutdownTimerCh:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -762,7 +757,7 @@ func (sc *serverConn) serve() {
fn(loopNum)
}
- if sc.inGoAway && sc.curClientStreams == 0 && !sc.needToSendGoAway && !sc.writingFrame {
+ if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame {
return
}
}
@@ -878,8 +873,34 @@ func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
sc.serveG.check()
+ // If true, wr will not be written and wr.done will not be signaled.
var ignoreWrite bool
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
+ if wr.StreamID() != 0 {
+ _, isReset := wr.write.(StreamError)
+ if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
+ ignoreWrite = true
+ }
+ }
+
// Don't send a 100-continue response if we've already sent headers.
// See golang.org/issue/14030.
switch wr.write.(type) {
@@ -887,6 +908,11 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
wr.stream.wroteHeaders = true
case write100ContinueHeadersFrame:
if wr.stream.wroteHeaders {
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
+ if wr.done != nil {
+ panic("wr.done != nil for write100ContinueHeadersFrame")
+ }
ignoreWrite = true
}
}
@@ -910,14 +936,15 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
if st != nil {
switch st.state {
case stateHalfClosedLocal:
- panic("internal error: attempt to send frame on half-closed-local stream")
- case stateClosed:
- if st.sentReset || st.gotReset {
- // Skip this frame.
- sc.scheduleFrameWrite()
- return
+ switch wr.write.(type) {
+ case StreamError, handlerPanicRST, writeWindowUpdate:
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
}
- panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wr))
+ case stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
}
}
if wpp, ok := wr.write.(*writePushPromise); ok {
@@ -925,9 +952,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
wpp.promisedID, err = wpp.allocatePromisedID()
if err != nil {
sc.writingFrameAsync = false
- if wr.done != nil {
- wr.done <- err
- }
+ wr.replyToWriter(err)
return
}
}
@@ -960,25 +985,9 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrameAsync = false
wr := res.wr
- st := wr.stream
-
- closeStream := endsStream(wr.write)
-
- if _, ok := wr.write.(handlerPanicRST); ok {
- sc.closeStream(st, errHandlerPanicked)
- }
-
- // Reply (if requested) to the blocked ServeHTTP goroutine.
- if ch := wr.done; ch != nil {
- select {
- case ch <- res.err:
- default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
- }
- }
- wr.write = nil // prevent use (assume it's tainted after wr.done send)
- if closeStream {
+ if writeEndsStream(wr.write) {
+ st := wr.stream
if st == nil {
panic("internal error: expecting non-nil stream")
}
@@ -991,15 +1000,29 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// reading data (see possible TODO at top of
// this file), we go into closed state here
// anyway, after telling the peer we're
- // hanging up on them.
- st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
- errCancel := streamError(st.id, ErrCodeCancel)
- sc.resetStream(errCancel)
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = stateHalfClosedLocal
+ sc.resetStream(streamError(st.id, ErrCodeCancel))
case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete)
}
+ } else {
+ switch v := wr.write.(type) {
+ case StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case handlerPanicRST:
+ sc.closeStream(wr.stream, errHandlerPanicked)
+ }
}
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
sc.scheduleFrameWrite()
}
@@ -1026,7 +1049,7 @@ func (sc *serverConn) scheduleFrameWrite() {
sc.needToSendGoAway = false
sc.startFrameWrite(FrameWriteRequest{
write: &writeGoAway{
- maxStreamID: sc.maxStreamID,
+ maxStreamID: sc.maxClientStreamID,
code: sc.goAwayCode,
},
})
@@ -1053,6 +1076,13 @@ func (sc *serverConn) scheduleFrameWrite() {
sc.inFrameScheduleLoop = false
}
+// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the
+// client we're gracefully shutting down. The connection isn't closed
+// until all current streams are done.
+func (sc *serverConn) startGracefulShutdown() {
+ sc.goAwayIn(ErrCodeNo, 0)
+}
+
func (sc *serverConn) goAway(code ErrCode) {
sc.serveG.check()
var forceCloseIn time.Duration
@@ -1089,8 +1119,7 @@ func (sc *serverConn) resetStream(se StreamError) {
sc.serveG.check()
sc.writeFrame(FrameWriteRequest{write: se})
if st, ok := sc.streams[se.StreamID]; ok {
- st.sentReset = true
- sc.closeStream(st, se)
+ st.resetQueued = true
}
}
@@ -1175,6 +1204,8 @@ func (sc *serverConn) processFrame(f Frame) error {
return sc.processResetStream(f)
case *PriorityFrame:
return sc.processPriority(f)
+ case *GoAwayFrame:
+ return sc.processGoAway(f)
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
@@ -1200,7 +1231,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
- if sc.inGoAway {
+ if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
}
sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
@@ -1209,9 +1240,6 @@ func (sc *serverConn) processPing(f *PingFrame) error {
func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
sc.serveG.check()
- if sc.inGoAway {
- return nil
- }
switch {
case f.StreamID != 0: // stream-level flow control
state, st := sc.state(f.StreamID)
@@ -1244,9 +1272,6 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
sc.serveG.check()
- if sc.inGoAway {
- return nil
- }
state, st := sc.state(f.StreamID)
if state == stateIdle {
@@ -1258,7 +1283,6 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
return ConnectionError(ErrCodeProtocol)
}
if st != nil {
- st.gotReset = true
st.cancelCtx()
sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
}
@@ -1276,12 +1300,15 @@ func (sc *serverConn) closeStream(st *stream, err error) {
} else {
sc.curClientStreams--
}
- if sc.curClientStreams+sc.curPushedStreams == 0 {
- sc.setConnState(http.StateIdle)
- }
delete(sc.streams, st.id)
- if len(sc.streams) == 0 && sc.srv.IdleTimeout != 0 {
- sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ if len(sc.streams) == 0 {
+ sc.setConnState(http.StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
+ if h1ServerKeepAlivesDisabled(sc.hs) {
+ sc.startGracefulShutdown()
+ }
}
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
@@ -1306,9 +1333,6 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
}
return nil
}
- if sc.inGoAway {
- return nil
- }
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
@@ -1380,7 +1404,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check()
- if sc.inGoAway {
+ if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
}
data := f.Data()
@@ -1397,7 +1421,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
- if st == nil || state != stateOpen || st.gotTrailerHeader {
+ if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
@@ -1417,6 +1441,10 @@ func (sc *serverConn) processData(f *DataFrame) error {
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ if st != nil && st.resetQueued {
+ // Already have a stream error in flight. Don't send another.
+ return nil
+ }
return streamError(id, ErrCodeStreamClosed)
}
if st.body == nil {
@@ -1459,6 +1487,20 @@ func (sc *serverConn) processData(f *DataFrame) error {
return nil
}
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdown()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
// isPushed reports whether the stream is server-initiated.
func (st *stream) isPushed() bool {
return st.id%2 == 0
@@ -1511,6 +1553,11 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// open, let it process its own HEADERS frame (trailers at this
// point, if it's valid).
if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
return st.processTrailerHeaders(f)
}
@@ -1519,10 +1566,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// endpoint has opened or reserved. [...] An endpoint that
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- if id <= sc.maxStreamID {
+ if id <= sc.maxClientStreamID {
return ConnectionError(ErrCodeProtocol)
}
- sc.maxStreamID = id
+ sc.maxClientStreamID = id
if sc.idleTimer != nil {
sc.idleTimer.Stop()
@@ -1673,7 +1720,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
} else {
sc.curClientStreams++
}
- if sc.curClientStreams+sc.curPushedStreams == 1 {
+ if sc.curOpenStreams() == 1 {
sc.setConnState(http.StateActive)
}
@@ -1858,15 +1905,17 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
rw.rws.stream.cancelCtx()
if didPanic {
e := recover()
- // Same as net/http:
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
sc.writeFrameFromHandler(FrameWriteRequest{
write: handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
- sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ // Same as net/http:
+ if shouldLogPanic(e) {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ }
return
}
rw.handlerDone()
@@ -2278,8 +2327,9 @@ func (w *responseWriter) CloseNotify() <-chan bool {
if ch == nil {
ch = make(chan bool, 1)
rws.closeNotifierCh = ch
+ cw := rws.stream.cw
go func() {
- rws.stream.cw.Wait() // wait for close
+ cw.Wait() // wait for close
ch <- true
}()
}
@@ -2432,7 +2482,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error {
}
for k := range opts.Header {
if strings.HasPrefix(k, ":") {
- return fmt.Errorf("promised request headers cannot include psuedo header %q", k)
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
}
// These headers are meaningful only if the request has a body,
// but PUSH_PROMISE requests cannot have a body.
@@ -2525,6 +2575,12 @@ func (sc *serverConn) startPush(msg startPushRequest) {
// http://tools.ietf.org/html/rfc7540#section-5.1.1.
// Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
+ if sc.maxPushPromiseID+2 >= 1<<31 {
+ sc.startGracefulShutdown()
+ return 0, ErrPushLimitReached
+ }
sc.maxPushPromiseID += 2
promisedID := sc.maxPushPromiseID
@@ -2539,7 +2595,7 @@ func (sc *serverConn) startPush(msg startPushRequest) {
scheme: msg.url.Scheme,
authority: msg.url.Host,
path: msg.url.RequestURI(),
- header: msg.header,
+ header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
@@ -2646,3 +2702,42 @@ var badTrailer = map[string]bool{
"Transfer-Encoding": true,
"Www-Authenticate": true,
}
+
+// h1ServerShutdownChan returns a channel that will be closed when the
+// provided *http.Server wants to shut down.
+//
+// This is a somewhat hacky way to get at http1 innards. It works
+// when the http2 code is bundled into the net/http package in the
+// standard library. The alternatives ended up making the cmd/go tool
+// depend on http Servers. This is the lightest option for now.
+// This is tested via the TestServeShutdown* tests in net/http.
+func h1ServerShutdownChan(hs *http.Server) <-chan struct{} {
+ if fn := testh1ServerShutdownChan; fn != nil {
+ return fn(hs)
+ }
+ var x interface{} = hs
+ type I interface {
+ getDoneChan() <-chan struct{}
+ }
+ if hs, ok := x.(I); ok {
+ return hs.getDoneChan()
+ }
+ return nil
+}
+
+// optional test hook for h1ServerShutdownChan.
+var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{}
+
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
+ var x interface{} = hs
+ type I interface {
+ doKeepAlives() bool
+ }
+ if hs, ok := x.(I); ok {
+ return !hs.doKeepAlives()
+ }
+ return false
+}
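
h1ServerShutdownChan and h1ServerKeepAlivesDisabled above probe *http.Server through locally declared interfaces so the bundled http2 code can call unexported net/http hooks without a hard dependency. A standalone sketch of that probing pattern, with hypothetical types, looks roughly like this:

package main

import "fmt"

// server stands in for a concrete type whose extra method we may or may
// not be able to name directly (hypothetical example type).
type server struct{ keepAlives bool }

func (s *server) doKeepAlives() bool { return s.keepAlives }

// keepAlivesDisabled probes an arbitrary value for a doKeepAlives method
// by asserting it against a locally declared interface, mirroring
// h1ServerKeepAlivesDisabled in the diff above.
func keepAlivesDisabled(v interface{}) bool {
	type keepAliver interface {
		doKeepAlives() bool
	}
	if ka, ok := v.(keepAliver); ok {
		return !ka.doKeepAlives()
	}
	return false // unknown type: assume keep-alives are enabled
}

func main() {
	fmt.Println(keepAlivesDisabled(&server{keepAlives: false})) // true
	fmt.Println(keepAlivesDisabled(&server{keepAlives: true}))  // false
	fmt.Println(keepAlivesDisabled(42))                         // false
}
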
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 129c8e0..9f60c29 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -191,6 +191,7 @@ type clientStream struct {
ID uint32
resc chan resAndError
bufPipe pipe // buffered pipe with the flow-controlled response payload
+ startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
on100 func() // optional code to run if get a 100 continue response
@@ -199,6 +200,7 @@ type clientStream struct {
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+ didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
peerReset chan struct{} // closed on peer reset
resetErr error // populated before peerReset is closed
@@ -226,15 +228,26 @@ func (cs *clientStream) awaitRequestCancel(req *http.Request) {
}
select {
case <-req.Cancel:
+ cs.cancelStream()
cs.bufPipe.CloseWithError(errRequestCanceled)
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
case <-ctx.Done():
+ cs.cancelStream()
cs.bufPipe.CloseWithError(ctx.Err())
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
case <-cs.done:
}
}
+func (cs *clientStream) cancelStream() {
+ cs.cc.mu.Lock()
+ didReset := cs.didReset
+ cs.didReset = true
+ cs.cc.mu.Unlock()
+
+ if !didReset {
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ }
+}
+
// checkResetOrDone reports any error sent in a RST_STREAM frame by the
// server, or errStreamClosed if the stream is complete.
func (cs *clientStream) checkResetOrDone() error {
@@ -302,6 +315,10 @@ func authorityAddr(scheme string, authority string) (addr string) {
if a, err := idna.ToASCII(host); err == nil {
host = a
}
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
return net.JoinHostPort(host, port)
}
@@ -320,8 +337,10 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
traceGotConn(req, cc)
res, err := cc.RoundTrip(req)
- if shouldRetryRequest(req, err) {
- continue
+ if err != nil {
+ if req, err = shouldRetryRequest(req, err); err == nil {
+ continue
+ }
}
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
@@ -343,12 +362,41 @@ func (t *Transport) CloseIdleConnections() {
var (
errClientConnClosed = errors.New("http2: client conn is closed")
errClientConnUnusable = errors.New("http2: client conn not usable")
+
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written")
)
-func shouldRetryRequest(req *http.Request, err error) bool {
- // TODO: retry GET requests (no bodies) more aggressively, if shutdown
- // before response.
- return err == errClientConnUnusable
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) {
+ switch err {
+ default:
+ return nil, err
+ case errClientConnUnusable, errClientConnGotGoAway:
+ return req, nil
+ case errClientConnGotGoAwayAfterSomeReqBody:
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || reqBodyIsNoBody(req.Body) {
+ return req, nil
+ }
+ // Otherwise we depend on the Request having its GetBody
+ // func defined.
+ getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
+ if getBody == nil {
+ return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error")
+ }
+ body, err := getBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+ }
}
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
@@ -501,6 +549,15 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
if old != nil && old.ErrCode != ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID > last {
+ select {
+ case cs.resc <- resAndError{err: errClientConnGotGoAway}:
+ default:
+ }
+ }
+ }
}
func (cc *ClientConn) CanTakeNewRequest() bool {
@@ -761,6 +818,13 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
+ if re.err == errClientConnGotGoAway {
+ cc.mu.Lock()
+ if cs.startedWrite {
+ re.err = errClientConnGotGoAwayAfterSomeReqBody
+ }
+ cc.mu.Unlock()
+ }
cc.forgetStreamID(cs.ID)
return nil, re.err
}
@@ -1666,9 +1730,10 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc.bw.Flush()
cc.wmu.Unlock()
}
+ didReset := cs.didReset
cc.mu.Unlock()
- if len(data) > 0 {
+ if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
@@ -2000,6 +2065,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
resc := make(chan error, 1)
s.resc = resc
s.fn = func() {
+ cs.cc.mu.Lock()
+ cs.startedWrite = true
+ cs.cc.mu.Unlock()
resc <- cs.writeRequestBody(body, cs.req.Body)
}
s.delay = t.expectContinueTimeout()
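
Per the new shouldRetryRequest above, a request interrupted by a graceful-shutdown GOAWAY is only replayed if its body was never written or Request.GetBody can recreate it. On Go 1.8, http.NewRequest sets GetBody automatically for *bytes.Reader, *bytes.Buffer, and *strings.Reader bodies; it can also be set by hand. A hedged sketch (URL and payload are placeholders):

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	// NewRequest with a *bytes.Reader fills in req.GetBody (Go 1.8+), so the
	// http2 transport can rewind the body and retry after a graceful GOAWAY.
	req, err := http.NewRequest("POST", "https://example.com/upload", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}

	// GetBody can also be supplied explicitly, which is needed for custom body types.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.Status)
}
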
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 1c135fd..6b0dfae 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -45,9 +45,10 @@ type writeContext interface {
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
-// endsStream reports whether the given frame writer w will locally
-// close the stream.
-func endsStream(w writeFramer) bool {
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func writeEndsStream(w writeFramer) bool {
switch v := w.(type) {
case *writeData:
return v.endStream
@@ -57,7 +58,7 @@ func endsStream(w writeFramer) bool {
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
- panic("endsStream called on nil writeFramer")
+ panic("writeEndsStream called on nil writeFramer")
}
return false
}
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index 9f3e1b3..4fe3073 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -25,7 +25,9 @@ type WriteScheduler interface {
// https://tools.ietf.org/html/rfc7540#section-5.1
AdjustStream(streamID uint32, priority PriorityParam)
- // Push queues a frame in the scheduler.
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
Push(wr FrameWriteRequest)
// Pop dequeues the next frame to write. Returns false if no frames can
@@ -62,6 +64,13 @@ type FrameWriteRequest struct {
// 0 is used for non-stream frames such as PING and SETTINGS.
func (wr FrameWriteRequest) StreamID() uint32 {
if wr.stream == nil {
+ if se, ok := wr.write.(StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
+ }
return 0
}
return wr.stream.id
@@ -142,17 +151,27 @@ func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteReque
// String is for debugging only.
func (wr FrameWriteRequest) String() string {
- var streamID uint32
- if wr.stream != nil {
- streamID = wr.stream.id
- }
var des string
if s, ok := wr.write.(fmt.Stringer); ok {
des = s.String()
} else {
des = fmt.Sprintf("%T", wr.write)
}
- return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", streamID, wr.done != nil, des)
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block.
+// This does nothing if wr.done is nil.
+func (wr *FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
+ return
+ }
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
+ }
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
}
// writeQueue is used by implementations of WriteScheduler.
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
index 40108b0..0113272 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -388,7 +388,15 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
} else {
n = ws.nodes[id]
if n == nil {
- panic("add on non-open stream")
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. However, wr can be a RST_STREAM. In this case, we
+ // push wr onto the root, rather than creating a new priorityNode,
+ // since RST_STREAM is tiny and the stream's priority is unknown
+ // anyway. See issue #17919.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
}
}
n.q.push(wr)
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
index 1826586..ecd766e 100644
--- a/vendor/golang.org/x/net/trace/trace.go
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -752,7 +752,7 @@ func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
and very unlikely to be the fault of this code.
The most likely scenario is that some code elsewhere is using
- a requestz.Trace after its Finish method is called.
+ a trace.Trace after its Finish method is called.
You can temporarily set the DebugUseAfterFinish var
to help discover where that is; do not leave that var set,
since it makes this package much less efficient.
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
index dc993ef..4243f4c 100644
--- a/vendor/golang.org/x/oauth2/google/appengine.go
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -20,6 +20,9 @@ var appengineVM bool
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineAppIDFunc func(c context.Context) string
+
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
index 4f42c8b..6f66411 100644
--- a/vendor/golang.org/x/oauth2/google/appengine_hook.go
+++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -10,4 +10,5 @@ import "google.golang.org/appengine"
func init() {
appengineTokenFunc = appengine.AccessToken
+ appengineAppIDFunc = appengine.AppID
}
diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
index 633611c..1074780 100644
--- a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
+++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go
@@ -11,4 +11,5 @@ import "google.golang.org/appengine"
func init() {
appengineVM = true
appengineTokenFunc = appengine.AccessToken
+ appengineAppIDFunc = appengine.AppID
}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index 565d731..b45e796 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -6,7 +6,6 @@ package google
import (
"encoding/json"
- "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -17,19 +16,18 @@ import (
"cloud.google.com/go/compute/metadata"
"golang.org/x/net/context"
"golang.org/x/oauth2"
- "golang.org/x/oauth2/jwt"
)
-// DefaultClient returns an HTTP Client that uses the
-// DefaultTokenSource to obtain authentication credentials.
-//
-// This client should be used when developing services
-// that run on Google App Engine or Google Compute Engine
-// and use "Application Default Credentials."
-//
+// DefaultCredentials holds "Application Default Credentials".
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
-//
+type DefaultCredentials struct {
+ ProjectID string // may be empty
+ TokenSource oauth2.TokenSource
+}
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
ts, err := DefaultTokenSource(ctx, scope...)
if err != nil {
@@ -38,8 +36,18 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
return oauth2.NewClient(ctx, ts), nil
}
-// DefaultTokenSource is a token source that uses
+// DefaultTokenSource returns the token source for
// "Application Default Credentials".
+// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource.
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+ creds, err := FindDefaultCredentials(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return creds.TokenSource, nil
+}
+
+// FindDefaultCredentials searches for "Application Default Credentials".
//
// It looks for credentials in the following places,
// preferring the first location found:
@@ -53,45 +61,40 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
// credentials from the metadata server.
// (In this final case any provided scopes are ignored.)
-//
-// For more details, see:
-// https://developers.google.com/accounts/docs/application-default-credentials
-//
-func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
- ts, err := tokenSourceFromFile(ctx, filename, scope)
+ creds, err := readCredentialsFile(ctx, filename, scope)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
- return ts, nil
+ return creds, nil
}
// Second, try a well-known file.
filename := wellKnownFile()
- _, err := os.Stat(filename)
- if err == nil {
- ts, err2 := tokenSourceFromFile(ctx, filename, scope)
- if err2 == nil {
- return ts, nil
- }
- err = err2
- } else if os.IsNotExist(err) {
- err = nil // ignore this error
- }
- if err != nil {
+ if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
+ return creds, nil
+ } else if !os.IsNotExist(err) {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
// Third, if we're on Google App Engine use those credentials.
if appengineTokenFunc != nil && !appengineVM {
- return AppEngineTokenSource(ctx, scope...), nil
+ return &DefaultCredentials{
+ ProjectID: appengineAppIDFunc(ctx),
+ TokenSource: AppEngineTokenSource(ctx, scope...),
+ }, nil
}
// Fourth, if we're on Google Compute Engine use the metadata server.
if metadata.OnGCE() {
- return ComputeTokenSource(""), nil
+ id, _ := metadata.ProjectID()
+ return &DefaultCredentials{
+ ProjectID: id,
+ TokenSource: ComputeTokenSource(""),
+ }, nil
}
// None are found; return helpful error.
@@ -107,49 +110,21 @@ func wellKnownFile() string {
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
}
-func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
- var d struct {
- // Common fields
- Type string
- ClientID string `json:"client_id"`
-
- // User Credential fields
- ClientSecret string `json:"client_secret"`
- RefreshToken string `json:"refresh_token"`
-
- // Service Account fields
- ClientEmail string `json:"client_email"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- }
- if err := json.Unmarshal(b, &d); err != nil {
+ var f credentialsFile
+ if err := json.Unmarshal(b, &f); err != nil {
return nil, err
}
- switch d.Type {
- case "authorized_user":
- cfg := &oauth2.Config{
- ClientID: d.ClientID,
- ClientSecret: d.ClientSecret,
- Scopes: append([]string{}, scopes...), // copy
- Endpoint: Endpoint,
- }
- tok := &oauth2.Token{RefreshToken: d.RefreshToken}
- return cfg.TokenSource(ctx, tok), nil
- case "service_account":
- cfg := &jwt.Config{
- Email: d.ClientEmail,
- PrivateKey: []byte(d.PrivateKey),
- Scopes: append([]string{}, scopes...), // copy
- TokenURL: JWTTokenURL,
- }
- return cfg.TokenSource(ctx), nil
- case "":
- return nil, errors.New("missing 'type' field in credentials")
- default:
- return nil, fmt.Errorf("unknown credential type: %q", d.Type)
+ ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+ if err != nil {
+ return nil, err
}
+ return &DefaultCredentials{
+ ProjectID: f.ProjectID,
+ TokenSource: ts,
+ }, nil
}
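
FindDefaultCredentials, added above, exposes both the token source and, when discoverable, the project ID. A minimal usage sketch; the storage scope is only an example:

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// Walks the Application Default Credentials search order described above:
	// GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known file, App Engine,
	// then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("project:", creds.ProjectID) // may be empty

	// An authenticated HTTP client backed by the discovered TokenSource.
	client := oauth2.NewClient(ctx, creds.TokenSource)
	_ = client
}
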
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
index a48d5bf..66a8b0e 100644
--- a/vendor/golang.org/x/oauth2/google/google.go
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -22,6 +22,7 @@ import (
"time"
"cloud.google.com/go/compute/metadata"
+ "golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
)
@@ -85,26 +86,74 @@ func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
// Create a service account on "Credentials" for your project at
// https://console.developers.google.com to download a JSON key file.
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
- var key struct {
- Email string `json:"client_email"`
- PrivateKey string `json:"private_key"`
- PrivateKeyID string `json:"private_key_id"`
- TokenURL string `json:"token_uri"`
- }
- if err := json.Unmarshal(jsonKey, &key); err != nil {
+ var f credentialsFile
+ if err := json.Unmarshal(jsonKey, &f); err != nil {
return nil, err
}
- config := &jwt.Config{
- Email: key.Email,
- PrivateKey: []byte(key.PrivateKey),
- PrivateKeyID: key.PrivateKeyID,
- Scopes: scope,
- TokenURL: key.TokenURL,
+ if f.Type != serviceAccountKey {
+ return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey)
}
- if config.TokenURL == "" {
- config.TokenURL = JWTTokenURL
+ scope = append([]string(nil), scope...) // copy
+ return f.jwtConfig(scope), nil
+}
+
+// JSON key file types.
+const (
+ serviceAccountKey = "service_account"
+ userCredentialsKey = "authorized_user"
+)
+
+// credentialsFile is the unmarshalled representation of a credentials file.
+type credentialsFile struct {
+ Type string `json:"type"` // serviceAccountKey or userCredentialsKey
+
+ // Service Account fields
+ ClientEmail string `json:"client_email"`
+ PrivateKeyID string `json:"private_key_id"`
+ PrivateKey string `json:"private_key"`
+ TokenURL string `json:"token_uri"`
+ ProjectID string `json:"project_id"`
+
+ // User Credential fields
+ // (These typically come from gcloud auth.)
+ ClientSecret string `json:"client_secret"`
+ ClientID string `json:"client_id"`
+ RefreshToken string `json:"refresh_token"`
+}
+
+func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
+ cfg := &jwt.Config{
+ Email: f.ClientEmail,
+ PrivateKey: []byte(f.PrivateKey),
+ PrivateKeyID: f.PrivateKeyID,
+ Scopes: scopes,
+ TokenURL: f.TokenURL,
+ }
+ if cfg.TokenURL == "" {
+ cfg.TokenURL = JWTTokenURL
+ }
+ return cfg
+}
+
+func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
+ switch f.Type {
+ case serviceAccountKey:
+ cfg := f.jwtConfig(scopes)
+ return cfg.TokenSource(ctx), nil
+ case userCredentialsKey:
+ cfg := &oauth2.Config{
+ ClientID: f.ClientID,
+ ClientSecret: f.ClientSecret,
+ Scopes: scopes,
+ Endpoint: Endpoint,
+ }
+ tok := &oauth2.Token{RefreshToken: f.RefreshToken}
+ return cfg.TokenSource(ctx, tok), nil
+ case "":
+ return nil, errors.New("missing 'type' field in credentials")
+ default:
+ return nil, fmt.Errorf("unknown credential type: %q", f.Type)
}
- return config, nil
}
// ComputeTokenSource returns a token source that fetches access tokens
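
After this refactor, JWTConfigFromJSON accepts only keys whose type field is "service_account". A short sketch of the usual service-account flow (the key path and scope are placeholders):

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/google"
)

func main() {
	data, err := ioutil.ReadFile("/path/to/service-account.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Fails with a descriptive error if the key is not a service_account key.
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}

	// conf.Client returns an *http.Client that signs JWTs with the key
	// (the "kid" header now carries conf.PrivateKeyID, per the jwt change below).
	client := conf.Client(context.Background())
	_ = client
}
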
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
index d29a3bb..bdc1808 100644
--- a/vendor/golang.org/x/oauth2/google/sdk.go
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -160,9 +160,13 @@ var sdkConfigPath = func() (string, error) {
}
func guessUnixHomeDir() string {
- usr, err := user.Current()
- if err == nil {
- return usr.HomeDir
+ // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+ if v := os.Getenv("HOME"); v != "" {
+ return v
}
- return os.Getenv("HOME")
+ // Else, fall back to user.Current:
+ if u, err := user.Current(); err == nil {
+ return u.HomeDir
+ }
+ return ""
}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index 18328a0..1c0ec76 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -117,6 +117,8 @@ var brokenAuthHeaderProviders = []string{
"https://www.strava.com/oauth/",
"https://www.wunderlist.com/oauth/",
"https://api.patreon.com/",
+ "https://sandbox.codeswholesale.com/oauth/token",
+ "https://api.codeswholesale.com/oauth/token",
}
func RegisterBrokenAuthHeaderProvider(tokenURL string) {
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
index f4b9523..e016db4 100644
--- a/vendor/golang.org/x/oauth2/jwt/jwt.go
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -105,7 +105,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) {
if t := js.conf.Expires; t > 0 {
claimSet.Exp = time.Now().Add(t).Unix()
}
- payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ h := *defaultHeader
+ h.KeyID = js.conf.PrivateKeyID
+ payload, err := jws.Encode(&h, claimSet, pk)
if err != nil {
return nil, err
}
diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
new file mode 100644
index 0000000..2ea4257
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -0,0 +1,31 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips mipsle
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for mips, Linux
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-28
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+ JMP syscall·RawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
index 362831c..fc0e50e 100644
--- a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
+++ b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
@@ -1,4 +1,4 @@
-// +build linux,386 linux,arm
+// +build linux,386 linux,arm linux,mips linux,mipsle
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 33b7922..7e6276b 100755
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -128,6 +128,7 @@ includes_Linux='
#include <linux/wait.h>
#include <linux/icmpv6.h>
#include <linux/serial.h>
+#include <linux/can.h>
#include <net/route.h>
#include <asm/termbits.h>
@@ -339,6 +340,7 @@ ccflags="$@"
$2 !~ /^(BPF_TIMEVAL)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^CLOCK_/ ||
+ $2 ~ /^CAN_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next}
diff --git a/vendor/golang.org/x/sys/unix/mksysnum_linux.pl b/vendor/golang.org/x/sys/unix/mksysnum_linux.pl
index 4d4017d..52b1613 100755
--- a/vendor/golang.org/x/sys/unix/mksysnum_linux.pl
+++ b/vendor/golang.org/x/sys/unix/mksysnum_linux.pl
@@ -23,6 +23,8 @@ package unix
const(
EOF
+my $offset = 0;
+
sub fmt {
my ($name, $num) = @_;
if($num > 999){
@@ -31,13 +33,18 @@ sub fmt {
return;
}
$name =~ y/a-z/A-Z/;
+ $num = $num + $offset;
print " SYS_$name = $num;\n";
}
my $prev;
open(GCC, "gcc -E -dD $ARGV[0] |") || die "can't run gcc";
while(<GCC>){
- if(/^#define __NR_syscalls\s+/) {
+ if(/^#define __NR_Linux\s+([0-9]+)/){
+ # mips/mips64: extract offset
+ $offset = $1;
+ }
+ elsif(/^#define __NR_syscalls\s+/) {
# ignore redefinitions of __NR_syscalls
}
elsif(/^#define __NR_(\w+)\s+([0-9]+)/){
@@ -51,6 +58,9 @@ while(<GCC>){
elsif(/^#define __NR_(\w+)\s+\(\w+\+\s*([0-9]+)\)/){
fmt($1, $prev+$2)
}
+ elsif(/^#define __NR_(\w+)\s+\(__NR_Linux \+ ([0-9]+)/){
+ fmt($1, $2);
+ }
}
print <<EOF;
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index e967176..ccb29c7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -470,25 +470,11 @@ func Sysctl(name string) (string, error) {
}
func SysctlArgs(name string, args ...int) (string, error) {
- mib, err := sysctlmib(name, args...)
+ buf, err := SysctlRaw(name, args...)
if err != nil {
return "", err
}
-
- // Find size.
- n := uintptr(0)
- if err := sysctl(mib, nil, &n, nil, 0); err != nil {
- return "", err
- }
- if n == 0 {
- return "", nil
- }
-
- // Read into buffer of that size.
- buf := make([]byte, n)
- if err := sysctl(mib, &buf[0], &n, nil, 0); err != nil {
- return "", err
- }
+ n := len(buf)
// Throw away terminating NUL.
if n > 0 && buf[n-1] == '\x00' {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index cfac4a4..01c569a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -411,6 +411,47 @@ func (sa *SockaddrHCI) sockaddr() (unsafe.Pointer, _Socklen, error) {
return unsafe.Pointer(&sa.raw), SizeofSockaddrHCI, nil
}
+// SockaddrCAN implements the Sockaddr interface for AF_CAN type sockets.
+// The RxID and TxID fields are used for transport protocol addressing in
+// (CAN_TP16, CAN_TP20, CAN_MCNET, and CAN_ISOTP); they can be left with
+// zero values for CAN_RAW and CAN_BCM sockets, where they have no meaning.
+//
+// The SockaddrCAN struct must be bound to the socket file descriptor
+// using Bind before the CAN socket can be used.
+//
+// // Read one raw CAN frame
+// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
+// addr := &SockaddrCAN{Ifindex: index}
+// Bind(fd, addr)
+// frame := make([]byte, 16)
+// Read(fd, frame)
+//
+// The full SocketCAN documentation can be found in the linux kernel
+// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
+type SockaddrCAN struct {
+ Ifindex int
+ RxID uint32
+ TxID uint32
+ raw RawSockaddrCAN
+}
+
+func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_CAN
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
+ for i := 0; i < 4; i++ {
+ sa.raw.Addr[i] = rx[i]
+ }
+ tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
+ for i := 0; i < 4; i++ {
+ sa.raw.Addr[i+4] = tx[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
+}
+
func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
switch rsa.Addr.Family {
case AF_NETLINK:
@@ -899,6 +940,7 @@ func Getpgrp() (pid int) {
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
@@ -911,7 +953,7 @@ func Getpgrp() (pid int) {
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
-//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64
+//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys read(fd int, p []byte) (n int, err error)
//sys Removexattr(path string, attr string) (err error)
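
A minimal sketch of how the SockaddrCAN type added above might be used to read one raw CAN frame on Linux; the interface name "can0" is an assumption for illustration, not part of this change.

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	// Resolve the kernel interface index of the CAN device (assumed name "can0").
	iface, err := net.InterfaceByName("can0")
	if err != nil {
		fmt.Println("lookup can0:", err)
		return
	}

	// Open a raw CAN socket and bind it to the interface, as described
	// in the SockaddrCAN documentation above.
	fd, err := unix.Socket(unix.AF_CAN, unix.SOCK_RAW, unix.CAN_RAW)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	if err := unix.Bind(fd, &unix.SockaddrCAN{Ifindex: iface.Index}); err != nil {
		fmt.Println("bind:", err)
		return
	}

	// A classic CAN frame is 16 bytes (CAN_MTU).
	frame := make([]byte, unix.CAN_MTU)
	n, err := unix.Read(fd, frame)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Printf("read %d bytes: % x\n", n, frame[:n])
}
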
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index 18911c2..9516a3f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -6,8 +6,6 @@
package unix
-import "syscall"
-
//sys Dup2(oldfd int, newfd int) (err error)
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
@@ -63,9 +61,6 @@ import "syscall"
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
-//go:noescape
-func gettimeofday(tv *Timeval) (err syscall.Errno)
-
func Gettimeofday(tv *Timeval) (err error) {
errno := gettimeofday(tv)
if errno != 0 {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
new file mode 100644
index 0000000..21a4946
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,linux
+// +build !gccgo
+
+package unix
+
+import "syscall"
+
+//go:noescape
+func gettimeofday(tv *Timeval) (err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
new file mode 100644
index 0000000..5ed8013
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -0,0 +1,241 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips mipsle
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getuid() (uid int)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Listen(s int, n int) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Shutdown(fd int, how int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
+//sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
+//sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error)
+//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
+//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
+//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
+//sysnb socket(domain int, typ int, proto int) (fd int, err error)
+//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
+//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
+//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
+//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
+//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
+//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
+
+//sysnb InotifyInit() (fd int, err error)
+//sys Ioperm(from int, num int, on int) (err error)
+//sys Iopl(level int) (err error)
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+//sysnb Time(t *Time_t) (tt Time_t, err error)
+
+//sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
+//sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64
+//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
+
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Pause() (err error)
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e := Syscall(SYS_FSTATFS64, uintptr(fd), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ use(unsafe.Pointer(buf))
+ if e != 0 {
+ err = errnoErr(e)
+ }
+ return
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ p, err := BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+ _, _, e := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(p)), unsafe.Sizeof(*buf), uintptr(unsafe.Pointer(buf)))
+ use(unsafe.Pointer(p))
+ if e != 0 {
+ err = errnoErr(e)
+ }
+ return
+}
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+ _, _, e := Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offset>>32), uintptr(offset), uintptr(unsafe.Pointer(&off)), uintptr(whence), 0)
+ if e != 0 {
+ err = errnoErr(e)
+ }
+ return
+}
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = int32(nsec / 1e9)
+ ts.Nsec = int32(nsec % 1e9)
+ return
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = int32(nsec / 1e9)
+ tv.Usec = int32(nsec % 1e9 / 1e3)
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
+
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ page := uintptr(offset / 4096)
+ if offset != int64(page)*4096 {
+ return 0, EINVAL
+ }
+ return mmap2(addr, length, prot, flags, fd, page)
+}
+
+const rlimInf32 = ^uint32(0)
+const rlimInf64 = ^uint64(0)
+
+type rlimit32 struct {
+ Cur uint32
+ Max uint32
+}
+
+//sysnb getrlimit(resource int, rlim *rlimit32) (err error) = SYS_GETRLIMIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, nil, rlim)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ err = getrlimit(resource, &rl)
+ if err != nil {
+ return
+ }
+
+ if rl.Cur == rlimInf32 {
+ rlim.Cur = rlimInf64
+ } else {
+ rlim.Cur = uint64(rl.Cur)
+ }
+
+ if rl.Max == rlimInf32 {
+ rlim.Max = rlimInf64
+ } else {
+ rlim.Max = uint64(rl.Max)
+ }
+ return
+}
+
+//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ err = prlimit(0, resource, rlim, nil)
+ if err != ENOSYS {
+ return err
+ }
+
+ rl := rlimit32{}
+ if rlim.Cur == rlimInf64 {
+ rl.Cur = rlimInf32
+ } else if rlim.Cur < uint64(rlimInf32) {
+ rl.Cur = uint32(rlim.Cur)
+ } else {
+ return EINVAL
+ }
+ if rlim.Max == rlimInf64 {
+ rl.Max = rlimInf32
+ } else if rlim.Max < uint64(rlimInf32) {
+ rl.Max = uint32(rlim.Max)
+ } else {
+ return EINVAL
+ }
+
+ return setrlimit(resource, &rl)
+}
+
+func (r *PtraceRegs) PC() uint64 { return uint64(r.Regs[64]) }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Regs[64] = uint32(pc) }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint32(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint32(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint32(length)
+}
+
+//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+ if len(fds) == 0 {
+ return poll(nil, 0, timeout)
+ }
+ return poll(&fds[0], len(fds), timeout)
+}
+
+func Getpagesize() int { return 4096 }
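
For context, a small sketch (assuming a linux/mips or linux/mipsle target) of the caller-visible behaviour of the Getrlimit wrapper defined in this new file: limits come back as 64-bit values even when the wrapper falls back from prlimit64 to the legacy 32-bit getrlimit syscall.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var lim unix.Rlimit
	// On mips/mipsle this tries prlimit64 first and falls back to the
	// 32-bit getrlimit, widening RLIM_INFINITY as needed.
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &lim); err != nil {
		fmt.Println("getrlimit:", err)
		return
	}
	fmt.Printf("open files: soft=%d hard=%d\n", lim.Cur, lim.Max)
}
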
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index b46b250..8a5237d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -49,11 +49,6 @@ func errnoErr(e syscall.Errno) error {
return e
}
-func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
-func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
-
// Mmap manager, for use by operating system-specific implementations.
type mmapper struct {
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go
new file mode 100644
index 0000000..4cb8e8e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !gccgo
+
+package unix
+
+import "syscall"
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno)
+func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno)
diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go
index de80e2c..f3d8f90 100644
--- a/vendor/golang.org/x/sys/unix/types_linux.go
+++ b/vendor/golang.org/x/sys/unix/types_linux.go
@@ -58,6 +58,7 @@ package unix
#include <utime.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
+#include <linux/can.h>
#ifdef TCSETS2
// On systems that have "struct termios2" use this as type Termios.
@@ -125,7 +126,7 @@ typedef struct {} ptracePer;
// The real epoll_event is a union, and godefs doesn't handle it well.
struct my_epoll_event {
uint32_t events;
-#if defined(__ARM_EABI__) || defined(__aarch64__)
+#if defined(__ARM_EABI__) || defined(__aarch64__) || (defined(__mips__) && _MIPS_SIM == _ABIO32)
// padding is not specified in linux/eventpoll.h but added to conform to the
// alignment requirements of EABI
int32_t padFd;
@@ -218,6 +219,8 @@ type RawSockaddrNetlink C.struct_sockaddr_nl
type RawSockaddrHCI C.struct_sockaddr_hci
+type RawSockaddrCAN C.struct_sockaddr_can
+
type RawSockaddr C.struct_sockaddr
type RawSockaddrAny C.struct_sockaddr_any
@@ -258,6 +261,7 @@ const (
SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll
SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl
SizeofSockaddrHCI = C.sizeof_struct_sockaddr_hci
+ SizeofSockaddrCAN = C.sizeof_struct_sockaddr_can
SizeofLinger = C.sizeof_struct_linger
SizeofIPMreq = C.sizeof_struct_ip_mreq
SizeofIPMreqn = C.sizeof_struct_ip_mreqn
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8f92012..b40d029 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -190,6 +190,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 49b6c35..9f0600c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -190,6 +190,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index f036758..647a796 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -186,6 +186,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 16dcbc9..a6d1e1f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -196,6 +196,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
new file mode 100644
index 0000000..e4fb9ad
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -0,0 +1,1814 @@
+// mkerrors.sh
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips,linux
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ AF_ALG = 0x26
+ AF_APPLETALK = 0x5
+ AF_ASH = 0x12
+ AF_ATMPVC = 0x8
+ AF_ATMSVC = 0x14
+ AF_AX25 = 0x3
+ AF_BLUETOOTH = 0x1f
+ AF_BRIDGE = 0x7
+ AF_CAIF = 0x25
+ AF_CAN = 0x1d
+ AF_DECnet = 0xc
+ AF_ECONET = 0x13
+ AF_FILE = 0x1
+ AF_IEEE802154 = 0x24
+ AF_INET = 0x2
+ AF_INET6 = 0xa
+ AF_IPX = 0x4
+ AF_IRDA = 0x17
+ AF_ISDN = 0x22
+ AF_IUCV = 0x20
+ AF_KEY = 0xf
+ AF_LLC = 0x1a
+ AF_LOCAL = 0x1
+ AF_MAX = 0x27
+ AF_NETBEUI = 0xd
+ AF_NETLINK = 0x10
+ AF_NETROM = 0x6
+ AF_PACKET = 0x11
+ AF_PHONET = 0x23
+ AF_PPPOX = 0x18
+ AF_RDS = 0x15
+ AF_ROSE = 0xb
+ AF_ROUTE = 0x10
+ AF_RXRPC = 0x21
+ AF_SECURITY = 0xe
+ AF_SNA = 0x16
+ AF_TIPC = 0x1e
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ AF_WANPIPE = 0x19
+ AF_X25 = 0x9
+ ARPHRD_ADAPT = 0x108
+ ARPHRD_APPLETLK = 0x8
+ ARPHRD_ARCNET = 0x7
+ ARPHRD_ASH = 0x30d
+ ARPHRD_ATM = 0x13
+ ARPHRD_AX25 = 0x3
+ ARPHRD_BIF = 0x307
+ ARPHRD_CAIF = 0x336
+ ARPHRD_CAN = 0x118
+ ARPHRD_CHAOS = 0x5
+ ARPHRD_CISCO = 0x201
+ ARPHRD_CSLIP = 0x101
+ ARPHRD_CSLIP6 = 0x103
+ ARPHRD_DDCMP = 0x205
+ ARPHRD_DLCI = 0xf
+ ARPHRD_ECONET = 0x30e
+ ARPHRD_EETHER = 0x2
+ ARPHRD_ETHER = 0x1
+ ARPHRD_EUI64 = 0x1b
+ ARPHRD_FCAL = 0x311
+ ARPHRD_FCFABRIC = 0x313
+ ARPHRD_FCPL = 0x312
+ ARPHRD_FCPP = 0x310
+ ARPHRD_FDDI = 0x306
+ ARPHRD_FRAD = 0x302
+ ARPHRD_HDLC = 0x201
+ ARPHRD_HIPPI = 0x30c
+ ARPHRD_HWX25 = 0x110
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ ARPHRD_IEEE80211 = 0x321
+ ARPHRD_IEEE80211_PRISM = 0x322
+ ARPHRD_IEEE80211_RADIOTAP = 0x323
+ ARPHRD_IEEE802154 = 0x324
+ ARPHRD_IEEE802_TR = 0x320
+ ARPHRD_INFINIBAND = 0x20
+ ARPHRD_IPDDP = 0x309
+ ARPHRD_IPGRE = 0x30a
+ ARPHRD_IRDA = 0x30f
+ ARPHRD_LAPB = 0x204
+ ARPHRD_LOCALTLK = 0x305
+ ARPHRD_LOOPBACK = 0x304
+ ARPHRD_METRICOM = 0x17
+ ARPHRD_NETROM = 0x0
+ ARPHRD_NONE = 0xfffe
+ ARPHRD_PHONET = 0x334
+ ARPHRD_PHONET_PIPE = 0x335
+ ARPHRD_PIMREG = 0x30b
+ ARPHRD_PPP = 0x200
+ ARPHRD_PRONET = 0x4
+ ARPHRD_RAWHDLC = 0x206
+ ARPHRD_ROSE = 0x10e
+ ARPHRD_RSRVD = 0x104
+ ARPHRD_SIT = 0x308
+ ARPHRD_SKIP = 0x303
+ ARPHRD_SLIP = 0x100
+ ARPHRD_SLIP6 = 0x102
+ ARPHRD_TUNNEL = 0x300
+ ARPHRD_TUNNEL6 = 0x301
+ ARPHRD_VOID = 0xffff
+ ARPHRD_X25 = 0x10f
+ B0 = 0x0
+ B1000000 = 0x1008
+ B110 = 0x3
+ B115200 = 0x1002
+ B1152000 = 0x1009
+ B1200 = 0x9
+ B134 = 0x4
+ B150 = 0x5
+ B1500000 = 0x100a
+ B1800 = 0xa
+ B19200 = 0xe
+ B200 = 0x6
+ B2000000 = 0x100b
+ B230400 = 0x1003
+ B2400 = 0xb
+ B2500000 = 0x100c
+ B300 = 0x7
+ B3000000 = 0x100d
+ B3500000 = 0x100e
+ B38400 = 0xf
+ B4000000 = 0x100f
+ B460800 = 0x1004
+ B4800 = 0xc
+ B50 = 0x1
+ B500000 = 0x1005
+ B57600 = 0x1001
+ B576000 = 0x1006
+ B600 = 0x8
+ B75 = 0x2
+ B921600 = 0x1007
+ B9600 = 0xd
+ BOTHER = 0x1000
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DIV = 0x30
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXINSNS = 0x1000
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_OR = 0x40
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BRKINT = 0x2
+ BS0 = 0x0
+ BS1 = 0x2000
+ BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MCNET = 0x5
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
+ CBAUD = 0x100f
+ CBAUDEX = 0x1000
+ CFLUSH = 0xf
+ CIBAUD = 0x100f0000
+ CLOCAL = 0x800
+ CLOCK_DEFAULT = 0x0
+ CLOCK_EXT = 0x1
+ CLOCK_INT = 0x2
+ CLOCK_MONOTONIC = 0x1
+ CLOCK_MONOTONIC_COARSE = 0x6
+ CLOCK_MONOTONIC_RAW = 0x4
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_REALTIME_COARSE = 0x5
+ CLOCK_THREAD_CPUTIME_ID = 0x3
+ CLOCK_TXFROMRX = 0x4
+ CLOCK_TXINT = 0x3
+ CLONE_CHILD_CLEARTID = 0x200000
+ CLONE_CHILD_SETTID = 0x1000000
+ CLONE_DETACHED = 0x400000
+ CLONE_FILES = 0x400
+ CLONE_FS = 0x200
+ CLONE_IO = 0x80000000
+ CLONE_NEWIPC = 0x8000000
+ CLONE_NEWNET = 0x40000000
+ CLONE_NEWNS = 0x20000
+ CLONE_NEWPID = 0x20000000
+ CLONE_NEWUSER = 0x10000000
+ CLONE_NEWUTS = 0x4000000
+ CLONE_PARENT = 0x8000
+ CLONE_PARENT_SETTID = 0x100000
+ CLONE_PTRACE = 0x2000
+ CLONE_SETTLS = 0x80000
+ CLONE_SIGHAND = 0x800
+ CLONE_SYSVSEM = 0x40000
+ CLONE_THREAD = 0x10000
+ CLONE_UNTRACED = 0x800000
+ CLONE_VFORK = 0x4000
+ CLONE_VM = 0x100
+ CMSPAR = 0x40000000
+ CR0 = 0x0
+ CR1 = 0x200
+ CR2 = 0x400
+ CR3 = 0x600
+ CRDLY = 0x600
+ CREAD = 0x80
+ CRTSCTS = 0x80000000
+ CS5 = 0x0
+ CS6 = 0x10
+ CS7 = 0x20
+ CS8 = 0x30
+ CSIGNAL = 0xff
+ CSIZE = 0x30
+ CSTART = 0x11
+ CSTATUS = 0x0
+ CSTOP = 0x13
+ CSTOPB = 0x40
+ CSUSP = 0x1a
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ DT_WHT = 0xe
+ ECHO = 0x8
+ ECHOCTL = 0x200
+ ECHOE = 0x10
+ ECHOK = 0x20
+ ECHOKE = 0x800
+ ECHONL = 0x40
+ ECHOPRT = 0x400
+ ENCODING_DEFAULT = 0x0
+ ENCODING_FM_MARK = 0x3
+ ENCODING_FM_SPACE = 0x4
+ ENCODING_MANCHESTER = 0x5
+ ENCODING_NRZ = 0x1
+ ENCODING_NRZI = 0x2
+ EPOLLERR = 0x8
+ EPOLLET = -0x80000000
+ EPOLLHUP = 0x10
+ EPOLLIN = 0x1
+ EPOLLMSG = 0x400
+ EPOLLONESHOT = 0x40000000
+ EPOLLOUT = 0x4
+ EPOLLPRI = 0x2
+ EPOLLRDBAND = 0x80
+ EPOLLRDHUP = 0x2000
+ EPOLLRDNORM = 0x40
+ EPOLLWRBAND = 0x200
+ EPOLLWRNORM = 0x100
+ EPOLL_CLOEXEC = 0x80000
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CTL_DEL = 0x2
+ EPOLL_CTL_MOD = 0x3
+ EPOLL_NONBLOCK = 0x80
+ ETH_P_1588 = 0x88f7
+ ETH_P_8021AD = 0x88a8
+ ETH_P_8021AH = 0x88e7
+ ETH_P_8021Q = 0x8100
+ ETH_P_802_2 = 0x4
+ ETH_P_802_3 = 0x1
+ ETH_P_AARP = 0x80f3
+ ETH_P_AF_IUCV = 0xfbfb
+ ETH_P_ALL = 0x3
+ ETH_P_AOE = 0x88a2
+ ETH_P_ARCNET = 0x1a
+ ETH_P_ARP = 0x806
+ ETH_P_ATALK = 0x809b
+ ETH_P_ATMFATE = 0x8884
+ ETH_P_ATMMPOA = 0x884c
+ ETH_P_AX25 = 0x2
+ ETH_P_BPQ = 0x8ff
+ ETH_P_CAIF = 0xf7
+ ETH_P_CAN = 0xc
+ ETH_P_CONTROL = 0x16
+ ETH_P_CUST = 0x6006
+ ETH_P_DDCMP = 0x6
+ ETH_P_DEC = 0x6000
+ ETH_P_DIAG = 0x6005
+ ETH_P_DNA_DL = 0x6001
+ ETH_P_DNA_RC = 0x6002
+ ETH_P_DNA_RT = 0x6003
+ ETH_P_DSA = 0x1b
+ ETH_P_ECONET = 0x18
+ ETH_P_EDSA = 0xdada
+ ETH_P_FCOE = 0x8906
+ ETH_P_FIP = 0x8914
+ ETH_P_HDLC = 0x19
+ ETH_P_IEEE802154 = 0xf6
+ ETH_P_IEEEPUP = 0xa00
+ ETH_P_IEEEPUPAT = 0xa01
+ ETH_P_IP = 0x800
+ ETH_P_IPV6 = 0x86dd
+ ETH_P_IPX = 0x8137
+ ETH_P_IRDA = 0x17
+ ETH_P_LAT = 0x6004
+ ETH_P_LINK_CTL = 0x886c
+ ETH_P_LOCALTALK = 0x9
+ ETH_P_LOOP = 0x60
+ ETH_P_MOBITEX = 0x15
+ ETH_P_MPLS_MC = 0x8848
+ ETH_P_MPLS_UC = 0x8847
+ ETH_P_PAE = 0x888e
+ ETH_P_PAUSE = 0x8808
+ ETH_P_PHONET = 0xf5
+ ETH_P_PPPTALK = 0x10
+ ETH_P_PPP_DISC = 0x8863
+ ETH_P_PPP_MP = 0x8
+ ETH_P_PPP_SES = 0x8864
+ ETH_P_PUP = 0x200
+ ETH_P_PUPAT = 0x201
+ ETH_P_QINQ1 = 0x9100
+ ETH_P_QINQ2 = 0x9200
+ ETH_P_QINQ3 = 0x9300
+ ETH_P_RARP = 0x8035
+ ETH_P_SCA = 0x6007
+ ETH_P_SLOW = 0x8809
+ ETH_P_SNAP = 0x5
+ ETH_P_TDLS = 0x890d
+ ETH_P_TEB = 0x6558
+ ETH_P_TIPC = 0x88ca
+ ETH_P_TRAILER = 0x1c
+ ETH_P_TR_802_2 = 0x11
+ ETH_P_WAN_PPP = 0x7
+ ETH_P_WCCP = 0x883e
+ ETH_P_X25 = 0x805
+ EXTA = 0xe
+ EXTB = 0xf
+ EXTPROC = 0x10000
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x400
+ FF0 = 0x0
+ FF1 = 0x8000
+ FFDLY = 0x8000
+ FLUSHO = 0x2000
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0x406
+ F_EXLCK = 0x4
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLEASE = 0x401
+ F_GETLK = 0x21
+ F_GETLK64 = 0x21
+ F_GETOWN = 0x17
+ F_GETOWN_EX = 0x10
+ F_GETPIPE_SZ = 0x408
+ F_GETSIG = 0xb
+ F_LOCK = 0x1
+ F_NOTIFY = 0x402
+ F_OK = 0x0
+ F_RDLCK = 0x0
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLEASE = 0x400
+ F_SETLK = 0x22
+ F_SETLK64 = 0x22
+ F_SETLKW = 0x23
+ F_SETLKW64 = 0x23
+ F_SETOWN = 0x18
+ F_SETOWN_EX = 0xf
+ F_SETPIPE_SZ = 0x407
+ F_SETSIG = 0xa
+ F_SHLCK = 0x8
+ F_TEST = 0x3
+ F_TLOCK = 0x2
+ F_ULOCK = 0x0
+ F_UNLCK = 0x2
+ F_WRLCK = 0x1
+ HUPCL = 0x400
+ IBSHIFT = 0x10
+ ICANON = 0x2
+ ICMPV6_FILTER = 0x1
+ ICRNL = 0x100
+ IEXTEN = 0x100
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_NODAD = 0x2
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x7
+ IFF_802_1Q_VLAN = 0x1
+ IFF_ALLMULTI = 0x200
+ IFF_AUTOMEDIA = 0x4000
+ IFF_BONDING = 0x20
+ IFF_BRIDGE_PORT = 0x4000
+ IFF_BROADCAST = 0x2
+ IFF_DEBUG = 0x4
+ IFF_DISABLE_NETPOLL = 0x1000
+ IFF_DONT_BRIDGE = 0x800
+ IFF_DORMANT = 0x20000
+ IFF_DYNAMIC = 0x8000
+ IFF_EBRIDGE = 0x2
+ IFF_ECHO = 0x40000
+ IFF_ISATAP = 0x80
+ IFF_LOOPBACK = 0x8
+ IFF_LOWER_UP = 0x10000
+ IFF_MACVLAN_PORT = 0x2000
+ IFF_MASTER = 0x400
+ IFF_MASTER_8023AD = 0x8
+ IFF_MASTER_ALB = 0x10
+ IFF_MASTER_ARPMON = 0x100
+ IFF_MULTICAST = 0x1000
+ IFF_NOARP = 0x80
+ IFF_NOTRAILERS = 0x20
+ IFF_NO_PI = 0x1000
+ IFF_ONE_QUEUE = 0x2000
+ IFF_OVS_DATAPATH = 0x8000
+ IFF_POINTOPOINT = 0x10
+ IFF_PORTSEL = 0x2000
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SLAVE = 0x800
+ IFF_SLAVE_INACTIVE = 0x4
+ IFF_SLAVE_NEEDARP = 0x40
+ IFF_TAP = 0x2
+ IFF_TUN = 0x1
+ IFF_TUN_EXCL = 0x8000
+ IFF_TX_SKB_SHARING = 0x10000
+ IFF_UNICAST_FLT = 0x20000
+ IFF_UP = 0x1
+ IFF_VNET_HDR = 0x4000
+ IFF_VOLATILE = 0x70c5a
+ IFF_WAN_HDLC = 0x200
+ IFF_XMIT_DST_RELEASE = 0x400
+ IFNAMSIZ = 0x10
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_ACCESS = 0x1
+ IN_ALL_EVENTS = 0xfff
+ IN_ATTRIB = 0x4
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLOEXEC = 0x80000
+ IN_CLOSE = 0x18
+ IN_CLOSE_NOWRITE = 0x10
+ IN_CLOSE_WRITE = 0x8
+ IN_CREATE = 0x100
+ IN_DELETE = 0x200
+ IN_DELETE_SELF = 0x400
+ IN_DONT_FOLLOW = 0x2000000
+ IN_EXCL_UNLINK = 0x4000000
+ IN_IGNORED = 0x8000
+ IN_ISDIR = 0x40000000
+ IN_LOOPBACKNET = 0x7f
+ IN_MASK_ADD = 0x20000000
+ IN_MODIFY = 0x2
+ IN_MOVE = 0xc0
+ IN_MOVED_FROM = 0x40
+ IN_MOVED_TO = 0x80
+ IN_MOVE_SELF = 0x800
+ IN_NONBLOCK = 0x80
+ IN_ONESHOT = 0x80000000
+ IN_ONLYDIR = 0x1000000
+ IN_OPEN = 0x20
+ IN_Q_OVERFLOW = 0x4000
+ IN_UNMOUNT = 0x2000
+ IPPROTO_AH = 0x33
+ IPPROTO_COMP = 0x6c
+ IPPROTO_DCCP = 0x21
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_ESP = 0x32
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_MTP = 0x5c
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
+ IPV6_2292DSTOPTS = 0x4
+ IPV6_2292HOPLIMIT = 0x8
+ IPV6_2292HOPOPTS = 0x3
+ IPV6_2292PKTINFO = 0x2
+ IPV6_2292PKTOPTIONS = 0x6
+ IPV6_2292RTHDR = 0x5
+ IPV6_ADDRFORM = 0x1
+ IPV6_ADD_MEMBERSHIP = 0x14
+ IPV6_AUTHHDR = 0xa
+ IPV6_CHECKSUM = 0x7
+ IPV6_DROP_MEMBERSHIP = 0x15
+ IPV6_DSTOPTS = 0x3b
+ IPV6_HOPLIMIT = 0x34
+ IPV6_HOPOPTS = 0x36
+ IPV6_IPSEC_POLICY = 0x22
+ IPV6_JOIN_ANYCAST = 0x1b
+ IPV6_JOIN_GROUP = 0x14
+ IPV6_LEAVE_ANYCAST = 0x1c
+ IPV6_LEAVE_GROUP = 0x15
+ IPV6_MTU = 0x18
+ IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_HOPS = 0x12
+ IPV6_MULTICAST_IF = 0x11
+ IPV6_MULTICAST_LOOP = 0x13
+ IPV6_NEXTHOP = 0x9
+ IPV6_PKTINFO = 0x32
+ IPV6_PMTUDISC_DO = 0x2
+ IPV6_PMTUDISC_DONT = 0x0
+ IPV6_PMTUDISC_PROBE = 0x3
+ IPV6_PMTUDISC_WANT = 0x1
+ IPV6_RECVDSTOPTS = 0x3a
+ IPV6_RECVERR = 0x19
+ IPV6_RECVHOPLIMIT = 0x33
+ IPV6_RECVHOPOPTS = 0x35
+ IPV6_RECVPKTINFO = 0x31
+ IPV6_RECVRTHDR = 0x38
+ IPV6_RECVTCLASS = 0x42
+ IPV6_ROUTER_ALERT = 0x16
+ IPV6_RTHDR = 0x39
+ IPV6_RTHDRDSTOPTS = 0x37
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_RXDSTOPTS = 0x3b
+ IPV6_RXHOPOPTS = 0x36
+ IPV6_TCLASS = 0x43
+ IPV6_UNICAST_HOPS = 0x10
+ IPV6_V6ONLY = 0x1a
+ IPV6_XFRM_POLICY = 0x23
+ IP_ADD_MEMBERSHIP = 0x23
+ IP_ADD_SOURCE_MEMBERSHIP = 0x27
+ IP_BLOCK_SOURCE = 0x26
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0x24
+ IP_DROP_SOURCE_MEMBERSHIP = 0x28
+ IP_FREEBIND = 0xf
+ IP_HDRINCL = 0x3
+ IP_IPSEC_POLICY = 0x10
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0x14
+ IP_MF = 0x2000
+ IP_MINTTL = 0x15
+ IP_MSFILTER = 0x29
+ IP_MSS = 0x240
+ IP_MTU = 0xe
+ IP_MTU_DISCOVER = 0xa
+ IP_MULTICAST_IF = 0x20
+ IP_MULTICAST_LOOP = 0x22
+ IP_MULTICAST_TTL = 0x21
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x4
+ IP_ORIGDSTADDR = 0x14
+ IP_PASSSEC = 0x12
+ IP_PKTINFO = 0x8
+ IP_PKTOPTIONS = 0x9
+ IP_PMTUDISC = 0xa
+ IP_PMTUDISC_DO = 0x2
+ IP_PMTUDISC_DONT = 0x0
+ IP_PMTUDISC_PROBE = 0x3
+ IP_PMTUDISC_WANT = 0x1
+ IP_RECVERR = 0xb
+ IP_RECVOPTS = 0x6
+ IP_RECVORIGDSTADDR = 0x14
+ IP_RECVRETOPTS = 0x7
+ IP_RECVTOS = 0xd
+ IP_RECVTTL = 0xc
+ IP_RETOPTS = 0x7
+ IP_RF = 0x8000
+ IP_ROUTER_ALERT = 0x5
+ IP_TOS = 0x1
+ IP_TRANSPARENT = 0x13
+ IP_TTL = 0x2
+ IP_UNBLOCK_SOURCE = 0x25
+ IP_XFRM_POLICY = 0x11
+ ISIG = 0x1
+ ISTRIP = 0x20
+ IUCLC = 0x200
+ IUTF8 = 0x4000
+ IXANY = 0x800
+ IXOFF = 0x1000
+ IXON = 0x400
+ LINUX_REBOOT_CMD_CAD_OFF = 0x0
+ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
+ LINUX_REBOOT_CMD_HALT = 0xcdef0123
+ LINUX_REBOOT_CMD_KEXEC = 0x45584543
+ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc
+ LINUX_REBOOT_CMD_RESTART = 0x1234567
+ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4
+ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
+ LINUX_REBOOT_MAGIC1 = 0xfee1dead
+ LINUX_REBOOT_MAGIC2 = 0x28121969
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DOFORK = 0xb
+ MADV_DONTFORK = 0xa
+ MADV_DONTNEED = 0x4
+ MADV_HUGEPAGE = 0xe
+ MADV_HWPOISON = 0x64
+ MADV_MERGEABLE = 0xc
+ MADV_NOHUGEPAGE = 0xf
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_REMOVE = 0x9
+ MADV_SEQUENTIAL = 0x2
+ MADV_UNMERGEABLE = 0xd
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x800
+ MAP_ANONYMOUS = 0x800
+ MAP_DENYWRITE = 0x2000
+ MAP_EXECUTABLE = 0x4000
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_GROWSDOWN = 0x1000
+ MAP_LOCKED = 0x8000
+ MAP_NONBLOCK = 0x20000
+ MAP_NORESERVE = 0x400
+ MAP_POPULATE = 0x10000
+ MAP_PRIVATE = 0x2
+ MAP_RENAME = 0x800
+ MAP_SHARED = 0x1
+ MAP_TYPE = 0xf
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MNT_DETACH = 0x2
+ MNT_EXPIRE = 0x4
+ MNT_FORCE = 0x1
+ MSG_CMSG_CLOEXEC = 0x40000000
+ MSG_CONFIRM = 0x800
+ MSG_CTRUNC = 0x8
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x40
+ MSG_EOR = 0x80
+ MSG_ERRQUEUE = 0x2000
+ MSG_FASTOPEN = 0x20000000
+ MSG_FIN = 0x200
+ MSG_MORE = 0x8000
+ MSG_NOSIGNAL = 0x4000
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_PROXY = 0x10
+ MSG_RST = 0x1000
+ MSG_SYN = 0x400
+ MSG_TRUNC = 0x20
+ MSG_TRYHARD = 0x4
+ MSG_WAITALL = 0x100
+ MSG_WAITFORONE = 0x10000
+ MS_ACTIVE = 0x40000000
+ MS_ASYNC = 0x1
+ MS_BIND = 0x1000
+ MS_DIRSYNC = 0x80
+ MS_INVALIDATE = 0x2
+ MS_I_VERSION = 0x800000
+ MS_KERNMOUNT = 0x400000
+ MS_MANDLOCK = 0x40
+ MS_MGC_MSK = 0xffff0000
+ MS_MGC_VAL = 0xc0ed0000
+ MS_MOVE = 0x2000
+ MS_NOATIME = 0x400
+ MS_NODEV = 0x4
+ MS_NODIRATIME = 0x800
+ MS_NOEXEC = 0x8
+ MS_NOSUID = 0x2
+ MS_NOUSER = -0x80000000
+ MS_POSIXACL = 0x10000
+ MS_PRIVATE = 0x40000
+ MS_RDONLY = 0x1
+ MS_REC = 0x4000
+ MS_RELATIME = 0x200000
+ MS_REMOUNT = 0x20
+ MS_RMT_MASK = 0x800051
+ MS_SHARED = 0x100000
+ MS_SILENT = 0x8000
+ MS_SLAVE = 0x80000
+ MS_STRICTATIME = 0x1000000
+ MS_SYNC = 0x4
+ MS_SYNCHRONOUS = 0x10
+ MS_UNBINDABLE = 0x20000
+ NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_CRYPTO = 0x15
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_RDMA = 0x14
+ NETLINK_ROUTE = 0x0
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NL0 = 0x0
+ NL1 = 0x100
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLDLY = 0x100
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_DUMP_INTR = 0x10
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
+ NOFLSH = 0x80
+ OCRNL = 0x8
+ OFDEL = 0x80
+ OFILL = 0x40
+ OLCUC = 0x2
+ ONLCR = 0x4
+ ONLRET = 0x20
+ ONOCR = 0x10
+ OPOST = 0x1
+ O_ACCMODE = 0x3
+ O_APPEND = 0x8
+ O_ASYNC = 0x1000
+ O_CLOEXEC = 0x80000
+ O_CREAT = 0x100
+ O_DIRECT = 0x8000
+ O_DIRECTORY = 0x10000
+ O_DSYNC = 0x10
+ O_EXCL = 0x400
+ O_FSYNC = 0x4010
+ O_LARGEFILE = 0x2000
+ O_NDELAY = 0x80
+ O_NOATIME = 0x40000
+ O_NOCTTY = 0x800
+ O_NOFOLLOW = 0x20000
+ O_NONBLOCK = 0x80
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x4010
+ O_SYNC = 0x4010
+ O_TRUNC = 0x200
+ O_WRONLY = 0x1
+ PACKET_ADD_MEMBERSHIP = 0x1
+ PACKET_AUXDATA = 0x8
+ PACKET_BROADCAST = 0x1
+ PACKET_COPY_THRESH = 0x7
+ PACKET_DROP_MEMBERSHIP = 0x2
+ PACKET_FANOUT = 0x12
+ PACKET_FANOUT_CPU = 0x2
+ PACKET_FANOUT_FLAG_DEFRAG = 0x8000
+ PACKET_FANOUT_HASH = 0x0
+ PACKET_FANOUT_LB = 0x1
+ PACKET_FASTROUTE = 0x6
+ PACKET_HDRLEN = 0xb
+ PACKET_HOST = 0x0
+ PACKET_LOOPBACK = 0x5
+ PACKET_LOSS = 0xe
+ PACKET_MR_ALLMULTI = 0x2
+ PACKET_MR_MULTICAST = 0x0
+ PACKET_MR_PROMISC = 0x1
+ PACKET_MR_UNICAST = 0x3
+ PACKET_MULTICAST = 0x2
+ PACKET_ORIGDEV = 0x9
+ PACKET_OTHERHOST = 0x3
+ PACKET_OUTGOING = 0x4
+ PACKET_RECV_OUTPUT = 0x3
+ PACKET_RESERVE = 0xc
+ PACKET_RX_RING = 0x5
+ PACKET_STATISTICS = 0x6
+ PACKET_TIMESTAMP = 0x11
+ PACKET_TX_RING = 0xd
+ PACKET_TX_TIMESTAMP = 0x10
+ PACKET_VERSION = 0xa
+ PACKET_VNET_HDR = 0xf
+ PARENB = 0x100
+ PARITY_CRC16_PR0 = 0x2
+ PARITY_CRC16_PR0_CCITT = 0x4
+ PARITY_CRC16_PR1 = 0x3
+ PARITY_CRC16_PR1_CCITT = 0x5
+ PARITY_CRC32_PR0_CCITT = 0x6
+ PARITY_CRC32_PR1_CCITT = 0x7
+ PARITY_DEFAULT = 0x0
+ PARITY_NONE = 0x1
+ PARMRK = 0x8
+ PARODD = 0x200
+ PENDIN = 0x4000
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_GROWSDOWN = 0x1000000
+ PROT_GROWSUP = 0x2000000
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ PR_CAPBSET_DROP = 0x18
+ PR_CAPBSET_READ = 0x17
+ PR_ENDIAN_BIG = 0x0
+ PR_ENDIAN_LITTLE = 0x1
+ PR_ENDIAN_PPC_LITTLE = 0x2
+ PR_FPEMU_NOPRINT = 0x1
+ PR_FPEMU_SIGFPE = 0x2
+ PR_FP_EXC_ASYNC = 0x2
+ PR_FP_EXC_DISABLED = 0x0
+ PR_FP_EXC_DIV = 0x10000
+ PR_FP_EXC_INV = 0x100000
+ PR_FP_EXC_NONRECOV = 0x1
+ PR_FP_EXC_OVF = 0x20000
+ PR_FP_EXC_PRECISE = 0x3
+ PR_FP_EXC_RES = 0x80000
+ PR_FP_EXC_SW_ENABLE = 0x80
+ PR_FP_EXC_UND = 0x40000
+ PR_GET_DUMPABLE = 0x3
+ PR_GET_ENDIAN = 0x13
+ PR_GET_FPEMU = 0x9
+ PR_GET_FPEXC = 0xb
+ PR_GET_KEEPCAPS = 0x7
+ PR_GET_NAME = 0x10
+ PR_GET_PDEATHSIG = 0x2
+ PR_GET_SECCOMP = 0x15
+ PR_GET_SECUREBITS = 0x1b
+ PR_GET_TIMERSLACK = 0x1e
+ PR_GET_TIMING = 0xd
+ PR_GET_TSC = 0x19
+ PR_GET_UNALIGN = 0x5
+ PR_MCE_KILL = 0x21
+ PR_MCE_KILL_CLEAR = 0x0
+ PR_MCE_KILL_DEFAULT = 0x2
+ PR_MCE_KILL_EARLY = 0x1
+ PR_MCE_KILL_GET = 0x22
+ PR_MCE_KILL_LATE = 0x0
+ PR_MCE_KILL_SET = 0x1
+ PR_SET_DUMPABLE = 0x4
+ PR_SET_ENDIAN = 0x14
+ PR_SET_FPEMU = 0xa
+ PR_SET_FPEXC = 0xc
+ PR_SET_KEEPCAPS = 0x8
+ PR_SET_NAME = 0xf
+ PR_SET_PDEATHSIG = 0x1
+ PR_SET_SECCOMP = 0x16
+ PR_SET_SECUREBITS = 0x1c
+ PR_SET_TIMERSLACK = 0x1d
+ PR_SET_TIMING = 0xe
+ PR_SET_TSC = 0x1a
+ PR_SET_UNALIGN = 0x6
+ PR_TASK_PERF_EVENTS_DISABLE = 0x1f
+ PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMING_STATISTICAL = 0x0
+ PR_TIMING_TIMESTAMP = 0x1
+ PR_TSC_ENABLE = 0x1
+ PR_TSC_SIGSEGV = 0x2
+ PR_UNALIGN_NOPRINT = 0x1
+ PR_UNALIGN_SIGBUS = 0x2
+ PTRACE_ATTACH = 0x10
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0x11
+ PTRACE_EVENT_CLONE = 0x3
+ PTRACE_EVENT_EXEC = 0x4
+ PTRACE_EVENT_EXIT = 0x6
+ PTRACE_EVENT_FORK = 0x1
+ PTRACE_EVENT_STOP = 0x7
+ PTRACE_EVENT_VFORK = 0x2
+ PTRACE_EVENT_VFORK_DONE = 0x5
+ PTRACE_GETEVENTMSG = 0x4201
+ PTRACE_GETFPREGS = 0xe
+ PTRACE_GETREGS = 0xc
+ PTRACE_GETREGSET = 0x4204
+ PTRACE_GETSIGINFO = 0x4202
+ PTRACE_GET_THREAD_AREA = 0x19
+ PTRACE_GET_THREAD_AREA_3264 = 0xc4
+ PTRACE_GET_WATCH_REGS = 0xd0
+ PTRACE_INTERRUPT = 0x4207
+ PTRACE_KILL = 0x8
+ PTRACE_LISTEN = 0x4208
+ PTRACE_OLDSETOPTIONS = 0x15
+ PTRACE_O_MASK = 0x7f
+ PTRACE_O_TRACECLONE = 0x8
+ PTRACE_O_TRACEEXEC = 0x10
+ PTRACE_O_TRACEEXIT = 0x40
+ PTRACE_O_TRACEFORK = 0x2
+ PTRACE_O_TRACESYSGOOD = 0x1
+ PTRACE_O_TRACEVFORK = 0x4
+ PTRACE_O_TRACEVFORKDONE = 0x20
+ PTRACE_PEEKDATA = 0x2
+ PTRACE_PEEKDATA_3264 = 0xc1
+ PTRACE_PEEKTEXT = 0x1
+ PTRACE_PEEKTEXT_3264 = 0xc0
+ PTRACE_PEEKUSR = 0x3
+ PTRACE_POKEDATA = 0x5
+ PTRACE_POKEDATA_3264 = 0xc3
+ PTRACE_POKETEXT = 0x4
+ PTRACE_POKETEXT_3264 = 0xc2
+ PTRACE_POKEUSR = 0x6
+ PTRACE_SEIZE = 0x4206
+ PTRACE_SEIZE_DEVEL = 0x80000000
+ PTRACE_SETFPREGS = 0xf
+ PTRACE_SETOPTIONS = 0x4200
+ PTRACE_SETREGS = 0xd
+ PTRACE_SETREGSET = 0x4205
+ PTRACE_SETSIGINFO = 0x4203
+ PTRACE_SET_THREAD_AREA = 0x1a
+ PTRACE_SET_WATCH_REGS = 0xd1
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_SYSCALL = 0x18
+ PTRACE_TRACEME = 0x0
+ RLIMIT_AS = 0x6
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_NOFILE = 0x5
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = 0x7fffffffffffffff
+ RTAX_ADVMSS = 0x8
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_INITRWND = 0xe
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0xe
+ RTAX_MTU = 0x2
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0x0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0x10
+ RTCF_DIRECTSRC = 0x4000000
+ RTCF_DOREDIRECT = 0x1000000
+ RTCF_LOG = 0x2000000
+ RTCF_MASQ = 0x400000
+ RTCF_NAT = 0x800000
+ RTCF_VALVE = 0x200000
+ RTF_ADDRCLASSMASK = 0xf8000000
+ RTF_ADDRCONF = 0x40000
+ RTF_ALLONLINK = 0x20000
+ RTF_BROADCAST = 0x10000000
+ RTF_CACHE = 0x1000000
+ RTF_DEFAULT = 0x10000
+ RTF_DYNAMIC = 0x10
+ RTF_FLOW = 0x2000000
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_INTERFACE = 0x40000000
+ RTF_IRTT = 0x100
+ RTF_LINKRT = 0x100000
+ RTF_LOCAL = 0x80000000
+ RTF_MODIFIED = 0x20
+ RTF_MSS = 0x40
+ RTF_MTU = 0x40
+ RTF_MULTICAST = 0x20000000
+ RTF_NAT = 0x8000000
+ RTF_NOFORWARD = 0x1000
+ RTF_NONEXTHOP = 0x200000
+ RTF_NOPMTUDISC = 0x4000
+ RTF_POLICY = 0x4000000
+ RTF_REINSTATE = 0x8
+ RTF_REJECT = 0x200
+ RTF_STATIC = 0x400
+ RTF_THROW = 0x2000
+ RTF_UP = 0x1
+ RTF_WINDOW = 0x80
+ RTF_XRESOLVE = 0x800
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELNEIGH = 0x1d
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x4f
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x10
+ RTM_NR_MSGTYPES = 0x40
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
+ RTNH_ALIGNTO = 0x4
+ RTNH_F_DEAD = 0x1
+ RTNH_F_ONLINK = 0x4
+ RTNH_F_PERVASIVE = 0x2
+ RTN_MAX = 0xb
+ RTPROT_BIRD = 0xc
+ RTPROT_BOOT = 0x3
+ RTPROT_DHCP = 0x10
+ RTPROT_DNROUTED = 0xd
+ RTPROT_GATED = 0x8
+ RTPROT_KERNEL = 0x2
+ RTPROT_MRT = 0xa
+ RTPROT_NTK = 0xf
+ RTPROT_RA = 0x9
+ RTPROT_REDIRECT = 0x1
+ RTPROT_STATIC = 0x4
+ RTPROT_UNSPEC = 0x0
+ RTPROT_XORP = 0xe
+ RTPROT_ZEBRA = 0xb
+ RT_CLASS_DEFAULT = 0xfd
+ RT_CLASS_LOCAL = 0xff
+ RT_CLASS_MAIN = 0xfe
+ RT_CLASS_MAX = 0xff
+ RT_CLASS_UNSPEC = 0x0
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_CREDENTIALS = 0x2
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x1d
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPNS = 0x23
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDDLCI = 0x8980
+ SIOCADDMULTI = 0x8931
+ SIOCADDRT = 0x890b
+ SIOCATMARK = 0x40047307
+ SIOCDARP = 0x8953
+ SIOCDELDLCI = 0x8981
+ SIOCDELMULTI = 0x8932
+ SIOCDELRT = 0x890c
+ SIOCDEVPRIVATE = 0x89f0
+ SIOCDIFADDR = 0x8936
+ SIOCDRARP = 0x8960
+ SIOCGARP = 0x8954
+ SIOCGIFADDR = 0x8915
+ SIOCGIFBR = 0x8940
+ SIOCGIFBRDADDR = 0x8919
+ SIOCGIFCONF = 0x8912
+ SIOCGIFCOUNT = 0x8938
+ SIOCGIFDSTADDR = 0x8917
+ SIOCGIFENCAP = 0x8925
+ SIOCGIFFLAGS = 0x8913
+ SIOCGIFHWADDR = 0x8927
+ SIOCGIFINDEX = 0x8933
+ SIOCGIFMAP = 0x8970
+ SIOCGIFMEM = 0x891f
+ SIOCGIFMETRIC = 0x891d
+ SIOCGIFMTU = 0x8921
+ SIOCGIFNAME = 0x8910
+ SIOCGIFNETMASK = 0x891b
+ SIOCGIFPFLAGS = 0x8935
+ SIOCGIFSLAVE = 0x8929
+ SIOCGIFTXQLEN = 0x8942
+ SIOCGPGRP = 0x40047309
+ SIOCGRARP = 0x8961
+ SIOCGSTAMP = 0x8906
+ SIOCGSTAMPNS = 0x8907
+ SIOCPROTOPRIVATE = 0x89e0
+ SIOCRTMSG = 0x890d
+ SIOCSARP = 0x8955
+ SIOCSIFADDR = 0x8916
+ SIOCSIFBR = 0x8941
+ SIOCSIFBRDADDR = 0x891a
+ SIOCSIFDSTADDR = 0x8918
+ SIOCSIFENCAP = 0x8926
+ SIOCSIFFLAGS = 0x8914
+ SIOCSIFHWADDR = 0x8924
+ SIOCSIFHWBROADCAST = 0x8937
+ SIOCSIFLINK = 0x8911
+ SIOCSIFMAP = 0x8971
+ SIOCSIFMEM = 0x8920
+ SIOCSIFMETRIC = 0x891e
+ SIOCSIFMTU = 0x8922
+ SIOCSIFNAME = 0x8923
+ SIOCSIFNETMASK = 0x891c
+ SIOCSIFPFLAGS = 0x8934
+ SIOCSIFSLAVE = 0x8930
+ SIOCSIFTXQLEN = 0x8943
+ SIOCSPGRP = 0x80047308
+ SIOCSRARP = 0x8962
+ SOCK_CLOEXEC = 0x80000
+ SOCK_DCCP = 0x6
+ SOCK_DGRAM = 0x1
+ SOCK_NONBLOCK = 0x80
+ SOCK_PACKET = 0xa
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x2
+ SOL_AAL = 0x109
+ SOL_ATM = 0x108
+ SOL_CAN_BASE = 0x64
+ SOL_DECNET = 0x105
+ SOL_ICMPV6 = 0x3a
+ SOL_IP = 0x0
+ SOL_IPV6 = 0x29
+ SOL_IRDA = 0x10a
+ SOL_PACKET = 0x107
+ SOL_RAW = 0xff
+ SOL_SOCKET = 0xffff
+ SOL_TCP = 0x6
+ SOL_X25 = 0x106
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x1009
+ SO_ATTACH_FILTER = 0x1a
+ SO_BINDTODEVICE = 0x19
+ SO_BROADCAST = 0x20
+ SO_BSDCOMPAT = 0xe
+ SO_DEBUG = 0x1
+ SO_DETACH_FILTER = 0x1b
+ SO_DOMAIN = 0x1029
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_KEEPALIVE = 0x8
+ SO_LINGER = 0x80
+ SO_MARK = 0x24
+ SO_NO_CHECK = 0xb
+ SO_OOBINLINE = 0x100
+ SO_PASSCRED = 0x11
+ SO_PASSSEC = 0x22
+ SO_PEERCRED = 0x12
+ SO_PEERNAME = 0x1c
+ SO_PEERSEC = 0x1e
+ SO_PRIORITY = 0xc
+ SO_PROTOCOL = 0x1028
+ SO_RCVBUF = 0x1002
+ SO_RCVBUFFORCE = 0x21
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x1006
+ SO_REUSEADDR = 0x4
+ SO_RXQ_OVFL = 0x28
+ SO_SECURITY_AUTHENTICATION = 0x16
+ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
+ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+ SO_SNDBUF = 0x1001
+ SO_SNDBUFFORCE = 0x1f
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x1005
+ SO_STYLE = 0x1008
+ SO_TIMESTAMP = 0x1d
+ SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPNS = 0x23
+ SO_TYPE = 0x1008
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ TAB0 = 0x0
+ TAB1 = 0x800
+ TAB2 = 0x1000
+ TAB3 = 0x1800
+ TABDLY = 0x1800
+ TCFLSH = 0x5407
+ TCGETA = 0x5401
+ TCGETS = 0x540d
+ TCGETS2 = 0x4030542a
+ TCIFLUSH = 0x0
+ TCIOFF = 0x2
+ TCIOFLUSH = 0x2
+ TCION = 0x3
+ TCOFLUSH = 0x1
+ TCOOFF = 0x0
+ TCOON = 0x1
+ TCP_CONGESTION = 0xd
+ TCP_CORK = 0x3
+ TCP_DEFER_ACCEPT = 0x9
+ TCP_INFO = 0xb
+ TCP_KEEPCNT = 0x6
+ TCP_KEEPIDLE = 0x4
+ TCP_KEEPINTVL = 0x5
+ TCP_LINGER2 = 0x8
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0xe
+ TCP_MD5SIG_MAXKEYLEN = 0x50
+ TCP_MSS = 0x200
+ TCP_NODELAY = 0x1
+ TCP_QUICKACK = 0xc
+ TCP_SYNCNT = 0x7
+ TCP_WINDOW_CLAMP = 0xa
+ TCSAFLUSH = 0x5410
+ TCSBRK = 0x5405
+ TCSBRKP = 0x5486
+ TCSETA = 0x5402
+ TCSETAF = 0x5404
+ TCSETAW = 0x5403
+ TCSETS = 0x540e
+ TCSETS2 = 0x8030542b
+ TCSETSF = 0x5410
+ TCSETSF2 = 0x8030542d
+ TCSETSW = 0x540f
+ TCSETSW2 = 0x8030542c
+ TCXONC = 0x5406
+ TIOCCBRK = 0x5428
+ TIOCCONS = 0x80047478
+ TIOCEXCL = 0x740d
+ TIOCGDEV = 0x40045432
+ TIOCGETD = 0x7400
+ TIOCGETP = 0x7408
+ TIOCGICOUNT = 0x5492
+ TIOCGLCKTRMIOS = 0x548b
+ TIOCGLTC = 0x7474
+ TIOCGPGRP = 0x40047477
+ TIOCGPTN = 0x40045430
+ TIOCGSERIAL = 0x5484
+ TIOCGSID = 0x7416
+ TIOCGSOFTCAR = 0x5481
+ TIOCGWINSZ = 0x40087468
+ TIOCINQ = 0x467f
+ TIOCLINUX = 0x5483
+ TIOCMBIC = 0x741c
+ TIOCMBIS = 0x741b
+ TIOCMGET = 0x741d
+ TIOCMIWAIT = 0x5491
+ TIOCMSET = 0x741a
+ TIOCM_CAR = 0x100
+ TIOCM_CD = 0x100
+ TIOCM_CTS = 0x40
+ TIOCM_DSR = 0x400
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x200
+ TIOCM_RNG = 0x200
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x20
+ TIOCM_ST = 0x10
+ TIOCNOTTY = 0x5471
+ TIOCNXCL = 0x740e
+ TIOCOUTQ = 0x7472
+ TIOCPKT = 0x5470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCSBRK = 0x5427
+ TIOCSCTTY = 0x5480
+ TIOCSERCONFIG = 0x5488
+ TIOCSERGETLSR = 0x548e
+ TIOCSERGETMULTI = 0x548f
+ TIOCSERGSTRUCT = 0x548d
+ TIOCSERGWILD = 0x5489
+ TIOCSERSETMULTI = 0x5490
+ TIOCSERSWILD = 0x548a
+ TIOCSER_TEMT = 0x1
+ TIOCSETD = 0x7401
+ TIOCSETN = 0x740a
+ TIOCSETP = 0x7409
+ TIOCSIG = 0x80045436
+ TIOCSLCKTRMIOS = 0x548c
+ TIOCSLTC = 0x7475
+ TIOCSPGRP = 0x80047476
+ TIOCSPTLCK = 0x80045431
+ TIOCSSERIAL = 0x5485
+ TIOCSSOFTCAR = 0x5482
+ TIOCSTI = 0x5472
+ TIOCSWINSZ = 0x80087467
+ TIOCVHANGUP = 0x5437
+ TOSTOP = 0x8000
+ TUNATTACHFILTER = 0x800854d5
+ TUNDETACHFILTER = 0x800854d6
+ TUNGETFEATURES = 0x400454cf
+ TUNGETIFF = 0x400454d2
+ TUNGETSNDBUF = 0x400454d3
+ TUNGETVNETHDRSZ = 0x400454d7
+ TUNSETDEBUG = 0x800454c9
+ TUNSETGROUP = 0x800454ce
+ TUNSETIFF = 0x800454ca
+ TUNSETLINK = 0x800454cd
+ TUNSETNOCSUM = 0x800454c8
+ TUNSETOFFLOAD = 0x800454d0
+ TUNSETOWNER = 0x800454cc
+ TUNSETPERSIST = 0x800454cb
+ TUNSETSNDBUF = 0x800454d4
+ TUNSETTXFILTER = 0x800454d1
+ TUNSETVNETHDRSZ = 0x800454d8
+ VDISCARD = 0xd
+ VEOF = 0x10
+ VEOL = 0x11
+ VEOL2 = 0x6
+ VERASE = 0x2
+ VINTR = 0x0
+ VKILL = 0x3
+ VLNEXT = 0xf
+ VMIN = 0x4
+ VQUIT = 0x1
+ VREPRINT = 0xc
+ VSTART = 0x8
+ VSTOP = 0x9
+ VSUSP = 0xa
+ VSWTC = 0x7
+ VSWTCH = 0x7
+ VT0 = 0x0
+ VT1 = 0x4000
+ VTDLY = 0x4000
+ VTIME = 0x5
+ VWERASE = 0xe
+ WALL = 0x40000000
+ WCLONE = 0x80000000
+ WCONTINUED = 0x8
+ WEXITED = 0x4
+ WNOHANG = 0x1
+ WNOTHREAD = 0x20000000
+ WNOWAIT = 0x1000000
+ WORDSIZE = 0x20
+ WSTOPPED = 0x2
+ WUNTRACED = 0x2
+ XCASE = 0x4
+ XTABS = 0x1800
+)
+
+// Errors
+const (
+ E2BIG = syscall.Errno(0x7)
+ EACCES = syscall.Errno(0xd)
+ EADDRINUSE = syscall.Errno(0x7d)
+ EADDRNOTAVAIL = syscall.Errno(0x7e)
+ EADV = syscall.Errno(0x44)
+ EAFNOSUPPORT = syscall.Errno(0x7c)
+ EAGAIN = syscall.Errno(0xb)
+ EALREADY = syscall.Errno(0x95)
+ EBADE = syscall.Errno(0x32)
+ EBADF = syscall.Errno(0x9)
+ EBADFD = syscall.Errno(0x51)
+ EBADMSG = syscall.Errno(0x4d)
+ EBADR = syscall.Errno(0x33)
+ EBADRQC = syscall.Errno(0x36)
+ EBADSLT = syscall.Errno(0x37)
+ EBFONT = syscall.Errno(0x3b)
+ EBUSY = syscall.Errno(0x10)
+ ECANCELED = syscall.Errno(0x9e)
+ ECHILD = syscall.Errno(0xa)
+ ECHRNG = syscall.Errno(0x25)
+ ECOMM = syscall.Errno(0x46)
+ ECONNABORTED = syscall.Errno(0x82)
+ ECONNREFUSED = syscall.Errno(0x92)
+ ECONNRESET = syscall.Errno(0x83)
+ EDEADLK = syscall.Errno(0x2d)
+ EDEADLOCK = syscall.Errno(0x38)
+ EDESTADDRREQ = syscall.Errno(0x60)
+ EDOM = syscall.Errno(0x21)
+ EDOTDOT = syscall.Errno(0x49)
+ EDQUOT = syscall.Errno(0x46d)
+ EEXIST = syscall.Errno(0x11)
+ EFAULT = syscall.Errno(0xe)
+ EFBIG = syscall.Errno(0x1b)
+ EHOSTDOWN = syscall.Errno(0x93)
+ EHOSTUNREACH = syscall.Errno(0x94)
+ EHWPOISON = syscall.Errno(0xa8)
+ EIDRM = syscall.Errno(0x24)
+ EILSEQ = syscall.Errno(0x58)
+ EINIT = syscall.Errno(0x8d)
+ EINPROGRESS = syscall.Errno(0x96)
+ EINTR = syscall.Errno(0x4)
+ EINVAL = syscall.Errno(0x16)
+ EIO = syscall.Errno(0x5)
+ EISCONN = syscall.Errno(0x85)
+ EISDIR = syscall.Errno(0x15)
+ EISNAM = syscall.Errno(0x8b)
+ EKEYEXPIRED = syscall.Errno(0xa2)
+ EKEYREJECTED = syscall.Errno(0xa4)
+ EKEYREVOKED = syscall.Errno(0xa3)
+ EL2HLT = syscall.Errno(0x2c)
+ EL2NSYNC = syscall.Errno(0x26)
+ EL3HLT = syscall.Errno(0x27)
+ EL3RST = syscall.Errno(0x28)
+ ELIBACC = syscall.Errno(0x53)
+ ELIBBAD = syscall.Errno(0x54)
+ ELIBEXEC = syscall.Errno(0x57)
+ ELIBMAX = syscall.Errno(0x56)
+ ELIBSCN = syscall.Errno(0x55)
+ ELNRNG = syscall.Errno(0x29)
+ ELOOP = syscall.Errno(0x5a)
+ EMEDIUMTYPE = syscall.Errno(0xa0)
+ EMFILE = syscall.Errno(0x18)
+ EMLINK = syscall.Errno(0x1f)
+ EMSGSIZE = syscall.Errno(0x61)
+ EMULTIHOP = syscall.Errno(0x4a)
+ ENAMETOOLONG = syscall.Errno(0x4e)
+ ENAVAIL = syscall.Errno(0x8a)
+ ENETDOWN = syscall.Errno(0x7f)
+ ENETRESET = syscall.Errno(0x81)
+ ENETUNREACH = syscall.Errno(0x80)
+ ENFILE = syscall.Errno(0x17)
+ ENOANO = syscall.Errno(0x35)
+ ENOBUFS = syscall.Errno(0x84)
+ ENOCSI = syscall.Errno(0x2b)
+ ENODATA = syscall.Errno(0x3d)
+ ENODEV = syscall.Errno(0x13)
+ ENOENT = syscall.Errno(0x2)
+ ENOEXEC = syscall.Errno(0x8)
+ ENOKEY = syscall.Errno(0xa1)
+ ENOLCK = syscall.Errno(0x2e)
+ ENOLINK = syscall.Errno(0x43)
+ ENOMEDIUM = syscall.Errno(0x9f)
+ ENOMEM = syscall.Errno(0xc)
+ ENOMSG = syscall.Errno(0x23)
+ ENONET = syscall.Errno(0x40)
+ ENOPKG = syscall.Errno(0x41)
+ ENOPROTOOPT = syscall.Errno(0x63)
+ ENOSPC = syscall.Errno(0x1c)
+ ENOSR = syscall.Errno(0x3f)
+ ENOSTR = syscall.Errno(0x3c)
+ ENOSYS = syscall.Errno(0x59)
+ ENOTBLK = syscall.Errno(0xf)
+ ENOTCONN = syscall.Errno(0x86)
+ ENOTDIR = syscall.Errno(0x14)
+ ENOTEMPTY = syscall.Errno(0x5d)
+ ENOTNAM = syscall.Errno(0x89)
+ ENOTRECOVERABLE = syscall.Errno(0xa6)
+ ENOTSOCK = syscall.Errno(0x5f)
+ ENOTSUP = syscall.Errno(0x7a)
+ ENOTTY = syscall.Errno(0x19)
+ ENOTUNIQ = syscall.Errno(0x50)
+ ENXIO = syscall.Errno(0x6)
+ EOPNOTSUPP = syscall.Errno(0x7a)
+ EOVERFLOW = syscall.Errno(0x4f)
+ EOWNERDEAD = syscall.Errno(0xa5)
+ EPERM = syscall.Errno(0x1)
+ EPFNOSUPPORT = syscall.Errno(0x7b)
+ EPIPE = syscall.Errno(0x20)
+ EPROTO = syscall.Errno(0x47)
+ EPROTONOSUPPORT = syscall.Errno(0x78)
+ EPROTOTYPE = syscall.Errno(0x62)
+ ERANGE = syscall.Errno(0x22)
+ EREMCHG = syscall.Errno(0x52)
+ EREMDEV = syscall.Errno(0x8e)
+ EREMOTE = syscall.Errno(0x42)
+ EREMOTEIO = syscall.Errno(0x8c)
+ ERESTART = syscall.Errno(0x5b)
+ ERFKILL = syscall.Errno(0xa7)
+ EROFS = syscall.Errno(0x1e)
+ ESHUTDOWN = syscall.Errno(0x8f)
+ ESOCKTNOSUPPORT = syscall.Errno(0x79)
+ ESPIPE = syscall.Errno(0x1d)
+ ESRCH = syscall.Errno(0x3)
+ ESRMNT = syscall.Errno(0x45)
+ ESTALE = syscall.Errno(0x97)
+ ESTRPIPE = syscall.Errno(0x5c)
+ ETIME = syscall.Errno(0x3e)
+ ETIMEDOUT = syscall.Errno(0x91)
+ ETOOMANYREFS = syscall.Errno(0x90)
+ ETXTBSY = syscall.Errno(0x1a)
+ EUCLEAN = syscall.Errno(0x87)
+ EUNATCH = syscall.Errno(0x2a)
+ EUSERS = syscall.Errno(0x5e)
+ EWOULDBLOCK = syscall.Errno(0xb)
+ EXDEV = syscall.Errno(0x12)
+ EXFULL = syscall.Errno(0x34)
+)
+
+// Signals
+const (
+ SIGABRT = syscall.Signal(0x6)
+ SIGALRM = syscall.Signal(0xe)
+ SIGBUS = syscall.Signal(0xa)
+ SIGCHLD = syscall.Signal(0x12)
+ SIGCLD = syscall.Signal(0x12)
+ SIGCONT = syscall.Signal(0x19)
+ SIGEMT = syscall.Signal(0x7)
+ SIGFPE = syscall.Signal(0x8)
+ SIGHUP = syscall.Signal(0x1)
+ SIGILL = syscall.Signal(0x4)
+ SIGINT = syscall.Signal(0x2)
+ SIGIO = syscall.Signal(0x16)
+ SIGIOT = syscall.Signal(0x6)
+ SIGKILL = syscall.Signal(0x9)
+ SIGPIPE = syscall.Signal(0xd)
+ SIGPOLL = syscall.Signal(0x16)
+ SIGPROF = syscall.Signal(0x1d)
+ SIGPWR = syscall.Signal(0x13)
+ SIGQUIT = syscall.Signal(0x3)
+ SIGSEGV = syscall.Signal(0xb)
+ SIGSTOP = syscall.Signal(0x17)
+ SIGSYS = syscall.Signal(0xc)
+ SIGTERM = syscall.Signal(0xf)
+ SIGTRAP = syscall.Signal(0x5)
+ SIGTSTP = syscall.Signal(0x18)
+ SIGTTIN = syscall.Signal(0x1a)
+ SIGTTOU = syscall.Signal(0x1b)
+ SIGURG = syscall.Signal(0x15)
+ SIGUSR1 = syscall.Signal(0x10)
+ SIGUSR2 = syscall.Signal(0x11)
+ SIGVTALRM = syscall.Signal(0x1c)
+ SIGWINCH = syscall.Signal(0x14)
+ SIGXCPU = syscall.Signal(0x1e)
+ SIGXFSZ = syscall.Signal(0x1f)
+)
+
+// Error table
+var errors = [...]string{
+ 1: "operation not permitted",
+ 2: "no such file or directory",
+ 3: "no such process",
+ 4: "interrupted system call",
+ 5: "input/output error",
+ 6: "no such device or address",
+ 7: "argument list too long",
+ 8: "exec format error",
+ 9: "bad file descriptor",
+ 10: "no child processes",
+ 11: "resource temporarily unavailable",
+ 12: "cannot allocate memory",
+ 13: "permission denied",
+ 14: "bad address",
+ 15: "block device required",
+ 16: "device or resource busy",
+ 17: "file exists",
+ 18: "invalid cross-device link",
+ 19: "no such device",
+ 20: "not a directory",
+ 21: "is a directory",
+ 22: "invalid argument",
+ 23: "too many open files in system",
+ 24: "too many open files",
+ 25: "inappropriate ioctl for device",
+ 26: "text file busy",
+ 27: "file too large",
+ 28: "no space left on device",
+ 29: "illegal seek",
+ 30: "read-only file system",
+ 31: "too many links",
+ 32: "broken pipe",
+ 33: "numerical argument out of domain",
+ 34: "numerical result out of range",
+ 35: "no message of desired type",
+ 36: "identifier removed",
+ 37: "channel number out of range",
+ 38: "level 2 not synchronized",
+ 39: "level 3 halted",
+ 40: "level 3 reset",
+ 41: "link number out of range",
+ 42: "protocol driver not attached",
+ 43: "no CSI structure available",
+ 44: "level 2 halted",
+ 45: "resource deadlock avoided",
+ 46: "no locks available",
+ 50: "invalid exchange",
+ 51: "invalid request descriptor",
+ 52: "exchange full",
+ 53: "no anode",
+ 54: "invalid request code",
+ 55: "invalid slot",
+ 56: "file locking deadlock error",
+ 59: "bad font file format",
+ 60: "device not a stream",
+ 61: "no data available",
+ 62: "timer expired",
+ 63: "out of streams resources",
+ 64: "machine is not on the network",
+ 65: "package not installed",
+ 66: "object is remote",
+ 67: "link has been severed",
+ 68: "advertise error",
+ 69: "srmount error",
+ 70: "communication error on send",
+ 71: "protocol error",
+ 73: "RFS specific error",
+ 74: "multihop attempted",
+ 77: "bad message",
+ 78: "file name too long",
+ 79: "value too large for defined data type",
+ 80: "name not unique on network",
+ 81: "file descriptor in bad state",
+ 82: "remote address changed",
+ 83: "can not access a needed shared library",
+ 84: "accessing a corrupted shared library",
+ 85: ".lib section in a.out corrupted",
+ 86: "attempting to link in too many shared libraries",
+ 87: "cannot exec a shared library directly",
+ 88: "invalid or incomplete multibyte or wide character",
+ 89: "function not implemented",
+ 90: "too many levels of symbolic links",
+ 91: "interrupted system call should be restarted",
+ 92: "streams pipe error",
+ 93: "directory not empty",
+ 94: "too many users",
+ 95: "socket operation on non-socket",
+ 96: "destination address required",
+ 97: "message too long",
+ 98: "protocol wrong type for socket",
+ 99: "protocol not available",
+ 120: "protocol not supported",
+ 121: "socket type not supported",
+ 122: "operation not supported",
+ 123: "protocol family not supported",
+ 124: "address family not supported by protocol",
+ 125: "address already in use",
+ 126: "cannot assign requested address",
+ 127: "network is down",
+ 128: "network is unreachable",
+ 129: "network dropped connection on reset",
+ 130: "software caused connection abort",
+ 131: "connection reset by peer",
+ 132: "no buffer space available",
+ 133: "transport endpoint is already connected",
+ 134: "transport endpoint is not connected",
+ 135: "structure needs cleaning",
+ 137: "not a XENIX named type file",
+ 138: "no XENIX semaphores available",
+ 139: "is a named type file",
+ 140: "remote I/O error",
+ 141: "unknown error 141",
+ 142: "unknown error 142",
+ 143: "cannot send after transport endpoint shutdown",
+ 144: "too many references: cannot splice",
+ 145: "connection timed out",
+ 146: "connection refused",
+ 147: "host is down",
+ 148: "no route to host",
+ 149: "operation already in progress",
+ 150: "operation now in progress",
+ 151: "stale NFS file handle",
+ 158: "operation canceled",
+ 159: "no medium found",
+ 160: "wrong medium type",
+ 161: "required key not available",
+ 162: "key has expired",
+ 163: "key has been revoked",
+ 164: "key was rejected by service",
+ 165: "owner died",
+ 166: "state not recoverable",
+ 167: "operation not possible due to RF-kill",
+ 168: "unknown error 168",
+ 1133: "disk quota exceeded",
+}
+
+// Signal table
+var signals = [...]string{
+ 1: "hangup",
+ 2: "interrupt",
+ 3: "quit",
+ 4: "illegal instruction",
+ 5: "trace/breakpoint trap",
+ 6: "aborted",
+ 7: "EMT trap",
+ 8: "floating point exception",
+ 9: "killed",
+ 10: "bus error",
+ 11: "segmentation fault",
+ 12: "bad system call",
+ 13: "broken pipe",
+ 14: "alarm clock",
+ 15: "terminated",
+ 16: "user defined signal 1",
+ 17: "user defined signal 2",
+ 18: "child exited",
+ 19: "power failure",
+ 20: "window changed",
+ 21: "urgent I/O condition",
+ 22: "I/O possible",
+ 23: "stopped (signal)",
+ 24: "stopped",
+ 25: "continued",
+ 26: "stopped (tty input)",
+ 27: "stopped (tty output)",
+ 28: "virtual timer expired",
+ 29: "profiling timer expired",
+ 30: "CPU time limit exceeded",
+ 31: "file size limit exceeded",
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
new file mode 100644
index 0000000..0f5ee22
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -0,0 +1,2020 @@
+// mkerrors.sh
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mipsle,linux
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- _const.go
+
+package unix
+
+import "syscall"
+
+const (
+ AF_ALG = 0x26
+ AF_APPLETALK = 0x5
+ AF_ASH = 0x12
+ AF_ATMPVC = 0x8
+ AF_ATMSVC = 0x14
+ AF_AX25 = 0x3
+ AF_BLUETOOTH = 0x1f
+ AF_BRIDGE = 0x7
+ AF_CAIF = 0x25
+ AF_CAN = 0x1d
+ AF_DECnet = 0xc
+ AF_ECONET = 0x13
+ AF_FILE = 0x1
+ AF_IB = 0x1b
+ AF_IEEE802154 = 0x24
+ AF_INET = 0x2
+ AF_INET6 = 0xa
+ AF_IPX = 0x4
+ AF_IRDA = 0x17
+ AF_ISDN = 0x22
+ AF_IUCV = 0x20
+ AF_KCM = 0x29
+ AF_KEY = 0xf
+ AF_LLC = 0x1a
+ AF_LOCAL = 0x1
+ AF_MAX = 0x2a
+ AF_MPLS = 0x1c
+ AF_NETBEUI = 0xd
+ AF_NETLINK = 0x10
+ AF_NETROM = 0x6
+ AF_NFC = 0x27
+ AF_PACKET = 0x11
+ AF_PHONET = 0x23
+ AF_PPPOX = 0x18
+ AF_RDS = 0x15
+ AF_ROSE = 0xb
+ AF_ROUTE = 0x10
+ AF_RXRPC = 0x21
+ AF_SECURITY = 0xe
+ AF_SNA = 0x16
+ AF_TIPC = 0x1e
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ AF_VSOCK = 0x28
+ AF_WANPIPE = 0x19
+ AF_X25 = 0x9
+ ARPHRD_6LOWPAN = 0x339
+ ARPHRD_ADAPT = 0x108
+ ARPHRD_APPLETLK = 0x8
+ ARPHRD_ARCNET = 0x7
+ ARPHRD_ASH = 0x30d
+ ARPHRD_ATM = 0x13
+ ARPHRD_AX25 = 0x3
+ ARPHRD_BIF = 0x307
+ ARPHRD_CAIF = 0x336
+ ARPHRD_CAN = 0x118
+ ARPHRD_CHAOS = 0x5
+ ARPHRD_CISCO = 0x201
+ ARPHRD_CSLIP = 0x101
+ ARPHRD_CSLIP6 = 0x103
+ ARPHRD_DDCMP = 0x205
+ ARPHRD_DLCI = 0xf
+ ARPHRD_ECONET = 0x30e
+ ARPHRD_EETHER = 0x2
+ ARPHRD_ETHER = 0x1
+ ARPHRD_EUI64 = 0x1b
+ ARPHRD_FCAL = 0x311
+ ARPHRD_FCFABRIC = 0x313
+ ARPHRD_FCPL = 0x312
+ ARPHRD_FCPP = 0x310
+ ARPHRD_FDDI = 0x306
+ ARPHRD_FRAD = 0x302
+ ARPHRD_HDLC = 0x201
+ ARPHRD_HIPPI = 0x30c
+ ARPHRD_HWX25 = 0x110
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ ARPHRD_IEEE80211 = 0x321
+ ARPHRD_IEEE80211_PRISM = 0x322
+ ARPHRD_IEEE80211_RADIOTAP = 0x323
+ ARPHRD_IEEE802154 = 0x324
+ ARPHRD_IEEE802154_MONITOR = 0x325
+ ARPHRD_IEEE802_TR = 0x320
+ ARPHRD_INFINIBAND = 0x20
+ ARPHRD_IP6GRE = 0x337
+ ARPHRD_IPDDP = 0x309
+ ARPHRD_IPGRE = 0x30a
+ ARPHRD_IRDA = 0x30f
+ ARPHRD_LAPB = 0x204
+ ARPHRD_LOCALTLK = 0x305
+ ARPHRD_LOOPBACK = 0x304
+ ARPHRD_METRICOM = 0x17
+ ARPHRD_NETLINK = 0x338
+ ARPHRD_NETROM = 0x0
+ ARPHRD_NONE = 0xfffe
+ ARPHRD_PHONET = 0x334
+ ARPHRD_PHONET_PIPE = 0x335
+ ARPHRD_PIMREG = 0x30b
+ ARPHRD_PPP = 0x200
+ ARPHRD_PRONET = 0x4
+ ARPHRD_RAWHDLC = 0x206
+ ARPHRD_ROSE = 0x10e
+ ARPHRD_RSRVD = 0x104
+ ARPHRD_SIT = 0x308
+ ARPHRD_SKIP = 0x303
+ ARPHRD_SLIP = 0x100
+ ARPHRD_SLIP6 = 0x102
+ ARPHRD_TUNNEL = 0x300
+ ARPHRD_TUNNEL6 = 0x301
+ ARPHRD_VOID = 0xffff
+ ARPHRD_X25 = 0x10f
+ B0 = 0x0
+ B1000000 = 0x1008
+ B110 = 0x3
+ B115200 = 0x1002
+ B1152000 = 0x1009
+ B1200 = 0x9
+ B134 = 0x4
+ B150 = 0x5
+ B1500000 = 0x100a
+ B1800 = 0xa
+ B19200 = 0xe
+ B200 = 0x6
+ B2000000 = 0x100b
+ B230400 = 0x1003
+ B2400 = 0xb
+ B2500000 = 0x100c
+ B300 = 0x7
+ B3000000 = 0x100d
+ B3500000 = 0x100e
+ B38400 = 0xf
+ B4000000 = 0x100f
+ B460800 = 0x1004
+ B4800 = 0xc
+ B50 = 0x1
+ B500000 = 0x1005
+ B57600 = 0x1001
+ B576000 = 0x1006
+ B600 = 0x8
+ B75 = 0x2
+ B921600 = 0x1007
+ B9600 = 0xd
+ BOTHER = 0x1000
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DIV = 0x30
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LL_OFF = -0x200000
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXINSNS = 0x1000
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MOD = 0x90
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_NET_OFF = -0x100000
+ BPF_OR = 0x40
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BPF_XOR = 0xa0
+ BRKINT = 0x2
+ BS0 = 0x0
+ BS1 = 0x2000
+ BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
+ CBAUD = 0x100f
+ CBAUDEX = 0x1000
+ CFLUSH = 0xf
+ CIBAUD = 0x100f0000
+ CLOCAL = 0x800
+ CLOCK_BOOTTIME = 0x7
+ CLOCK_BOOTTIME_ALARM = 0x9
+ CLOCK_DEFAULT = 0x0
+ CLOCK_EXT = 0x1
+ CLOCK_INT = 0x2
+ CLOCK_MONOTONIC = 0x1
+ CLOCK_MONOTONIC_COARSE = 0x6
+ CLOCK_MONOTONIC_RAW = 0x4
+ CLOCK_PROCESS_CPUTIME_ID = 0x2
+ CLOCK_REALTIME = 0x0
+ CLOCK_REALTIME_ALARM = 0x8
+ CLOCK_REALTIME_COARSE = 0x5
+ CLOCK_TAI = 0xb
+ CLOCK_THREAD_CPUTIME_ID = 0x3
+ CLOCK_TXFROMRX = 0x4
+ CLOCK_TXINT = 0x3
+ CLONE_CHILD_CLEARTID = 0x200000
+ CLONE_CHILD_SETTID = 0x1000000
+ CLONE_DETACHED = 0x400000
+ CLONE_FILES = 0x400
+ CLONE_FS = 0x200
+ CLONE_IO = 0x80000000
+ CLONE_NEWCGROUP = 0x2000000
+ CLONE_NEWIPC = 0x8000000
+ CLONE_NEWNET = 0x40000000
+ CLONE_NEWNS = 0x20000
+ CLONE_NEWPID = 0x20000000
+ CLONE_NEWUSER = 0x10000000
+ CLONE_NEWUTS = 0x4000000
+ CLONE_PARENT = 0x8000
+ CLONE_PARENT_SETTID = 0x100000
+ CLONE_PTRACE = 0x2000
+ CLONE_SETTLS = 0x80000
+ CLONE_SIGHAND = 0x800
+ CLONE_SYSVSEM = 0x40000
+ CLONE_THREAD = 0x10000
+ CLONE_UNTRACED = 0x800000
+ CLONE_VFORK = 0x4000
+ CLONE_VM = 0x100
+ CMSPAR = 0x40000000
+ CR0 = 0x0
+ CR1 = 0x200
+ CR2 = 0x400
+ CR3 = 0x600
+ CRDLY = 0x600
+ CREAD = 0x80
+ CRTSCTS = 0x80000000
+ CS5 = 0x0
+ CS6 = 0x10
+ CS7 = 0x20
+ CS8 = 0x30
+ CSIGNAL = 0xff
+ CSIZE = 0x30
+ CSTART = 0x11
+ CSTATUS = 0x0
+ CSTOP = 0x13
+ CSTOPB = 0x40
+ CSUSP = 0x1a
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ DT_WHT = 0xe
+ ECHO = 0x8
+ ECHOCTL = 0x200
+ ECHOE = 0x10
+ ECHOK = 0x20
+ ECHOKE = 0x800
+ ECHONL = 0x40
+ ECHOPRT = 0x400
+ ENCODING_DEFAULT = 0x0
+ ENCODING_FM_MARK = 0x3
+ ENCODING_FM_SPACE = 0x4
+ ENCODING_MANCHESTER = 0x5
+ ENCODING_NRZ = 0x1
+ ENCODING_NRZI = 0x2
+ EPOLLERR = 0x8
+ EPOLLET = 0x80000000
+ EPOLLEXCLUSIVE = 0x10000000
+ EPOLLHUP = 0x10
+ EPOLLIN = 0x1
+ EPOLLMSG = 0x400
+ EPOLLONESHOT = 0x40000000
+ EPOLLOUT = 0x4
+ EPOLLPRI = 0x2
+ EPOLLRDBAND = 0x80
+ EPOLLRDHUP = 0x2000
+ EPOLLRDNORM = 0x40
+ EPOLLWAKEUP = 0x20000000
+ EPOLLWRBAND = 0x200
+ EPOLLWRNORM = 0x100
+ EPOLL_CLOEXEC = 0x80000
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CTL_DEL = 0x2
+ EPOLL_CTL_MOD = 0x3
+ ETH_P_1588 = 0x88f7
+ ETH_P_8021AD = 0x88a8
+ ETH_P_8021AH = 0x88e7
+ ETH_P_8021Q = 0x8100
+ ETH_P_80221 = 0x8917
+ ETH_P_802_2 = 0x4
+ ETH_P_802_3 = 0x1
+ ETH_P_802_3_MIN = 0x600
+ ETH_P_802_EX1 = 0x88b5
+ ETH_P_AARP = 0x80f3
+ ETH_P_AF_IUCV = 0xfbfb
+ ETH_P_ALL = 0x3
+ ETH_P_AOE = 0x88a2
+ ETH_P_ARCNET = 0x1a
+ ETH_P_ARP = 0x806
+ ETH_P_ATALK = 0x809b
+ ETH_P_ATMFATE = 0x8884
+ ETH_P_ATMMPOA = 0x884c
+ ETH_P_AX25 = 0x2
+ ETH_P_BATMAN = 0x4305
+ ETH_P_BPQ = 0x8ff
+ ETH_P_CAIF = 0xf7
+ ETH_P_CAN = 0xc
+ ETH_P_CANFD = 0xd
+ ETH_P_CONTROL = 0x16
+ ETH_P_CUST = 0x6006
+ ETH_P_DDCMP = 0x6
+ ETH_P_DEC = 0x6000
+ ETH_P_DIAG = 0x6005
+ ETH_P_DNA_DL = 0x6001
+ ETH_P_DNA_RC = 0x6002
+ ETH_P_DNA_RT = 0x6003
+ ETH_P_DSA = 0x1b
+ ETH_P_ECONET = 0x18
+ ETH_P_EDSA = 0xdada
+ ETH_P_FCOE = 0x8906
+ ETH_P_FIP = 0x8914
+ ETH_P_HDLC = 0x19
+ ETH_P_HSR = 0x892f
+ ETH_P_IEEE802154 = 0xf6
+ ETH_P_IEEEPUP = 0xa00
+ ETH_P_IEEEPUPAT = 0xa01
+ ETH_P_IP = 0x800
+ ETH_P_IPV6 = 0x86dd
+ ETH_P_IPX = 0x8137
+ ETH_P_IRDA = 0x17
+ ETH_P_LAT = 0x6004
+ ETH_P_LINK_CTL = 0x886c
+ ETH_P_LOCALTALK = 0x9
+ ETH_P_LOOP = 0x60
+ ETH_P_LOOPBACK = 0x9000
+ ETH_P_MACSEC = 0x88e5
+ ETH_P_MOBITEX = 0x15
+ ETH_P_MPLS_MC = 0x8848
+ ETH_P_MPLS_UC = 0x8847
+ ETH_P_MVRP = 0x88f5
+ ETH_P_PAE = 0x888e
+ ETH_P_PAUSE = 0x8808
+ ETH_P_PHONET = 0xf5
+ ETH_P_PPPTALK = 0x10
+ ETH_P_PPP_DISC = 0x8863
+ ETH_P_PPP_MP = 0x8
+ ETH_P_PPP_SES = 0x8864
+ ETH_P_PRP = 0x88fb
+ ETH_P_PUP = 0x200
+ ETH_P_PUPAT = 0x201
+ ETH_P_QINQ1 = 0x9100
+ ETH_P_QINQ2 = 0x9200
+ ETH_P_QINQ3 = 0x9300
+ ETH_P_RARP = 0x8035
+ ETH_P_SCA = 0x6007
+ ETH_P_SLOW = 0x8809
+ ETH_P_SNAP = 0x5
+ ETH_P_TDLS = 0x890d
+ ETH_P_TEB = 0x6558
+ ETH_P_TIPC = 0x88ca
+ ETH_P_TRAILER = 0x1c
+ ETH_P_TR_802_2 = 0x11
+ ETH_P_TSN = 0x22f0
+ ETH_P_WAN_PPP = 0x7
+ ETH_P_WCCP = 0x883e
+ ETH_P_X25 = 0x805
+ ETH_P_XDSA = 0xf8
+ EXTA = 0xe
+ EXTB = 0xf
+ EXTPROC = 0x10000
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x400
+ FF0 = 0x0
+ FF1 = 0x8000
+ FFDLY = 0x8000
+ FLUSHO = 0x2000
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0x406
+ F_EXLCK = 0x4
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLEASE = 0x401
+ F_GETLK = 0x21
+ F_GETLK64 = 0x21
+ F_GETOWN = 0x17
+ F_GETOWN_EX = 0x10
+ F_GETPIPE_SZ = 0x408
+ F_GETSIG = 0xb
+ F_LOCK = 0x1
+ F_NOTIFY = 0x402
+ F_OFD_GETLK = 0x24
+ F_OFD_SETLK = 0x25
+ F_OFD_SETLKW = 0x26
+ F_OK = 0x0
+ F_RDLCK = 0x0
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLEASE = 0x400
+ F_SETLK = 0x22
+ F_SETLK64 = 0x22
+ F_SETLKW = 0x23
+ F_SETLKW64 = 0x23
+ F_SETOWN = 0x18
+ F_SETOWN_EX = 0xf
+ F_SETPIPE_SZ = 0x407
+ F_SETSIG = 0xa
+ F_SHLCK = 0x8
+ F_TEST = 0x3
+ F_TLOCK = 0x2
+ F_ULOCK = 0x0
+ F_UNLCK = 0x2
+ F_WRLCK = 0x1
+ HUPCL = 0x400
+ IBSHIFT = 0x10
+ ICANON = 0x2
+ ICMPV6_FILTER = 0x1
+ ICRNL = 0x100
+ IEXTEN = 0x100
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_MANAGETEMPADDR = 0x100
+ IFA_F_MCAUTOJOIN = 0x400
+ IFA_F_NODAD = 0x2
+ IFA_F_NOPREFIXROUTE = 0x200
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_STABLE_PRIVACY = 0x800
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x8
+ IFF_ALLMULTI = 0x200
+ IFF_ATTACH_QUEUE = 0x200
+ IFF_AUTOMEDIA = 0x4000
+ IFF_BROADCAST = 0x2
+ IFF_DEBUG = 0x4
+ IFF_DETACH_QUEUE = 0x400
+ IFF_DORMANT = 0x20000
+ IFF_DYNAMIC = 0x8000
+ IFF_ECHO = 0x40000
+ IFF_LOOPBACK = 0x8
+ IFF_LOWER_UP = 0x10000
+ IFF_MASTER = 0x400
+ IFF_MULTICAST = 0x1000
+ IFF_MULTI_QUEUE = 0x100
+ IFF_NOARP = 0x80
+ IFF_NOFILTER = 0x1000
+ IFF_NOTRAILERS = 0x20
+ IFF_NO_PI = 0x1000
+ IFF_ONE_QUEUE = 0x2000
+ IFF_PERSIST = 0x800
+ IFF_POINTOPOINT = 0x10
+ IFF_PORTSEL = 0x2000
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SLAVE = 0x800
+ IFF_TAP = 0x2
+ IFF_TUN = 0x1
+ IFF_TUN_EXCL = 0x8000
+ IFF_UP = 0x1
+ IFF_VNET_HDR = 0x4000
+ IFF_VOLATILE = 0x70c5a
+ IFNAMSIZ = 0x10
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_ACCESS = 0x1
+ IN_ALL_EVENTS = 0xfff
+ IN_ATTRIB = 0x4
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLOEXEC = 0x80000
+ IN_CLOSE = 0x18
+ IN_CLOSE_NOWRITE = 0x10
+ IN_CLOSE_WRITE = 0x8
+ IN_CREATE = 0x100
+ IN_DELETE = 0x200
+ IN_DELETE_SELF = 0x400
+ IN_DONT_FOLLOW = 0x2000000
+ IN_EXCL_UNLINK = 0x4000000
+ IN_IGNORED = 0x8000
+ IN_ISDIR = 0x40000000
+ IN_LOOPBACKNET = 0x7f
+ IN_MASK_ADD = 0x20000000
+ IN_MODIFY = 0x2
+ IN_MOVE = 0xc0
+ IN_MOVED_FROM = 0x40
+ IN_MOVED_TO = 0x80
+ IN_MOVE_SELF = 0x800
+ IN_NONBLOCK = 0x80
+ IN_ONESHOT = 0x80000000
+ IN_ONLYDIR = 0x1000000
+ IN_OPEN = 0x20
+ IN_Q_OVERFLOW = 0x4000
+ IN_UNMOUNT = 0x2000
+ IPPROTO_AH = 0x33
+ IPPROTO_BEETPH = 0x5e
+ IPPROTO_COMP = 0x6c
+ IPPROTO_DCCP = 0x21
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_ESP = 0x32
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_MH = 0x87
+ IPPROTO_MPLS = 0x89
+ IPPROTO_MTP = 0x5c
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
+ IPV6_2292DSTOPTS = 0x4
+ IPV6_2292HOPLIMIT = 0x8
+ IPV6_2292HOPOPTS = 0x3
+ IPV6_2292PKTINFO = 0x2
+ IPV6_2292PKTOPTIONS = 0x6
+ IPV6_2292RTHDR = 0x5
+ IPV6_ADDRFORM = 0x1
+ IPV6_ADD_MEMBERSHIP = 0x14
+ IPV6_AUTHHDR = 0xa
+ IPV6_CHECKSUM = 0x7
+ IPV6_DONTFRAG = 0x3e
+ IPV6_DROP_MEMBERSHIP = 0x15
+ IPV6_DSTOPTS = 0x3b
+ IPV6_HDRINCL = 0x24
+ IPV6_HOPLIMIT = 0x34
+ IPV6_HOPOPTS = 0x36
+ IPV6_IPSEC_POLICY = 0x22
+ IPV6_JOIN_ANYCAST = 0x1b
+ IPV6_JOIN_GROUP = 0x14
+ IPV6_LEAVE_ANYCAST = 0x1c
+ IPV6_LEAVE_GROUP = 0x15
+ IPV6_MTU = 0x18
+ IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_HOPS = 0x12
+ IPV6_MULTICAST_IF = 0x11
+ IPV6_MULTICAST_LOOP = 0x13
+ IPV6_NEXTHOP = 0x9
+ IPV6_PATHMTU = 0x3d
+ IPV6_PKTINFO = 0x32
+ IPV6_PMTUDISC_DO = 0x2
+ IPV6_PMTUDISC_DONT = 0x0
+ IPV6_PMTUDISC_INTERFACE = 0x4
+ IPV6_PMTUDISC_OMIT = 0x5
+ IPV6_PMTUDISC_PROBE = 0x3
+ IPV6_PMTUDISC_WANT = 0x1
+ IPV6_RECVDSTOPTS = 0x3a
+ IPV6_RECVERR = 0x19
+ IPV6_RECVHOPLIMIT = 0x33
+ IPV6_RECVHOPOPTS = 0x35
+ IPV6_RECVPATHMTU = 0x3c
+ IPV6_RECVPKTINFO = 0x31
+ IPV6_RECVRTHDR = 0x38
+ IPV6_RECVTCLASS = 0x42
+ IPV6_ROUTER_ALERT = 0x16
+ IPV6_RTHDR = 0x39
+ IPV6_RTHDRDSTOPTS = 0x37
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_RXDSTOPTS = 0x3b
+ IPV6_RXHOPOPTS = 0x36
+ IPV6_TCLASS = 0x43
+ IPV6_UNICAST_HOPS = 0x10
+ IPV6_V6ONLY = 0x1a
+ IPV6_XFRM_POLICY = 0x23
+ IP_ADD_MEMBERSHIP = 0x23
+ IP_ADD_SOURCE_MEMBERSHIP = 0x27
+ IP_BIND_ADDRESS_NO_PORT = 0x18
+ IP_BLOCK_SOURCE = 0x26
+ IP_CHECKSUM = 0x17
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0x24
+ IP_DROP_SOURCE_MEMBERSHIP = 0x28
+ IP_FREEBIND = 0xf
+ IP_HDRINCL = 0x3
+ IP_IPSEC_POLICY = 0x10
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0x14
+ IP_MF = 0x2000
+ IP_MINTTL = 0x15
+ IP_MSFILTER = 0x29
+ IP_MSS = 0x240
+ IP_MTU = 0xe
+ IP_MTU_DISCOVER = 0xa
+ IP_MULTICAST_ALL = 0x31
+ IP_MULTICAST_IF = 0x20
+ IP_MULTICAST_LOOP = 0x22
+ IP_MULTICAST_TTL = 0x21
+ IP_NODEFRAG = 0x16
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x4
+ IP_ORIGDSTADDR = 0x14
+ IP_PASSSEC = 0x12
+ IP_PKTINFO = 0x8
+ IP_PKTOPTIONS = 0x9
+ IP_PMTUDISC = 0xa
+ IP_PMTUDISC_DO = 0x2
+ IP_PMTUDISC_DONT = 0x0
+ IP_PMTUDISC_INTERFACE = 0x4
+ IP_PMTUDISC_OMIT = 0x5
+ IP_PMTUDISC_PROBE = 0x3
+ IP_PMTUDISC_WANT = 0x1
+ IP_RECVERR = 0xb
+ IP_RECVOPTS = 0x6
+ IP_RECVORIGDSTADDR = 0x14
+ IP_RECVRETOPTS = 0x7
+ IP_RECVTOS = 0xd
+ IP_RECVTTL = 0xc
+ IP_RETOPTS = 0x7
+ IP_RF = 0x8000
+ IP_ROUTER_ALERT = 0x5
+ IP_TOS = 0x1
+ IP_TRANSPARENT = 0x13
+ IP_TTL = 0x2
+ IP_UNBLOCK_SOURCE = 0x25
+ IP_UNICAST_IF = 0x32
+ IP_XFRM_POLICY = 0x11
+ ISIG = 0x1
+ ISTRIP = 0x20
+ IUCLC = 0x200
+ IUTF8 = 0x4000
+ IXANY = 0x800
+ IXOFF = 0x1000
+ IXON = 0x400
+ LINUX_REBOOT_CMD_CAD_OFF = 0x0
+ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
+ LINUX_REBOOT_CMD_HALT = 0xcdef0123
+ LINUX_REBOOT_CMD_KEXEC = 0x45584543
+ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc
+ LINUX_REBOOT_CMD_RESTART = 0x1234567
+ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4
+ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
+ LINUX_REBOOT_MAGIC1 = 0xfee1dead
+ LINUX_REBOOT_MAGIC2 = 0x28121969
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DODUMP = 0x11
+ MADV_DOFORK = 0xb
+ MADV_DONTDUMP = 0x10
+ MADV_DONTFORK = 0xa
+ MADV_DONTNEED = 0x4
+ MADV_FREE = 0x8
+ MADV_HUGEPAGE = 0xe
+ MADV_HWPOISON = 0x64
+ MADV_MERGEABLE = 0xc
+ MADV_NOHUGEPAGE = 0xf
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_REMOVE = 0x9
+ MADV_SEQUENTIAL = 0x2
+ MADV_UNMERGEABLE = 0xd
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x800
+ MAP_ANONYMOUS = 0x800
+ MAP_DENYWRITE = 0x2000
+ MAP_EXECUTABLE = 0x4000
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_GROWSDOWN = 0x1000
+ MAP_HUGETLB = 0x80000
+ MAP_HUGE_MASK = 0x3f
+ MAP_HUGE_SHIFT = 0x1a
+ MAP_LOCKED = 0x8000
+ MAP_NONBLOCK = 0x20000
+ MAP_NORESERVE = 0x400
+ MAP_POPULATE = 0x10000
+ MAP_PRIVATE = 0x2
+ MAP_RENAME = 0x800
+ MAP_SHARED = 0x1
+ MAP_STACK = 0x40000
+ MAP_TYPE = 0xf
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MCL_ONFAULT = 0x4
+ MNT_DETACH = 0x2
+ MNT_EXPIRE = 0x4
+ MNT_FORCE = 0x1
+ MSG_BATCH = 0x40000
+ MSG_CMSG_CLOEXEC = 0x40000000
+ MSG_CONFIRM = 0x800
+ MSG_CTRUNC = 0x8
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x40
+ MSG_EOR = 0x80
+ MSG_ERRQUEUE = 0x2000
+ MSG_FASTOPEN = 0x20000000
+ MSG_FIN = 0x200
+ MSG_MORE = 0x8000
+ MSG_NOSIGNAL = 0x4000
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_PROXY = 0x10
+ MSG_RST = 0x1000
+ MSG_SYN = 0x400
+ MSG_TRUNC = 0x20
+ MSG_TRYHARD = 0x4
+ MSG_WAITALL = 0x100
+ MSG_WAITFORONE = 0x10000
+ MS_ACTIVE = 0x40000000
+ MS_ASYNC = 0x1
+ MS_BIND = 0x1000
+ MS_DIRSYNC = 0x80
+ MS_INVALIDATE = 0x2
+ MS_I_VERSION = 0x800000
+ MS_KERNMOUNT = 0x400000
+ MS_LAZYTIME = 0x2000000
+ MS_MANDLOCK = 0x40
+ MS_MGC_MSK = 0xffff0000
+ MS_MGC_VAL = 0xc0ed0000
+ MS_MOVE = 0x2000
+ MS_NOATIME = 0x400
+ MS_NODEV = 0x4
+ MS_NODIRATIME = 0x800
+ MS_NOEXEC = 0x8
+ MS_NOSUID = 0x2
+ MS_NOUSER = -0x80000000
+ MS_POSIXACL = 0x10000
+ MS_PRIVATE = 0x40000
+ MS_RDONLY = 0x1
+ MS_REC = 0x4000
+ MS_RELATIME = 0x200000
+ MS_REMOUNT = 0x20
+ MS_RMT_MASK = 0x2800051
+ MS_SHARED = 0x100000
+ MS_SILENT = 0x8000
+ MS_SLAVE = 0x80000
+ MS_STRICTATIME = 0x1000000
+ MS_SYNC = 0x4
+ MS_SYNCHRONOUS = 0x10
+ MS_UNBINDABLE = 0x20000
+ NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CAP_ACK = 0xa
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_CRYPTO = 0x15
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_LISTEN_ALL_NSID = 0x8
+ NETLINK_LIST_MEMBERSHIPS = 0x9
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_RDMA = 0x14
+ NETLINK_ROUTE = 0x0
+ NETLINK_RX_RING = 0x6
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_SOCK_DIAG = 0x4
+ NETLINK_TX_RING = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NL0 = 0x0
+ NL1 = 0x100
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLDLY = 0x100
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_DUMP_FILTERED = 0x20
+ NLM_F_DUMP_INTR = 0x10
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
+ NOFLSH = 0x80
+ OCRNL = 0x8
+ OFDEL = 0x80
+ OFILL = 0x40
+ OLCUC = 0x2
+ ONLCR = 0x4
+ ONLRET = 0x20
+ ONOCR = 0x10
+ OPOST = 0x1
+ O_ACCMODE = 0x3
+ O_APPEND = 0x8
+ O_ASYNC = 0x1000
+ O_CLOEXEC = 0x80000
+ O_CREAT = 0x100
+ O_DIRECT = 0x8000
+ O_DIRECTORY = 0x10000
+ O_DSYNC = 0x10
+ O_EXCL = 0x400
+ O_FSYNC = 0x4010
+ O_LARGEFILE = 0x2000
+ O_NDELAY = 0x80
+ O_NOATIME = 0x40000
+ O_NOCTTY = 0x800
+ O_NOFOLLOW = 0x20000
+ O_NONBLOCK = 0x80
+ O_PATH = 0x200000
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x4010
+ O_SYNC = 0x4010
+ O_TMPFILE = 0x410000
+ O_TRUNC = 0x200
+ O_WRONLY = 0x1
+ PACKET_ADD_MEMBERSHIP = 0x1
+ PACKET_AUXDATA = 0x8
+ PACKET_BROADCAST = 0x1
+ PACKET_COPY_THRESH = 0x7
+ PACKET_DROP_MEMBERSHIP = 0x2
+ PACKET_FANOUT = 0x12
+ PACKET_FANOUT_CBPF = 0x6
+ PACKET_FANOUT_CPU = 0x2
+ PACKET_FANOUT_DATA = 0x16
+ PACKET_FANOUT_EBPF = 0x7
+ PACKET_FANOUT_FLAG_DEFRAG = 0x8000
+ PACKET_FANOUT_FLAG_ROLLOVER = 0x1000
+ PACKET_FANOUT_HASH = 0x0
+ PACKET_FANOUT_LB = 0x1
+ PACKET_FANOUT_QM = 0x5
+ PACKET_FANOUT_RND = 0x4
+ PACKET_FANOUT_ROLLOVER = 0x3
+ PACKET_FASTROUTE = 0x6
+ PACKET_HDRLEN = 0xb
+ PACKET_HOST = 0x0
+ PACKET_KERNEL = 0x7
+ PACKET_LOOPBACK = 0x5
+ PACKET_LOSS = 0xe
+ PACKET_MR_ALLMULTI = 0x2
+ PACKET_MR_MULTICAST = 0x0
+ PACKET_MR_PROMISC = 0x1
+ PACKET_MR_UNICAST = 0x3
+ PACKET_MULTICAST = 0x2
+ PACKET_ORIGDEV = 0x9
+ PACKET_OTHERHOST = 0x3
+ PACKET_OUTGOING = 0x4
+ PACKET_QDISC_BYPASS = 0x14
+ PACKET_RECV_OUTPUT = 0x3
+ PACKET_RESERVE = 0xc
+ PACKET_ROLLOVER_STATS = 0x15
+ PACKET_RX_RING = 0x5
+ PACKET_STATISTICS = 0x6
+ PACKET_TIMESTAMP = 0x11
+ PACKET_TX_HAS_OFF = 0x13
+ PACKET_TX_RING = 0xd
+ PACKET_TX_TIMESTAMP = 0x10
+ PACKET_USER = 0x6
+ PACKET_VERSION = 0xa
+ PACKET_VNET_HDR = 0xf
+ PARENB = 0x100
+ PARITY_CRC16_PR0 = 0x2
+ PARITY_CRC16_PR0_CCITT = 0x4
+ PARITY_CRC16_PR1 = 0x3
+ PARITY_CRC16_PR1_CCITT = 0x5
+ PARITY_CRC32_PR0_CCITT = 0x6
+ PARITY_CRC32_PR1_CCITT = 0x7
+ PARITY_DEFAULT = 0x0
+ PARITY_NONE = 0x1
+ PARMRK = 0x8
+ PARODD = 0x200
+ PENDIN = 0x4000
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_GROWSDOWN = 0x1000000
+ PROT_GROWSUP = 0x2000000
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ PR_CAPBSET_DROP = 0x18
+ PR_CAPBSET_READ = 0x17
+ PR_CAP_AMBIENT = 0x2f
+ PR_CAP_AMBIENT_CLEAR_ALL = 0x4
+ PR_CAP_AMBIENT_IS_SET = 0x1
+ PR_CAP_AMBIENT_LOWER = 0x3
+ PR_CAP_AMBIENT_RAISE = 0x2
+ PR_ENDIAN_BIG = 0x0
+ PR_ENDIAN_LITTLE = 0x1
+ PR_ENDIAN_PPC_LITTLE = 0x2
+ PR_FPEMU_NOPRINT = 0x1
+ PR_FPEMU_SIGFPE = 0x2
+ PR_FP_EXC_ASYNC = 0x2
+ PR_FP_EXC_DISABLED = 0x0
+ PR_FP_EXC_DIV = 0x10000
+ PR_FP_EXC_INV = 0x100000
+ PR_FP_EXC_NONRECOV = 0x1
+ PR_FP_EXC_OVF = 0x20000
+ PR_FP_EXC_PRECISE = 0x3
+ PR_FP_EXC_RES = 0x80000
+ PR_FP_EXC_SW_ENABLE = 0x80
+ PR_FP_EXC_UND = 0x40000
+ PR_FP_MODE_FR = 0x1
+ PR_FP_MODE_FRE = 0x2
+ PR_GET_CHILD_SUBREAPER = 0x25
+ PR_GET_DUMPABLE = 0x3
+ PR_GET_ENDIAN = 0x13
+ PR_GET_FPEMU = 0x9
+ PR_GET_FPEXC = 0xb
+ PR_GET_FP_MODE = 0x2e
+ PR_GET_KEEPCAPS = 0x7
+ PR_GET_NAME = 0x10
+ PR_GET_NO_NEW_PRIVS = 0x27
+ PR_GET_PDEATHSIG = 0x2
+ PR_GET_SECCOMP = 0x15
+ PR_GET_SECUREBITS = 0x1b
+ PR_GET_THP_DISABLE = 0x2a
+ PR_GET_TID_ADDRESS = 0x28
+ PR_GET_TIMERSLACK = 0x1e
+ PR_GET_TIMING = 0xd
+ PR_GET_TSC = 0x19
+ PR_GET_UNALIGN = 0x5
+ PR_MCE_KILL = 0x21
+ PR_MCE_KILL_CLEAR = 0x0
+ PR_MCE_KILL_DEFAULT = 0x2
+ PR_MCE_KILL_EARLY = 0x1
+ PR_MCE_KILL_GET = 0x22
+ PR_MCE_KILL_LATE = 0x0
+ PR_MCE_KILL_SET = 0x1
+ PR_MPX_DISABLE_MANAGEMENT = 0x2c
+ PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_SET_CHILD_SUBREAPER = 0x24
+ PR_SET_DUMPABLE = 0x4
+ PR_SET_ENDIAN = 0x14
+ PR_SET_FPEMU = 0xa
+ PR_SET_FPEXC = 0xc
+ PR_SET_FP_MODE = 0x2d
+ PR_SET_KEEPCAPS = 0x8
+ PR_SET_MM = 0x23
+ PR_SET_MM_ARG_END = 0x9
+ PR_SET_MM_ARG_START = 0x8
+ PR_SET_MM_AUXV = 0xc
+ PR_SET_MM_BRK = 0x7
+ PR_SET_MM_END_CODE = 0x2
+ PR_SET_MM_END_DATA = 0x4
+ PR_SET_MM_ENV_END = 0xb
+ PR_SET_MM_ENV_START = 0xa
+ PR_SET_MM_EXE_FILE = 0xd
+ PR_SET_MM_MAP = 0xe
+ PR_SET_MM_MAP_SIZE = 0xf
+ PR_SET_MM_START_BRK = 0x6
+ PR_SET_MM_START_CODE = 0x1
+ PR_SET_MM_START_DATA = 0x3
+ PR_SET_MM_START_STACK = 0x5
+ PR_SET_NAME = 0xf
+ PR_SET_NO_NEW_PRIVS = 0x26
+ PR_SET_PDEATHSIG = 0x1
+ PR_SET_PTRACER = 0x59616d61
+ PR_SET_PTRACER_ANY = 0xffffffff
+ PR_SET_SECCOMP = 0x16
+ PR_SET_SECUREBITS = 0x1c
+ PR_SET_THP_DISABLE = 0x29
+ PR_SET_TIMERSLACK = 0x1d
+ PR_SET_TIMING = 0xe
+ PR_SET_TSC = 0x1a
+ PR_SET_UNALIGN = 0x6
+ PR_TASK_PERF_EVENTS_DISABLE = 0x1f
+ PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMING_STATISTICAL = 0x0
+ PR_TIMING_TIMESTAMP = 0x1
+ PR_TSC_ENABLE = 0x1
+ PR_TSC_SIGSEGV = 0x2
+ PR_UNALIGN_NOPRINT = 0x1
+ PR_UNALIGN_SIGBUS = 0x2
+ PTRACE_ATTACH = 0x10
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0x11
+ PTRACE_EVENT_CLONE = 0x3
+ PTRACE_EVENT_EXEC = 0x4
+ PTRACE_EVENT_EXIT = 0x6
+ PTRACE_EVENT_FORK = 0x1
+ PTRACE_EVENT_SECCOMP = 0x7
+ PTRACE_EVENT_STOP = 0x80
+ PTRACE_EVENT_VFORK = 0x2
+ PTRACE_EVENT_VFORK_DONE = 0x5
+ PTRACE_GETEVENTMSG = 0x4201
+ PTRACE_GETFPREGS = 0xe
+ PTRACE_GETREGS = 0xc
+ PTRACE_GETREGSET = 0x4204
+ PTRACE_GETSIGINFO = 0x4202
+ PTRACE_GETSIGMASK = 0x420a
+ PTRACE_GET_THREAD_AREA = 0x19
+ PTRACE_GET_THREAD_AREA_3264 = 0xc4
+ PTRACE_GET_WATCH_REGS = 0xd0
+ PTRACE_INTERRUPT = 0x4207
+ PTRACE_KILL = 0x8
+ PTRACE_LISTEN = 0x4208
+ PTRACE_OLDSETOPTIONS = 0x15
+ PTRACE_O_EXITKILL = 0x100000
+ PTRACE_O_MASK = 0x3000ff
+ PTRACE_O_SUSPEND_SECCOMP = 0x200000
+ PTRACE_O_TRACECLONE = 0x8
+ PTRACE_O_TRACEEXEC = 0x10
+ PTRACE_O_TRACEEXIT = 0x40
+ PTRACE_O_TRACEFORK = 0x2
+ PTRACE_O_TRACESECCOMP = 0x80
+ PTRACE_O_TRACESYSGOOD = 0x1
+ PTRACE_O_TRACEVFORK = 0x4
+ PTRACE_O_TRACEVFORKDONE = 0x20
+ PTRACE_PEEKDATA = 0x2
+ PTRACE_PEEKDATA_3264 = 0xc1
+ PTRACE_PEEKSIGINFO = 0x4209
+ PTRACE_PEEKSIGINFO_SHARED = 0x1
+ PTRACE_PEEKTEXT = 0x1
+ PTRACE_PEEKTEXT_3264 = 0xc0
+ PTRACE_PEEKUSR = 0x3
+ PTRACE_POKEDATA = 0x5
+ PTRACE_POKEDATA_3264 = 0xc3
+ PTRACE_POKETEXT = 0x4
+ PTRACE_POKETEXT_3264 = 0xc2
+ PTRACE_POKEUSR = 0x6
+ PTRACE_SECCOMP_GET_FILTER = 0x420c
+ PTRACE_SEIZE = 0x4206
+ PTRACE_SETFPREGS = 0xf
+ PTRACE_SETOPTIONS = 0x4200
+ PTRACE_SETREGS = 0xd
+ PTRACE_SETREGSET = 0x4205
+ PTRACE_SETSIGINFO = 0x4203
+ PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SET_THREAD_AREA = 0x1a
+ PTRACE_SET_WATCH_REGS = 0xd1
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_SYSCALL = 0x18
+ PTRACE_TRACEME = 0x0
+ RLIMIT_AS = 0x6
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_NOFILE = 0x5
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = -0x1
+ RTAX_ADVMSS = 0x8
+ RTAX_CC_ALGO = 0x10
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_MASK = 0xf
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_INITRWND = 0xe
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0x10
+ RTAX_MTU = 0x2
+ RTAX_QUICKACK = 0xf
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0x0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0x18
+ RTCF_DIRECTSRC = 0x4000000
+ RTCF_DOREDIRECT = 0x1000000
+ RTCF_LOG = 0x2000000
+ RTCF_MASQ = 0x400000
+ RTCF_NAT = 0x800000
+ RTCF_VALVE = 0x200000
+ RTF_ADDRCLASSMASK = 0xf8000000
+ RTF_ADDRCONF = 0x40000
+ RTF_ALLONLINK = 0x20000
+ RTF_BROADCAST = 0x10000000
+ RTF_CACHE = 0x1000000
+ RTF_DEFAULT = 0x10000
+ RTF_DYNAMIC = 0x10
+ RTF_FLOW = 0x2000000
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_INTERFACE = 0x40000000
+ RTF_IRTT = 0x100
+ RTF_LINKRT = 0x100000
+ RTF_LOCAL = 0x80000000
+ RTF_MODIFIED = 0x20
+ RTF_MSS = 0x40
+ RTF_MTU = 0x40
+ RTF_MULTICAST = 0x20000000
+ RTF_NAT = 0x8000000
+ RTF_NOFORWARD = 0x1000
+ RTF_NONEXTHOP = 0x200000
+ RTF_NOPMTUDISC = 0x4000
+ RTF_POLICY = 0x4000000
+ RTF_REINSTATE = 0x8
+ RTF_REJECT = 0x200
+ RTF_STATIC = 0x400
+ RTF_THROW = 0x2000
+ RTF_UP = 0x1
+ RTF_WINDOW = 0x80
+ RTF_XRESOLVE = 0x800
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELMDB = 0x55
+ RTM_DELNEIGH = 0x1d
+ RTM_DELNSID = 0x59
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_LOOKUP_TABLE = 0x1000
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMDB = 0x56
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETNETCONF = 0x52
+ RTM_GETNSID = 0x5a
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETSTATS = 0x5e
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x5f
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWMDB = 0x54
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWNETCONF = 0x50
+ RTM_NEWNSID = 0x58
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWSTATS = 0x5c
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x14
+ RTM_NR_MSGTYPES = 0x50
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
+ RTNH_ALIGNTO = 0x4
+ RTNH_COMPARE_MASK = 0x11
+ RTNH_F_DEAD = 0x1
+ RTNH_F_LINKDOWN = 0x10
+ RTNH_F_OFFLOAD = 0x8
+ RTNH_F_ONLINK = 0x4
+ RTNH_F_PERVASIVE = 0x2
+ RTN_MAX = 0xb
+ RTPROT_BABEL = 0x2a
+ RTPROT_BIRD = 0xc
+ RTPROT_BOOT = 0x3
+ RTPROT_DHCP = 0x10
+ RTPROT_DNROUTED = 0xd
+ RTPROT_GATED = 0x8
+ RTPROT_KERNEL = 0x2
+ RTPROT_MROUTED = 0x11
+ RTPROT_MRT = 0xa
+ RTPROT_NTK = 0xf
+ RTPROT_RA = 0x9
+ RTPROT_REDIRECT = 0x1
+ RTPROT_STATIC = 0x4
+ RTPROT_UNSPEC = 0x0
+ RTPROT_XORP = 0xe
+ RTPROT_ZEBRA = 0xb
+ RT_CLASS_DEFAULT = 0xfd
+ RT_CLASS_LOCAL = 0xff
+ RT_CLASS_MAIN = 0xfe
+ RT_CLASS_MAX = 0xff
+ RT_CLASS_UNSPEC = 0x0
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_CREDENTIALS = 0x2
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x1d
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPNS = 0x23
+ SCM_WIFI_STATUS = 0x29
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDDLCI = 0x8980
+ SIOCADDMULTI = 0x8931
+ SIOCADDRT = 0x890b
+ SIOCATMARK = 0x40047307
+ SIOCDARP = 0x8953
+ SIOCDELDLCI = 0x8981
+ SIOCDELMULTI = 0x8932
+ SIOCDELRT = 0x890c
+ SIOCDEVPRIVATE = 0x89f0
+ SIOCDIFADDR = 0x8936
+ SIOCDRARP = 0x8960
+ SIOCGARP = 0x8954
+ SIOCGIFADDR = 0x8915
+ SIOCGIFBR = 0x8940
+ SIOCGIFBRDADDR = 0x8919
+ SIOCGIFCONF = 0x8912
+ SIOCGIFCOUNT = 0x8938
+ SIOCGIFDSTADDR = 0x8917
+ SIOCGIFENCAP = 0x8925
+ SIOCGIFFLAGS = 0x8913
+ SIOCGIFHWADDR = 0x8927
+ SIOCGIFINDEX = 0x8933
+ SIOCGIFMAP = 0x8970
+ SIOCGIFMEM = 0x891f
+ SIOCGIFMETRIC = 0x891d
+ SIOCGIFMTU = 0x8921
+ SIOCGIFNAME = 0x8910
+ SIOCGIFNETMASK = 0x891b
+ SIOCGIFPFLAGS = 0x8935
+ SIOCGIFSLAVE = 0x8929
+ SIOCGIFTXQLEN = 0x8942
+ SIOCGPGRP = 0x40047309
+ SIOCGRARP = 0x8961
+ SIOCGSTAMP = 0x8906
+ SIOCGSTAMPNS = 0x8907
+ SIOCPROTOPRIVATE = 0x89e0
+ SIOCRTMSG = 0x890d
+ SIOCSARP = 0x8955
+ SIOCSIFADDR = 0x8916
+ SIOCSIFBR = 0x8941
+ SIOCSIFBRDADDR = 0x891a
+ SIOCSIFDSTADDR = 0x8918
+ SIOCSIFENCAP = 0x8926
+ SIOCSIFFLAGS = 0x8914
+ SIOCSIFHWADDR = 0x8924
+ SIOCSIFHWBROADCAST = 0x8937
+ SIOCSIFLINK = 0x8911
+ SIOCSIFMAP = 0x8971
+ SIOCSIFMEM = 0x8920
+ SIOCSIFMETRIC = 0x891e
+ SIOCSIFMTU = 0x8922
+ SIOCSIFNAME = 0x8923
+ SIOCSIFNETMASK = 0x891c
+ SIOCSIFPFLAGS = 0x8934
+ SIOCSIFSLAVE = 0x8930
+ SIOCSIFTXQLEN = 0x8943
+ SIOCSPGRP = 0x80047308
+ SIOCSRARP = 0x8962
+ SOCK_CLOEXEC = 0x80000
+ SOCK_DCCP = 0x6
+ SOCK_DGRAM = 0x1
+ SOCK_NONBLOCK = 0x80
+ SOCK_PACKET = 0xa
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x2
+ SOL_AAL = 0x109
+ SOL_ALG = 0x117
+ SOL_ATM = 0x108
+ SOL_CAIF = 0x116
+ SOL_CAN_BASE = 0x64
+ SOL_DCCP = 0x10d
+ SOL_DECNET = 0x105
+ SOL_ICMPV6 = 0x3a
+ SOL_IP = 0x0
+ SOL_IPV6 = 0x29
+ SOL_IRDA = 0x10a
+ SOL_IUCV = 0x115
+ SOL_KCM = 0x119
+ SOL_LLC = 0x10c
+ SOL_NETBEUI = 0x10b
+ SOL_NETLINK = 0x10e
+ SOL_NFC = 0x118
+ SOL_PACKET = 0x107
+ SOL_PNPIPE = 0x113
+ SOL_PPPOL2TP = 0x111
+ SOL_RAW = 0xff
+ SOL_RDS = 0x114
+ SOL_RXRPC = 0x110
+ SOL_SOCKET = 0xffff
+ SOL_TCP = 0x6
+ SOL_TIPC = 0x10f
+ SOL_X25 = 0x106
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x1009
+ SO_ATTACH_BPF = 0x32
+ SO_ATTACH_FILTER = 0x1a
+ SO_ATTACH_REUSEPORT_CBPF = 0x33
+ SO_ATTACH_REUSEPORT_EBPF = 0x34
+ SO_BINDTODEVICE = 0x19
+ SO_BPF_EXTENSIONS = 0x30
+ SO_BROADCAST = 0x20
+ SO_BSDCOMPAT = 0xe
+ SO_BUSY_POLL = 0x2e
+ SO_CNX_ADVICE = 0x35
+ SO_DEBUG = 0x1
+ SO_DETACH_BPF = 0x1b
+ SO_DETACH_FILTER = 0x1b
+ SO_DOMAIN = 0x1029
+ SO_DONTROUTE = 0x10
+ SO_ERROR = 0x1007
+ SO_GET_FILTER = 0x1a
+ SO_INCOMING_CPU = 0x31
+ SO_KEEPALIVE = 0x8
+ SO_LINGER = 0x80
+ SO_LOCK_FILTER = 0x2c
+ SO_MARK = 0x24
+ SO_MAX_PACING_RATE = 0x2f
+ SO_NOFCS = 0x2b
+ SO_NO_CHECK = 0xb
+ SO_OOBINLINE = 0x100
+ SO_PASSCRED = 0x11
+ SO_PASSSEC = 0x22
+ SO_PEEK_OFF = 0x2a
+ SO_PEERCRED = 0x12
+ SO_PEERNAME = 0x1c
+ SO_PEERSEC = 0x1e
+ SO_PRIORITY = 0xc
+ SO_PROTOCOL = 0x1028
+ SO_RCVBUF = 0x1002
+ SO_RCVBUFFORCE = 0x21
+ SO_RCVLOWAT = 0x1004
+ SO_RCVTIMEO = 0x1006
+ SO_REUSEADDR = 0x4
+ SO_REUSEPORT = 0x200
+ SO_RXQ_OVFL = 0x28
+ SO_SECURITY_AUTHENTICATION = 0x16
+ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
+ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+ SO_SELECT_ERR_QUEUE = 0x2d
+ SO_SNDBUF = 0x1001
+ SO_SNDBUFFORCE = 0x1f
+ SO_SNDLOWAT = 0x1003
+ SO_SNDTIMEO = 0x1005
+ SO_STYLE = 0x1008
+ SO_TIMESTAMP = 0x1d
+ SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPNS = 0x23
+ SO_TYPE = 0x1008
+ SO_WIFI_STATUS = 0x29
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ TAB0 = 0x0
+ TAB1 = 0x800
+ TAB2 = 0x1000
+ TAB3 = 0x1800
+ TABDLY = 0x1800
+ TCFLSH = 0x5407
+ TCGETA = 0x5401
+ TCGETS = 0x540d
+ TCGETS2 = 0x4030542a
+ TCIFLUSH = 0x0
+ TCIOFF = 0x2
+ TCIOFLUSH = 0x2
+ TCION = 0x3
+ TCOFLUSH = 0x1
+ TCOOFF = 0x0
+ TCOON = 0x1
+ TCP_CC_INFO = 0x1a
+ TCP_CONGESTION = 0xd
+ TCP_COOKIE_IN_ALWAYS = 0x1
+ TCP_COOKIE_MAX = 0x10
+ TCP_COOKIE_MIN = 0x8
+ TCP_COOKIE_OUT_NEVER = 0x2
+ TCP_COOKIE_PAIR_SIZE = 0x20
+ TCP_COOKIE_TRANSACTIONS = 0xf
+ TCP_CORK = 0x3
+ TCP_DEFER_ACCEPT = 0x9
+ TCP_FASTOPEN = 0x17
+ TCP_INFO = 0xb
+ TCP_KEEPCNT = 0x6
+ TCP_KEEPIDLE = 0x4
+ TCP_KEEPINTVL = 0x5
+ TCP_LINGER2 = 0x8
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0xe
+ TCP_MD5SIG_MAXKEYLEN = 0x50
+ TCP_MSS = 0x200
+ TCP_MSS_DEFAULT = 0x218
+ TCP_MSS_DESIRED = 0x4c4
+ TCP_NODELAY = 0x1
+ TCP_NOTSENT_LOWAT = 0x19
+ TCP_QUEUE_SEQ = 0x15
+ TCP_QUICKACK = 0xc
+ TCP_REPAIR = 0x13
+ TCP_REPAIR_OPTIONS = 0x16
+ TCP_REPAIR_QUEUE = 0x14
+ TCP_SAVED_SYN = 0x1c
+ TCP_SAVE_SYN = 0x1b
+ TCP_SYNCNT = 0x7
+ TCP_S_DATA_IN = 0x4
+ TCP_S_DATA_OUT = 0x8
+ TCP_THIN_DUPACK = 0x11
+ TCP_THIN_LINEAR_TIMEOUTS = 0x10
+ TCP_TIMESTAMP = 0x18
+ TCP_USER_TIMEOUT = 0x12
+ TCP_WINDOW_CLAMP = 0xa
+ TCSAFLUSH = 0x5410
+ TCSBRK = 0x5405
+ TCSBRKP = 0x5486
+ TCSETA = 0x5402
+ TCSETAF = 0x5404
+ TCSETAW = 0x5403
+ TCSETS = 0x540e
+ TCSETS2 = 0x8030542b
+ TCSETSF = 0x5410
+ TCSETSF2 = 0x8030542d
+ TCSETSW = 0x540f
+ TCSETSW2 = 0x8030542c
+ TCXONC = 0x5406
+ TIOCCBRK = 0x5428
+ TIOCCONS = 0x80047478
+ TIOCEXCL = 0x740d
+ TIOCGDEV = 0x40045432
+ TIOCGETD = 0x7400
+ TIOCGETP = 0x7408
+ TIOCGEXCL = 0x40045440
+ TIOCGICOUNT = 0x5492
+ TIOCGLCKTRMIOS = 0x548b
+ TIOCGLTC = 0x7474
+ TIOCGPGRP = 0x40047477
+ TIOCGPKT = 0x40045438
+ TIOCGPTLCK = 0x40045439
+ TIOCGPTN = 0x40045430
+ TIOCGRS485 = 0x4020542e
+ TIOCGSERIAL = 0x5484
+ TIOCGSID = 0x7416
+ TIOCGSOFTCAR = 0x5481
+ TIOCGWINSZ = 0x40087468
+ TIOCINQ = 0x467f
+ TIOCLINUX = 0x5483
+ TIOCMBIC = 0x741c
+ TIOCMBIS = 0x741b
+ TIOCMGET = 0x741d
+ TIOCMIWAIT = 0x5491
+ TIOCMSET = 0x741a
+ TIOCM_CAR = 0x100
+ TIOCM_CD = 0x100
+ TIOCM_CTS = 0x40
+ TIOCM_DSR = 0x400
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x200
+ TIOCM_RNG = 0x200
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x20
+ TIOCM_ST = 0x10
+ TIOCNOTTY = 0x5471
+ TIOCNXCL = 0x740e
+ TIOCOUTQ = 0x7472
+ TIOCPKT = 0x5470
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCSBRK = 0x5427
+ TIOCSCTTY = 0x5480
+ TIOCSERCONFIG = 0x5488
+ TIOCSERGETLSR = 0x548e
+ TIOCSERGETMULTI = 0x548f
+ TIOCSERGSTRUCT = 0x548d
+ TIOCSERGWILD = 0x5489
+ TIOCSERSETMULTI = 0x5490
+ TIOCSERSWILD = 0x548a
+ TIOCSER_TEMT = 0x1
+ TIOCSETD = 0x7401
+ TIOCSETN = 0x740a
+ TIOCSETP = 0x7409
+ TIOCSIG = 0x80045436
+ TIOCSLCKTRMIOS = 0x548c
+ TIOCSLTC = 0x7475
+ TIOCSPGRP = 0x80047476
+ TIOCSPTLCK = 0x80045431
+ TIOCSRS485 = 0xc020542f
+ TIOCSSERIAL = 0x5485
+ TIOCSSOFTCAR = 0x5482
+ TIOCSTI = 0x5472
+ TIOCSWINSZ = 0x80087467
+ TIOCVHANGUP = 0x5437
+ TOSTOP = 0x8000
+ TUNATTACHFILTER = 0x800854d5
+ TUNDETACHFILTER = 0x800854d6
+ TUNGETFEATURES = 0x400454cf
+ TUNGETFILTER = 0x400854db
+ TUNGETIFF = 0x400454d2
+ TUNGETSNDBUF = 0x400454d3
+ TUNGETVNETBE = 0x400454df
+ TUNGETVNETHDRSZ = 0x400454d7
+ TUNGETVNETLE = 0x400454dd
+ TUNSETDEBUG = 0x800454c9
+ TUNSETGROUP = 0x800454ce
+ TUNSETIFF = 0x800454ca
+ TUNSETIFINDEX = 0x800454da
+ TUNSETLINK = 0x800454cd
+ TUNSETNOCSUM = 0x800454c8
+ TUNSETOFFLOAD = 0x800454d0
+ TUNSETOWNER = 0x800454cc
+ TUNSETPERSIST = 0x800454cb
+ TUNSETQUEUE = 0x800454d9
+ TUNSETSNDBUF = 0x800454d4
+ TUNSETTXFILTER = 0x800454d1
+ TUNSETVNETBE = 0x800454de
+ TUNSETVNETHDRSZ = 0x800454d8
+ TUNSETVNETLE = 0x800454dc
+ VDISCARD = 0xd
+ VEOF = 0x10
+ VEOL = 0x11
+ VEOL2 = 0x6
+ VERASE = 0x2
+ VINTR = 0x0
+ VKILL = 0x3
+ VLNEXT = 0xf
+ VMIN = 0x4
+ VQUIT = 0x1
+ VREPRINT = 0xc
+ VSTART = 0x8
+ VSTOP = 0x9
+ VSUSP = 0xa
+ VSWTC = 0x7
+ VSWTCH = 0x7
+ VT0 = 0x0
+ VT1 = 0x4000
+ VTDLY = 0x4000
+ VTIME = 0x5
+ VWERASE = 0xe
+ WALL = 0x40000000
+ WCLONE = 0x80000000
+ WCONTINUED = 0x8
+ WEXITED = 0x4
+ WNOHANG = 0x1
+ WNOTHREAD = 0x20000000
+ WNOWAIT = 0x1000000
+ WORDSIZE = 0x20
+ WSTOPPED = 0x2
+ WUNTRACED = 0x2
+ XCASE = 0x4
+ XTABS = 0x1800
+)
+
+// Errors
+const (
+ E2BIG = syscall.Errno(0x7)
+ EACCES = syscall.Errno(0xd)
+ EADDRINUSE = syscall.Errno(0x7d)
+ EADDRNOTAVAIL = syscall.Errno(0x7e)
+ EADV = syscall.Errno(0x44)
+ EAFNOSUPPORT = syscall.Errno(0x7c)
+ EAGAIN = syscall.Errno(0xb)
+ EALREADY = syscall.Errno(0x95)
+ EBADE = syscall.Errno(0x32)
+ EBADF = syscall.Errno(0x9)
+ EBADFD = syscall.Errno(0x51)
+ EBADMSG = syscall.Errno(0x4d)
+ EBADR = syscall.Errno(0x33)
+ EBADRQC = syscall.Errno(0x36)
+ EBADSLT = syscall.Errno(0x37)
+ EBFONT = syscall.Errno(0x3b)
+ EBUSY = syscall.Errno(0x10)
+ ECANCELED = syscall.Errno(0x9e)
+ ECHILD = syscall.Errno(0xa)
+ ECHRNG = syscall.Errno(0x25)
+ ECOMM = syscall.Errno(0x46)
+ ECONNABORTED = syscall.Errno(0x82)
+ ECONNREFUSED = syscall.Errno(0x92)
+ ECONNRESET = syscall.Errno(0x83)
+ EDEADLK = syscall.Errno(0x2d)
+ EDEADLOCK = syscall.Errno(0x38)
+ EDESTADDRREQ = syscall.Errno(0x60)
+ EDOM = syscall.Errno(0x21)
+ EDOTDOT = syscall.Errno(0x49)
+ EDQUOT = syscall.Errno(0x46d)
+ EEXIST = syscall.Errno(0x11)
+ EFAULT = syscall.Errno(0xe)
+ EFBIG = syscall.Errno(0x1b)
+ EHOSTDOWN = syscall.Errno(0x93)
+ EHOSTUNREACH = syscall.Errno(0x94)
+ EHWPOISON = syscall.Errno(0xa8)
+ EIDRM = syscall.Errno(0x24)
+ EILSEQ = syscall.Errno(0x58)
+ EINIT = syscall.Errno(0x8d)
+ EINPROGRESS = syscall.Errno(0x96)
+ EINTR = syscall.Errno(0x4)
+ EINVAL = syscall.Errno(0x16)
+ EIO = syscall.Errno(0x5)
+ EISCONN = syscall.Errno(0x85)
+ EISDIR = syscall.Errno(0x15)
+ EISNAM = syscall.Errno(0x8b)
+ EKEYEXPIRED = syscall.Errno(0xa2)
+ EKEYREJECTED = syscall.Errno(0xa4)
+ EKEYREVOKED = syscall.Errno(0xa3)
+ EL2HLT = syscall.Errno(0x2c)
+ EL2NSYNC = syscall.Errno(0x26)
+ EL3HLT = syscall.Errno(0x27)
+ EL3RST = syscall.Errno(0x28)
+ ELIBACC = syscall.Errno(0x53)
+ ELIBBAD = syscall.Errno(0x54)
+ ELIBEXEC = syscall.Errno(0x57)
+ ELIBMAX = syscall.Errno(0x56)
+ ELIBSCN = syscall.Errno(0x55)
+ ELNRNG = syscall.Errno(0x29)
+ ELOOP = syscall.Errno(0x5a)
+ EMEDIUMTYPE = syscall.Errno(0xa0)
+ EMFILE = syscall.Errno(0x18)
+ EMLINK = syscall.Errno(0x1f)
+ EMSGSIZE = syscall.Errno(0x61)
+ EMULTIHOP = syscall.Errno(0x4a)
+ ENAMETOOLONG = syscall.Errno(0x4e)
+ ENAVAIL = syscall.Errno(0x8a)
+ ENETDOWN = syscall.Errno(0x7f)
+ ENETRESET = syscall.Errno(0x81)
+ ENETUNREACH = syscall.Errno(0x80)
+ ENFILE = syscall.Errno(0x17)
+ ENOANO = syscall.Errno(0x35)
+ ENOBUFS = syscall.Errno(0x84)
+ ENOCSI = syscall.Errno(0x2b)
+ ENODATA = syscall.Errno(0x3d)
+ ENODEV = syscall.Errno(0x13)
+ ENOENT = syscall.Errno(0x2)
+ ENOEXEC = syscall.Errno(0x8)
+ ENOKEY = syscall.Errno(0xa1)
+ ENOLCK = syscall.Errno(0x2e)
+ ENOLINK = syscall.Errno(0x43)
+ ENOMEDIUM = syscall.Errno(0x9f)
+ ENOMEM = syscall.Errno(0xc)
+ ENOMSG = syscall.Errno(0x23)
+ ENONET = syscall.Errno(0x40)
+ ENOPKG = syscall.Errno(0x41)
+ ENOPROTOOPT = syscall.Errno(0x63)
+ ENOSPC = syscall.Errno(0x1c)
+ ENOSR = syscall.Errno(0x3f)
+ ENOSTR = syscall.Errno(0x3c)
+ ENOSYS = syscall.Errno(0x59)
+ ENOTBLK = syscall.Errno(0xf)
+ ENOTCONN = syscall.Errno(0x86)
+ ENOTDIR = syscall.Errno(0x14)
+ ENOTEMPTY = syscall.Errno(0x5d)
+ ENOTNAM = syscall.Errno(0x89)
+ ENOTRECOVERABLE = syscall.Errno(0xa6)
+ ENOTSOCK = syscall.Errno(0x5f)
+ ENOTSUP = syscall.Errno(0x7a)
+ ENOTTY = syscall.Errno(0x19)
+ ENOTUNIQ = syscall.Errno(0x50)
+ ENXIO = syscall.Errno(0x6)
+ EOPNOTSUPP = syscall.Errno(0x7a)
+ EOVERFLOW = syscall.Errno(0x4f)
+ EOWNERDEAD = syscall.Errno(0xa5)
+ EPERM = syscall.Errno(0x1)
+ EPFNOSUPPORT = syscall.Errno(0x7b)
+ EPIPE = syscall.Errno(0x20)
+ EPROTO = syscall.Errno(0x47)
+ EPROTONOSUPPORT = syscall.Errno(0x78)
+ EPROTOTYPE = syscall.Errno(0x62)
+ ERANGE = syscall.Errno(0x22)
+ EREMCHG = syscall.Errno(0x52)
+ EREMDEV = syscall.Errno(0x8e)
+ EREMOTE = syscall.Errno(0x42)
+ EREMOTEIO = syscall.Errno(0x8c)
+ ERESTART = syscall.Errno(0x5b)
+ ERFKILL = syscall.Errno(0xa7)
+ EROFS = syscall.Errno(0x1e)
+ ESHUTDOWN = syscall.Errno(0x8f)
+ ESOCKTNOSUPPORT = syscall.Errno(0x79)
+ ESPIPE = syscall.Errno(0x1d)
+ ESRCH = syscall.Errno(0x3)
+ ESRMNT = syscall.Errno(0x45)
+ ESTALE = syscall.Errno(0x97)
+ ESTRPIPE = syscall.Errno(0x5c)
+ ETIME = syscall.Errno(0x3e)
+ ETIMEDOUT = syscall.Errno(0x91)
+ ETOOMANYREFS = syscall.Errno(0x90)
+ ETXTBSY = syscall.Errno(0x1a)
+ EUCLEAN = syscall.Errno(0x87)
+ EUNATCH = syscall.Errno(0x2a)
+ EUSERS = syscall.Errno(0x5e)
+ EWOULDBLOCK = syscall.Errno(0xb)
+ EXDEV = syscall.Errno(0x12)
+ EXFULL = syscall.Errno(0x34)
+)
+
+// Signals
+const (
+ SIGABRT = syscall.Signal(0x6)
+ SIGALRM = syscall.Signal(0xe)
+ SIGBUS = syscall.Signal(0xa)
+ SIGCHLD = syscall.Signal(0x12)
+ SIGCLD = syscall.Signal(0x12)
+ SIGCONT = syscall.Signal(0x19)
+ SIGEMT = syscall.Signal(0x7)
+ SIGFPE = syscall.Signal(0x8)
+ SIGHUP = syscall.Signal(0x1)
+ SIGILL = syscall.Signal(0x4)
+ SIGINT = syscall.Signal(0x2)
+ SIGIO = syscall.Signal(0x16)
+ SIGIOT = syscall.Signal(0x6)
+ SIGKILL = syscall.Signal(0x9)
+ SIGPIPE = syscall.Signal(0xd)
+ SIGPOLL = syscall.Signal(0x16)
+ SIGPROF = syscall.Signal(0x1d)
+ SIGPWR = syscall.Signal(0x13)
+ SIGQUIT = syscall.Signal(0x3)
+ SIGSEGV = syscall.Signal(0xb)
+ SIGSTOP = syscall.Signal(0x17)
+ SIGSYS = syscall.Signal(0xc)
+ SIGTERM = syscall.Signal(0xf)
+ SIGTRAP = syscall.Signal(0x5)
+ SIGTSTP = syscall.Signal(0x18)
+ SIGTTIN = syscall.Signal(0x1a)
+ SIGTTOU = syscall.Signal(0x1b)
+ SIGURG = syscall.Signal(0x15)
+ SIGUSR1 = syscall.Signal(0x10)
+ SIGUSR2 = syscall.Signal(0x11)
+ SIGVTALRM = syscall.Signal(0x1c)
+ SIGWINCH = syscall.Signal(0x14)
+ SIGXCPU = syscall.Signal(0x1e)
+ SIGXFSZ = syscall.Signal(0x1f)
+)
+
+// Error table
+var errors = [...]string{
+ 1: "operation not permitted",
+ 2: "no such file or directory",
+ 3: "no such process",
+ 4: "interrupted system call",
+ 5: "input/output error",
+ 6: "no such device or address",
+ 7: "argument list too long",
+ 8: "exec format error",
+ 9: "bad file descriptor",
+ 10: "no child processes",
+ 11: "resource temporarily unavailable",
+ 12: "cannot allocate memory",
+ 13: "permission denied",
+ 14: "bad address",
+ 15: "block device required",
+ 16: "device or resource busy",
+ 17: "file exists",
+ 18: "invalid cross-device link",
+ 19: "no such device",
+ 20: "not a directory",
+ 21: "is a directory",
+ 22: "invalid argument",
+ 23: "too many open files in system",
+ 24: "too many open files",
+ 25: "inappropriate ioctl for device",
+ 26: "text file busy",
+ 27: "file too large",
+ 28: "no space left on device",
+ 29: "illegal seek",
+ 30: "read-only file system",
+ 31: "too many links",
+ 32: "broken pipe",
+ 33: "numerical argument out of domain",
+ 34: "numerical result out of range",
+ 35: "no message of desired type",
+ 36: "identifier removed",
+ 37: "channel number out of range",
+ 38: "level 2 not synchronized",
+ 39: "level 3 halted",
+ 40: "level 3 reset",
+ 41: "link number out of range",
+ 42: "protocol driver not attached",
+ 43: "no CSI structure available",
+ 44: "level 2 halted",
+ 45: "resource deadlock avoided",
+ 46: "no locks available",
+ 50: "invalid exchange",
+ 51: "invalid request descriptor",
+ 52: "exchange full",
+ 53: "no anode",
+ 54: "invalid request code",
+ 55: "invalid slot",
+ 56: "file locking deadlock error",
+ 59: "bad font file format",
+ 60: "device not a stream",
+ 61: "no data available",
+ 62: "timer expired",
+ 63: "out of streams resources",
+ 64: "machine is not on the network",
+ 65: "package not installed",
+ 66: "object is remote",
+ 67: "link has been severed",
+ 68: "advertise error",
+ 69: "srmount error",
+ 70: "communication error on send",
+ 71: "protocol error",
+ 73: "RFS specific error",
+ 74: "multihop attempted",
+ 77: "bad message",
+ 78: "file name too long",
+ 79: "value too large for defined data type",
+ 80: "name not unique on network",
+ 81: "file descriptor in bad state",
+ 82: "remote address changed",
+ 83: "can not access a needed shared library",
+ 84: "accessing a corrupted shared library",
+ 85: ".lib section in a.out corrupted",
+ 86: "attempting to link in too many shared libraries",
+ 87: "cannot exec a shared library directly",
+ 88: "invalid or incomplete multibyte or wide character",
+ 89: "function not implemented",
+ 90: "too many levels of symbolic links",
+ 91: "interrupted system call should be restarted",
+ 92: "streams pipe error",
+ 93: "directory not empty",
+ 94: "too many users",
+ 95: "socket operation on non-socket",
+ 96: "destination address required",
+ 97: "message too long",
+ 98: "protocol wrong type for socket",
+ 99: "protocol not available",
+ 120: "protocol not supported",
+ 121: "socket type not supported",
+ 122: "operation not supported",
+ 123: "protocol family not supported",
+ 124: "address family not supported by protocol",
+ 125: "address already in use",
+ 126: "cannot assign requested address",
+ 127: "network is down",
+ 128: "network is unreachable",
+ 129: "network dropped connection on reset",
+ 130: "software caused connection abort",
+ 131: "connection reset by peer",
+ 132: "no buffer space available",
+ 133: "transport endpoint is already connected",
+ 134: "transport endpoint is not connected",
+ 135: "structure needs cleaning",
+ 137: "not a XENIX named type file",
+ 138: "no XENIX semaphores available",
+ 139: "is a named type file",
+ 140: "remote I/O error",
+ 141: "unknown error 141",
+ 142: "unknown error 142",
+ 143: "cannot send after transport endpoint shutdown",
+ 144: "too many references: cannot splice",
+ 145: "connection timed out",
+ 146: "connection refused",
+ 147: "host is down",
+ 148: "no route to host",
+ 149: "operation already in progress",
+ 150: "operation now in progress",
+ 151: "stale file handle",
+ 158: "operation canceled",
+ 159: "no medium found",
+ 160: "wrong medium type",
+ 161: "required key not available",
+ 162: "key has expired",
+ 163: "key has been revoked",
+ 164: "key was rejected by service",
+ 165: "owner died",
+ 166: "state not recoverable",
+ 167: "operation not possible due to RF-kill",
+ 168: "memory page has hardware error",
+ 1133: "disk quota exceeded",
+}
+
+// Signal table
+var signals = [...]string{
+ 1: "hangup",
+ 2: "interrupt",
+ 3: "quit",
+ 4: "illegal instruction",
+ 5: "trace/breakpoint trap",
+ 6: "aborted",
+ 7: "EMT trap",
+ 8: "floating point exception",
+ 9: "killed",
+ 10: "bus error",
+ 11: "segmentation fault",
+ 12: "bad system call",
+ 13: "broken pipe",
+ 14: "alarm clock",
+ 15: "terminated",
+ 16: "user defined signal 1",
+ 17: "user defined signal 2",
+ 18: "child exited",
+ 19: "power failure",
+ 20: "window changed",
+ 21: "urgent I/O condition",
+ 22: "I/O possible",
+ 23: "stopped (signal)",
+ 24: "stopped",
+ 25: "continued",
+ 26: "stopped (tty input)",
+ 27: "stopped (tty output)",
+ 28: "virtual timer expired",
+ 29: "profiling timer expired",
+ 30: "CPU time limit exceeded",
+ 31: "file size limit exceeded",
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 8b42ca2..4e41939 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -197,6 +197,25 @@ const (
BS0 = 0x0
BS1 = 0x8000
BSDLY = 0x8000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0xff
CBAUDEX = 0x0
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e8d12b5..407e6b5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -196,6 +196,25 @@ const (
BS0 = 0x0
BS1 = 0x8000
BSDLY = 0x8000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0xff
CBAUDEX = 0x0
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 329f25e..40c9b87 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -201,6 +201,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 766d1e6..62680ed 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -205,6 +205,25 @@ const (
BS0 = 0x0
BS1 = 0x2000
BSDLY = 0x2000
+ CAN_BCM = 0x2
+ CAN_EFF_FLAG = 0x80000000
+ CAN_EFF_ID_BITS = 0x1d
+ CAN_EFF_MASK = 0x1fffffff
+ CAN_ERR_FLAG = 0x20000000
+ CAN_ERR_MASK = 0x1fffffff
+ CAN_INV_FILTER = 0x20000000
+ CAN_ISOTP = 0x6
+ CAN_MAX_DLC = 0x8
+ CAN_MAX_DLEN = 0x8
+ CAN_MCNET = 0x5
+ CAN_MTU = 0x10
+ CAN_NPROTO = 0x7
+ CAN_RAW = 0x1
+ CAN_RTR_FLAG = 0x40000000
+ CAN_SFF_ID_BITS = 0xb
+ CAN_SFF_MASK = 0x7ff
+ CAN_TP16 = 0x3
+ CAN_TP20 = 0x4
CBAUD = 0x100f
CBAUDEX = 0x1000
CFLUSH = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 80f6a1b..fa92387 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 078c8f0..b34d5c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 76e5f7c..2e5cb39 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index 72b7947..0d584cc 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
new file mode 100644
index 0000000..a18e0b1
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -0,0 +1,1829 @@
+// mksyscall.pl -b32 -arm syscall_linux.go syscall_linux_mipsx.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mips,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+ use(unsafe.Pointer(_p0))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(arg)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(source)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ var _p2 *byte
+ _p2, err = BytePtrFromString(fstype)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ use(unsafe.Pointer(_p2))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+ state = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+ Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+ r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+ tid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(dest) > 0 {
+ _p2 = unsafe.Pointer(&dest[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+ use(unsafe.Pointer(_p0))
+ watchdesc = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+ success = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig syscall.Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(dest) > 0 {
+ _p1 = unsafe.Pointer(&dest[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+ use(unsafe.Pointer(_p0))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(newroot)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(putold)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setns(fd int, nstype int) (err error) {
+ _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(data) > 0 {
+ _p2 = unsafe.Pointer(&data[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+ Syscall(SYS_SYNC, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+ r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+ n = int64(int64(r0)<<32 | int64(r1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+ ticks = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+ r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+ _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(oldfd int, newfd int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length>>32), uintptr(length), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset>>32), uintptr(offset))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset>>32), uintptr(offset))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+ r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+ n = int64(int64(r0)<<32 | int64(r1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+ _, _, e1 := Syscall9(SYS_SYNC_FILE_RANGE, uintptr(fd), 0, uintptr(off>>32), uintptr(off), uintptr(n>>32), uintptr(n), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length>>32), uintptr(length), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ nn = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit() (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ioperm(from int, num int, on int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Iopl(level int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0)
+ tt = Time_t(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(events) > 0 {
+ _p0 = unsafe.Pointer(&events[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+ _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) {
+ r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getrlimit(resource int, rlim *rlimit32) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setrlimit(resource int, rlim *rlimit32) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index ba55509..bf6f360 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index 2b1cc84..8c86bd7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
new file mode 100644
index 0000000..645e00e
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -0,0 +1,1829 @@
+// mksyscall.pl -l32 -arm syscall_linux.go syscall_linux_mipsx.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build mipsle,linux
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var _ syscall.Errno
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+ use(unsafe.Pointer(_p0))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(arg)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(source)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ var _p2 *byte
+ _p2, err = BytePtrFromString(fstype)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ use(unsafe.Pointer(_p2))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+ state = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ClockGettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+ Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+ r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+ tid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(dest) > 0 {
+ _p2 = unsafe.Pointer(&dest[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+ use(unsafe.Pointer(_p0))
+ watchdesc = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+ success = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig syscall.Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(dest) > 0 {
+ _p1 = unsafe.Pointer(&dest[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+ use(unsafe.Pointer(_p0))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(newroot)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(putold)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setns(fd int, nstype int) (err error) {
+ _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(data) > 0 {
+ _p2 = unsafe.Pointer(&data[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+ Syscall(SYS_SYNC, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+ r0, r1, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+ n = int64(int64(r1)<<32 | int64(r0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+ ticks = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+ r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+ _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(oldfd int, newfd int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FTRUNCATE64, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listen(s int, n int) (err error) {
+ _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE64, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Shutdown(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+ r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+ n = int64(int64(r1)<<32 | int64(r0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+ _, _, e1 := Syscall9(SYS_SYNC_FILE_RANGE, uintptr(fd), 0, uintptr(off), uintptr(off>>32), uintptr(n), uintptr(n>>32), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_TRUNCATE64, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ nn = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
+ _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit() (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ioperm(from int, num int, on int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPERM, uintptr(from), uintptr(num), uintptr(on))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Iopl(level int) (err error) {
+ _, _, e1 := Syscall(SYS_IOPL, uintptr(level), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIME, uintptr(unsafe.Pointer(t)), 0, 0)
+ tt = Time_t(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(events) > 0 {
+ _p0 = unsafe.Pointer(&events[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+ _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) {
+ r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getrlimit(resource int, rlim *rlimit32) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setrlimit(resource int, rlim *rlimit32) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 25f39db..f5d488b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index 70702b5..5183711 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index 94b93d3..4c7ed08 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index 774b10e..beb83e4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Getsid(pid int) (sid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ sid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Gettid() (tid int) {
r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
tid = int(r0)
@@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
- _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
new file mode 100644
index 0000000..0786867
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -0,0 +1,359 @@
+// mksysnum_linux.pl /usr/include/mips-linux-gnu/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build mips,linux
+
+package unix
+
+const (
+ SYS_SYSCALL = 4000
+ SYS_EXIT = 4001
+ SYS_FORK = 4002
+ SYS_READ = 4003
+ SYS_WRITE = 4004
+ SYS_OPEN = 4005
+ SYS_CLOSE = 4006
+ SYS_WAITPID = 4007
+ SYS_CREAT = 4008
+ SYS_LINK = 4009
+ SYS_UNLINK = 4010
+ SYS_EXECVE = 4011
+ SYS_CHDIR = 4012
+ SYS_TIME = 4013
+ SYS_MKNOD = 4014
+ SYS_CHMOD = 4015
+ SYS_LCHOWN = 4016
+ SYS_BREAK = 4017
+ SYS_UNUSED18 = 4018
+ SYS_LSEEK = 4019
+ SYS_GETPID = 4020
+ SYS_MOUNT = 4021
+ SYS_UMOUNT = 4022
+ SYS_SETUID = 4023
+ SYS_GETUID = 4024
+ SYS_STIME = 4025
+ SYS_PTRACE = 4026
+ SYS_ALARM = 4027
+ SYS_UNUSED28 = 4028
+ SYS_PAUSE = 4029
+ SYS_UTIME = 4030
+ SYS_STTY = 4031
+ SYS_GTTY = 4032
+ SYS_ACCESS = 4033
+ SYS_NICE = 4034
+ SYS_FTIME = 4035
+ SYS_SYNC = 4036
+ SYS_KILL = 4037
+ SYS_RENAME = 4038
+ SYS_MKDIR = 4039
+ SYS_RMDIR = 4040
+ SYS_DUP = 4041
+ SYS_PIPE = 4042
+ SYS_TIMES = 4043
+ SYS_PROF = 4044
+ SYS_BRK = 4045
+ SYS_SETGID = 4046
+ SYS_GETGID = 4047
+ SYS_SIGNAL = 4048
+ SYS_GETEUID = 4049
+ SYS_GETEGID = 4050
+ SYS_ACCT = 4051
+ SYS_UMOUNT2 = 4052
+ SYS_LOCK = 4053
+ SYS_IOCTL = 4054
+ SYS_FCNTL = 4055
+ SYS_MPX = 4056
+ SYS_SETPGID = 4057
+ SYS_ULIMIT = 4058
+ SYS_UNUSED59 = 4059
+ SYS_UMASK = 4060
+ SYS_CHROOT = 4061
+ SYS_USTAT = 4062
+ SYS_DUP2 = 4063
+ SYS_GETPPID = 4064
+ SYS_GETPGRP = 4065
+ SYS_SETSID = 4066
+ SYS_SIGACTION = 4067
+ SYS_SGETMASK = 4068
+ SYS_SSETMASK = 4069
+ SYS_SETREUID = 4070
+ SYS_SETREGID = 4071
+ SYS_SIGSUSPEND = 4072
+ SYS_SIGPENDING = 4073
+ SYS_SETHOSTNAME = 4074
+ SYS_SETRLIMIT = 4075
+ SYS_GETRLIMIT = 4076
+ SYS_GETRUSAGE = 4077
+ SYS_GETTIMEOFDAY = 4078
+ SYS_SETTIMEOFDAY = 4079
+ SYS_GETGROUPS = 4080
+ SYS_SETGROUPS = 4081
+ SYS_RESERVED82 = 4082
+ SYS_SYMLINK = 4083
+ SYS_UNUSED84 = 4084
+ SYS_READLINK = 4085
+ SYS_USELIB = 4086
+ SYS_SWAPON = 4087
+ SYS_REBOOT = 4088
+ SYS_READDIR = 4089
+ SYS_MMAP = 4090
+ SYS_MUNMAP = 4091
+ SYS_TRUNCATE = 4092
+ SYS_FTRUNCATE = 4093
+ SYS_FCHMOD = 4094
+ SYS_FCHOWN = 4095
+ SYS_GETPRIORITY = 4096
+ SYS_SETPRIORITY = 4097
+ SYS_PROFIL = 4098
+ SYS_STATFS = 4099
+ SYS_FSTATFS = 4100
+ SYS_IOPERM = 4101
+ SYS_SOCKETCALL = 4102
+ SYS_SYSLOG = 4103
+ SYS_SETITIMER = 4104
+ SYS_GETITIMER = 4105
+ SYS_STAT = 4106
+ SYS_LSTAT = 4107
+ SYS_FSTAT = 4108
+ SYS_UNUSED109 = 4109
+ SYS_IOPL = 4110
+ SYS_VHANGUP = 4111
+ SYS_IDLE = 4112
+ SYS_VM86 = 4113
+ SYS_WAIT4 = 4114
+ SYS_SWAPOFF = 4115
+ SYS_SYSINFO = 4116
+ SYS_IPC = 4117
+ SYS_FSYNC = 4118
+ SYS_SIGRETURN = 4119
+ SYS_CLONE = 4120
+ SYS_SETDOMAINNAME = 4121
+ SYS_UNAME = 4122
+ SYS_MODIFY_LDT = 4123
+ SYS_ADJTIMEX = 4124
+ SYS_MPROTECT = 4125
+ SYS_SIGPROCMASK = 4126
+ SYS_CREATE_MODULE = 4127
+ SYS_INIT_MODULE = 4128
+ SYS_DELETE_MODULE = 4129
+ SYS_GET_KERNEL_SYMS = 4130
+ SYS_QUOTACTL = 4131
+ SYS_GETPGID = 4132
+ SYS_FCHDIR = 4133
+ SYS_BDFLUSH = 4134
+ SYS_SYSFS = 4135
+ SYS_PERSONALITY = 4136
+ SYS_AFS_SYSCALL = 4137
+ SYS_SETFSUID = 4138
+ SYS_SETFSGID = 4139
+ SYS__LLSEEK = 4140
+ SYS_GETDENTS = 4141
+ SYS__NEWSELECT = 4142
+ SYS_FLOCK = 4143
+ SYS_MSYNC = 4144
+ SYS_READV = 4145
+ SYS_WRITEV = 4146
+ SYS_CACHEFLUSH = 4147
+ SYS_CACHECTL = 4148
+ SYS_SYSMIPS = 4149
+ SYS_UNUSED150 = 4150
+ SYS_GETSID = 4151
+ SYS_FDATASYNC = 4152
+ SYS__SYSCTL = 4153
+ SYS_MLOCK = 4154
+ SYS_MUNLOCK = 4155
+ SYS_MLOCKALL = 4156
+ SYS_MUNLOCKALL = 4157
+ SYS_SCHED_SETPARAM = 4158
+ SYS_SCHED_GETPARAM = 4159
+ SYS_SCHED_SETSCHEDULER = 4160
+ SYS_SCHED_GETSCHEDULER = 4161
+ SYS_SCHED_YIELD = 4162
+ SYS_SCHED_GET_PRIORITY_MAX = 4163
+ SYS_SCHED_GET_PRIORITY_MIN = 4164
+ SYS_SCHED_RR_GET_INTERVAL = 4165
+ SYS_NANOSLEEP = 4166
+ SYS_MREMAP = 4167
+ SYS_ACCEPT = 4168
+ SYS_BIND = 4169
+ SYS_CONNECT = 4170
+ SYS_GETPEERNAME = 4171
+ SYS_GETSOCKNAME = 4172
+ SYS_GETSOCKOPT = 4173
+ SYS_LISTEN = 4174
+ SYS_RECV = 4175
+ SYS_RECVFROM = 4176
+ SYS_RECVMSG = 4177
+ SYS_SEND = 4178
+ SYS_SENDMSG = 4179
+ SYS_SENDTO = 4180
+ SYS_SETSOCKOPT = 4181
+ SYS_SHUTDOWN = 4182
+ SYS_SOCKET = 4183
+ SYS_SOCKETPAIR = 4184
+ SYS_SETRESUID = 4185
+ SYS_GETRESUID = 4186
+ SYS_QUERY_MODULE = 4187
+ SYS_POLL = 4188
+ SYS_NFSSERVCTL = 4189
+ SYS_SETRESGID = 4190
+ SYS_GETRESGID = 4191
+ SYS_PRCTL = 4192
+ SYS_RT_SIGRETURN = 4193
+ SYS_RT_SIGACTION = 4194
+ SYS_RT_SIGPROCMASK = 4195
+ SYS_RT_SIGPENDING = 4196
+ SYS_RT_SIGTIMEDWAIT = 4197
+ SYS_RT_SIGQUEUEINFO = 4198
+ SYS_RT_SIGSUSPEND = 4199
+ SYS_PREAD64 = 4200
+ SYS_PWRITE64 = 4201
+ SYS_CHOWN = 4202
+ SYS_GETCWD = 4203
+ SYS_CAPGET = 4204
+ SYS_CAPSET = 4205
+ SYS_SIGALTSTACK = 4206
+ SYS_SENDFILE = 4207
+ SYS_GETPMSG = 4208
+ SYS_PUTPMSG = 4209
+ SYS_MMAP2 = 4210
+ SYS_TRUNCATE64 = 4211
+ SYS_FTRUNCATE64 = 4212
+ SYS_STAT64 = 4213
+ SYS_LSTAT64 = 4214
+ SYS_FSTAT64 = 4215
+ SYS_PIVOT_ROOT = 4216
+ SYS_MINCORE = 4217
+ SYS_MADVISE = 4218
+ SYS_GETDENTS64 = 4219
+ SYS_FCNTL64 = 4220
+ SYS_RESERVED221 = 4221
+ SYS_GETTID = 4222
+ SYS_READAHEAD = 4223
+ SYS_SETXATTR = 4224
+ SYS_LSETXATTR = 4225
+ SYS_FSETXATTR = 4226
+ SYS_GETXATTR = 4227
+ SYS_LGETXATTR = 4228
+ SYS_FGETXATTR = 4229
+ SYS_LISTXATTR = 4230
+ SYS_LLISTXATTR = 4231
+ SYS_FLISTXATTR = 4232
+ SYS_REMOVEXATTR = 4233
+ SYS_LREMOVEXATTR = 4234
+ SYS_FREMOVEXATTR = 4235
+ SYS_TKILL = 4236
+ SYS_SENDFILE64 = 4237
+ SYS_FUTEX = 4238
+ SYS_SCHED_SETAFFINITY = 4239
+ SYS_SCHED_GETAFFINITY = 4240
+ SYS_IO_SETUP = 4241
+ SYS_IO_DESTROY = 4242
+ SYS_IO_GETEVENTS = 4243
+ SYS_IO_SUBMIT = 4244
+ SYS_IO_CANCEL = 4245
+ SYS_EXIT_GROUP = 4246
+ SYS_LOOKUP_DCOOKIE = 4247
+ SYS_EPOLL_CREATE = 4248
+ SYS_EPOLL_CTL = 4249
+ SYS_EPOLL_WAIT = 4250
+ SYS_REMAP_FILE_PAGES = 4251
+ SYS_SET_TID_ADDRESS = 4252
+ SYS_RESTART_SYSCALL = 4253
+ SYS_FADVISE64 = 4254
+ SYS_STATFS64 = 4255
+ SYS_FSTATFS64 = 4256
+ SYS_TIMER_CREATE = 4257
+ SYS_TIMER_SETTIME = 4258
+ SYS_TIMER_GETTIME = 4259
+ SYS_TIMER_GETOVERRUN = 4260
+ SYS_TIMER_DELETE = 4261
+ SYS_CLOCK_SETTIME = 4262
+ SYS_CLOCK_GETTIME = 4263
+ SYS_CLOCK_GETRES = 4264
+ SYS_CLOCK_NANOSLEEP = 4265
+ SYS_TGKILL = 4266
+ SYS_UTIMES = 4267
+ SYS_MBIND = 4268
+ SYS_GET_MEMPOLICY = 4269
+ SYS_SET_MEMPOLICY = 4270
+ SYS_MQ_OPEN = 4271
+ SYS_MQ_UNLINK = 4272
+ SYS_MQ_TIMEDSEND = 4273
+ SYS_MQ_TIMEDRECEIVE = 4274
+ SYS_MQ_NOTIFY = 4275
+ SYS_MQ_GETSETATTR = 4276
+ SYS_VSERVER = 4277
+ SYS_WAITID = 4278
+ SYS_ADD_KEY = 4280
+ SYS_REQUEST_KEY = 4281
+ SYS_KEYCTL = 4282
+ SYS_SET_THREAD_AREA = 4283
+ SYS_INOTIFY_INIT = 4284
+ SYS_INOTIFY_ADD_WATCH = 4285
+ SYS_INOTIFY_RM_WATCH = 4286
+ SYS_MIGRATE_PAGES = 4287
+ SYS_OPENAT = 4288
+ SYS_MKDIRAT = 4289
+ SYS_MKNODAT = 4290
+ SYS_FCHOWNAT = 4291
+ SYS_FUTIMESAT = 4292
+ SYS_FSTATAT64 = 4293
+ SYS_UNLINKAT = 4294
+ SYS_RENAMEAT = 4295
+ SYS_LINKAT = 4296
+ SYS_SYMLINKAT = 4297
+ SYS_READLINKAT = 4298
+ SYS_FCHMODAT = 4299
+ SYS_FACCESSAT = 4300
+ SYS_PSELECT6 = 4301
+ SYS_PPOLL = 4302
+ SYS_UNSHARE = 4303
+ SYS_SPLICE = 4304
+ SYS_SYNC_FILE_RANGE = 4305
+ SYS_TEE = 4306
+ SYS_VMSPLICE = 4307
+ SYS_MOVE_PAGES = 4308
+ SYS_SET_ROBUST_LIST = 4309
+ SYS_GET_ROBUST_LIST = 4310
+ SYS_KEXEC_LOAD = 4311
+ SYS_GETCPU = 4312
+ SYS_EPOLL_PWAIT = 4313
+ SYS_IOPRIO_SET = 4314
+ SYS_IOPRIO_GET = 4315
+ SYS_UTIMENSAT = 4316
+ SYS_SIGNALFD = 4317
+ SYS_TIMERFD = 4318
+ SYS_EVENTFD = 4319
+ SYS_FALLOCATE = 4320
+ SYS_TIMERFD_CREATE = 4321
+ SYS_TIMERFD_GETTIME = 4322
+ SYS_TIMERFD_SETTIME = 4323
+ SYS_SIGNALFD4 = 4324
+ SYS_EVENTFD2 = 4325
+ SYS_EPOLL_CREATE1 = 4326
+ SYS_DUP3 = 4327
+ SYS_PIPE2 = 4328
+ SYS_INOTIFY_INIT1 = 4329
+ SYS_PREADV = 4330
+ SYS_PWRITEV = 4331
+ SYS_RT_TGSIGQUEUEINFO = 4332
+ SYS_PERF_EVENT_OPEN = 4333
+ SYS_ACCEPT4 = 4334
+ SYS_RECVMMSG = 4335
+ SYS_FANOTIFY_INIT = 4336
+ SYS_FANOTIFY_MARK = 4337
+ SYS_PRLIMIT64 = 4338
+ SYS_NAME_TO_HANDLE_AT = 4339
+ SYS_OPEN_BY_HANDLE_AT = 4340
+ SYS_CLOCK_ADJTIME = 4341
+ SYS_SYNCFS = 4342
+ SYS_SENDMMSG = 4343
+ SYS_SETNS = 4344
+ SYS_PROCESS_VM_READV = 4345
+ SYS_PROCESS_VM_WRITEV = 4346
+ SYS_LINUX_SYSCALLS = 4346
+ SYS_O32_LINUX_SYSCALLS = 4346
+ SYS_64_LINUX_SYSCALLS = 4305
+ SYS_N32_LINUX_SYSCALLS = 4310
+)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
new file mode 100644
index 0000000..25d2317
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -0,0 +1,359 @@
+// mksysnum_linux.pl /usr/include/mips-linux-gnu/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build mipsle,linux
+
+package unix
+
+const (
+ SYS_SYSCALL = 4000
+ SYS_EXIT = 4001
+ SYS_FORK = 4002
+ SYS_READ = 4003
+ SYS_WRITE = 4004
+ SYS_OPEN = 4005
+ SYS_CLOSE = 4006
+ SYS_WAITPID = 4007
+ SYS_CREAT = 4008
+ SYS_LINK = 4009
+ SYS_UNLINK = 4010
+ SYS_EXECVE = 4011
+ SYS_CHDIR = 4012
+ SYS_TIME = 4013
+ SYS_MKNOD = 4014
+ SYS_CHMOD = 4015
+ SYS_LCHOWN = 4016
+ SYS_BREAK = 4017
+ SYS_UNUSED18 = 4018
+ SYS_LSEEK = 4019
+ SYS_GETPID = 4020
+ SYS_MOUNT = 4021
+ SYS_UMOUNT = 4022
+ SYS_SETUID = 4023
+ SYS_GETUID = 4024
+ SYS_STIME = 4025
+ SYS_PTRACE = 4026
+ SYS_ALARM = 4027
+ SYS_UNUSED28 = 4028
+ SYS_PAUSE = 4029
+ SYS_UTIME = 4030
+ SYS_STTY = 4031
+ SYS_GTTY = 4032
+ SYS_ACCESS = 4033
+ SYS_NICE = 4034
+ SYS_FTIME = 4035
+ SYS_SYNC = 4036
+ SYS_KILL = 4037
+ SYS_RENAME = 4038
+ SYS_MKDIR = 4039
+ SYS_RMDIR = 4040
+ SYS_DUP = 4041
+ SYS_PIPE = 4042
+ SYS_TIMES = 4043
+ SYS_PROF = 4044
+ SYS_BRK = 4045
+ SYS_SETGID = 4046
+ SYS_GETGID = 4047
+ SYS_SIGNAL = 4048
+ SYS_GETEUID = 4049
+ SYS_GETEGID = 4050
+ SYS_ACCT = 4051
+ SYS_UMOUNT2 = 4052
+ SYS_LOCK = 4053
+ SYS_IOCTL = 4054
+ SYS_FCNTL = 4055
+ SYS_MPX = 4056
+ SYS_SETPGID = 4057
+ SYS_ULIMIT = 4058
+ SYS_UNUSED59 = 4059
+ SYS_UMASK = 4060
+ SYS_CHROOT = 4061
+ SYS_USTAT = 4062
+ SYS_DUP2 = 4063
+ SYS_GETPPID = 4064
+ SYS_GETPGRP = 4065
+ SYS_SETSID = 4066
+ SYS_SIGACTION = 4067
+ SYS_SGETMASK = 4068
+ SYS_SSETMASK = 4069
+ SYS_SETREUID = 4070
+ SYS_SETREGID = 4071
+ SYS_SIGSUSPEND = 4072
+ SYS_SIGPENDING = 4073
+ SYS_SETHOSTNAME = 4074
+ SYS_SETRLIMIT = 4075
+ SYS_GETRLIMIT = 4076
+ SYS_GETRUSAGE = 4077
+ SYS_GETTIMEOFDAY = 4078
+ SYS_SETTIMEOFDAY = 4079
+ SYS_GETGROUPS = 4080
+ SYS_SETGROUPS = 4081
+ SYS_RESERVED82 = 4082
+ SYS_SYMLINK = 4083
+ SYS_UNUSED84 = 4084
+ SYS_READLINK = 4085
+ SYS_USELIB = 4086
+ SYS_SWAPON = 4087
+ SYS_REBOOT = 4088
+ SYS_READDIR = 4089
+ SYS_MMAP = 4090
+ SYS_MUNMAP = 4091
+ SYS_TRUNCATE = 4092
+ SYS_FTRUNCATE = 4093
+ SYS_FCHMOD = 4094
+ SYS_FCHOWN = 4095
+ SYS_GETPRIORITY = 4096
+ SYS_SETPRIORITY = 4097
+ SYS_PROFIL = 4098
+ SYS_STATFS = 4099
+ SYS_FSTATFS = 4100
+ SYS_IOPERM = 4101
+ SYS_SOCKETCALL = 4102
+ SYS_SYSLOG = 4103
+ SYS_SETITIMER = 4104
+ SYS_GETITIMER = 4105
+ SYS_STAT = 4106
+ SYS_LSTAT = 4107
+ SYS_FSTAT = 4108
+ SYS_UNUSED109 = 4109
+ SYS_IOPL = 4110
+ SYS_VHANGUP = 4111
+ SYS_IDLE = 4112
+ SYS_VM86 = 4113
+ SYS_WAIT4 = 4114
+ SYS_SWAPOFF = 4115
+ SYS_SYSINFO = 4116
+ SYS_IPC = 4117
+ SYS_FSYNC = 4118
+ SYS_SIGRETURN = 4119
+ SYS_CLONE = 4120
+ SYS_SETDOMAINNAME = 4121
+ SYS_UNAME = 4122
+ SYS_MODIFY_LDT = 4123
+ SYS_ADJTIMEX = 4124
+ SYS_MPROTECT = 4125
+ SYS_SIGPROCMASK = 4126
+ SYS_CREATE_MODULE = 4127
+ SYS_INIT_MODULE = 4128
+ SYS_DELETE_MODULE = 4129
+ SYS_GET_KERNEL_SYMS = 4130
+ SYS_QUOTACTL = 4131
+ SYS_GETPGID = 4132
+ SYS_FCHDIR = 4133
+ SYS_BDFLUSH = 4134
+ SYS_SYSFS = 4135
+ SYS_PERSONALITY = 4136
+ SYS_AFS_SYSCALL = 4137
+ SYS_SETFSUID = 4138
+ SYS_SETFSGID = 4139
+ SYS__LLSEEK = 4140
+ SYS_GETDENTS = 4141
+ SYS__NEWSELECT = 4142
+ SYS_FLOCK = 4143
+ SYS_MSYNC = 4144
+ SYS_READV = 4145
+ SYS_WRITEV = 4146
+ SYS_CACHEFLUSH = 4147
+ SYS_CACHECTL = 4148
+ SYS_SYSMIPS = 4149
+ SYS_UNUSED150 = 4150
+ SYS_GETSID = 4151
+ SYS_FDATASYNC = 4152
+ SYS__SYSCTL = 4153
+ SYS_MLOCK = 4154
+ SYS_MUNLOCK = 4155
+ SYS_MLOCKALL = 4156
+ SYS_MUNLOCKALL = 4157
+ SYS_SCHED_SETPARAM = 4158
+ SYS_SCHED_GETPARAM = 4159
+ SYS_SCHED_SETSCHEDULER = 4160
+ SYS_SCHED_GETSCHEDULER = 4161
+ SYS_SCHED_YIELD = 4162
+ SYS_SCHED_GET_PRIORITY_MAX = 4163
+ SYS_SCHED_GET_PRIORITY_MIN = 4164
+ SYS_SCHED_RR_GET_INTERVAL = 4165
+ SYS_NANOSLEEP = 4166
+ SYS_MREMAP = 4167
+ SYS_ACCEPT = 4168
+ SYS_BIND = 4169
+ SYS_CONNECT = 4170
+ SYS_GETPEERNAME = 4171
+ SYS_GETSOCKNAME = 4172
+ SYS_GETSOCKOPT = 4173
+ SYS_LISTEN = 4174
+ SYS_RECV = 4175
+ SYS_RECVFROM = 4176
+ SYS_RECVMSG = 4177
+ SYS_SEND = 4178
+ SYS_SENDMSG = 4179
+ SYS_SENDTO = 4180
+ SYS_SETSOCKOPT = 4181
+ SYS_SHUTDOWN = 4182
+ SYS_SOCKET = 4183
+ SYS_SOCKETPAIR = 4184
+ SYS_SETRESUID = 4185
+ SYS_GETRESUID = 4186
+ SYS_QUERY_MODULE = 4187
+ SYS_POLL = 4188
+ SYS_NFSSERVCTL = 4189
+ SYS_SETRESGID = 4190
+ SYS_GETRESGID = 4191
+ SYS_PRCTL = 4192
+ SYS_RT_SIGRETURN = 4193
+ SYS_RT_SIGACTION = 4194
+ SYS_RT_SIGPROCMASK = 4195
+ SYS_RT_SIGPENDING = 4196
+ SYS_RT_SIGTIMEDWAIT = 4197
+ SYS_RT_SIGQUEUEINFO = 4198
+ SYS_RT_SIGSUSPEND = 4199
+ SYS_PREAD64 = 4200
+ SYS_PWRITE64 = 4201
+ SYS_CHOWN = 4202
+ SYS_GETCWD = 4203
+ SYS_CAPGET = 4204
+ SYS_CAPSET = 4205
+ SYS_SIGALTSTACK = 4206
+ SYS_SENDFILE = 4207
+ SYS_GETPMSG = 4208
+ SYS_PUTPMSG = 4209
+ SYS_MMAP2 = 4210
+ SYS_TRUNCATE64 = 4211
+ SYS_FTRUNCATE64 = 4212
+ SYS_STAT64 = 4213
+ SYS_LSTAT64 = 4214
+ SYS_FSTAT64 = 4215
+ SYS_PIVOT_ROOT = 4216
+ SYS_MINCORE = 4217
+ SYS_MADVISE = 4218
+ SYS_GETDENTS64 = 4219
+ SYS_FCNTL64 = 4220
+ SYS_RESERVED221 = 4221
+ SYS_GETTID = 4222
+ SYS_READAHEAD = 4223
+ SYS_SETXATTR = 4224
+ SYS_LSETXATTR = 4225
+ SYS_FSETXATTR = 4226
+ SYS_GETXATTR = 4227
+ SYS_LGETXATTR = 4228
+ SYS_FGETXATTR = 4229
+ SYS_LISTXATTR = 4230
+ SYS_LLISTXATTR = 4231
+ SYS_FLISTXATTR = 4232
+ SYS_REMOVEXATTR = 4233
+ SYS_LREMOVEXATTR = 4234
+ SYS_FREMOVEXATTR = 4235
+ SYS_TKILL = 4236
+ SYS_SENDFILE64 = 4237
+ SYS_FUTEX = 4238
+ SYS_SCHED_SETAFFINITY = 4239
+ SYS_SCHED_GETAFFINITY = 4240
+ SYS_IO_SETUP = 4241
+ SYS_IO_DESTROY = 4242
+ SYS_IO_GETEVENTS = 4243
+ SYS_IO_SUBMIT = 4244
+ SYS_IO_CANCEL = 4245
+ SYS_EXIT_GROUP = 4246
+ SYS_LOOKUP_DCOOKIE = 4247
+ SYS_EPOLL_CREATE = 4248
+ SYS_EPOLL_CTL = 4249
+ SYS_EPOLL_WAIT = 4250
+ SYS_REMAP_FILE_PAGES = 4251
+ SYS_SET_TID_ADDRESS = 4252
+ SYS_RESTART_SYSCALL = 4253
+ SYS_FADVISE64 = 4254
+ SYS_STATFS64 = 4255
+ SYS_FSTATFS64 = 4256
+ SYS_TIMER_CREATE = 4257
+ SYS_TIMER_SETTIME = 4258
+ SYS_TIMER_GETTIME = 4259
+ SYS_TIMER_GETOVERRUN = 4260
+ SYS_TIMER_DELETE = 4261
+ SYS_CLOCK_SETTIME = 4262
+ SYS_CLOCK_GETTIME = 4263
+ SYS_CLOCK_GETRES = 4264
+ SYS_CLOCK_NANOSLEEP = 4265
+ SYS_TGKILL = 4266
+ SYS_UTIMES = 4267
+ SYS_MBIND = 4268
+ SYS_GET_MEMPOLICY = 4269
+ SYS_SET_MEMPOLICY = 4270
+ SYS_MQ_OPEN = 4271
+ SYS_MQ_UNLINK = 4272
+ SYS_MQ_TIMEDSEND = 4273
+ SYS_MQ_TIMEDRECEIVE = 4274
+ SYS_MQ_NOTIFY = 4275
+ SYS_MQ_GETSETATTR = 4276
+ SYS_VSERVER = 4277
+ SYS_WAITID = 4278
+ SYS_ADD_KEY = 4280
+ SYS_REQUEST_KEY = 4281
+ SYS_KEYCTL = 4282
+ SYS_SET_THREAD_AREA = 4283
+ SYS_INOTIFY_INIT = 4284
+ SYS_INOTIFY_ADD_WATCH = 4285
+ SYS_INOTIFY_RM_WATCH = 4286
+ SYS_MIGRATE_PAGES = 4287
+ SYS_OPENAT = 4288
+ SYS_MKDIRAT = 4289
+ SYS_MKNODAT = 4290
+ SYS_FCHOWNAT = 4291
+ SYS_FUTIMESAT = 4292
+ SYS_FSTATAT64 = 4293
+ SYS_UNLINKAT = 4294
+ SYS_RENAMEAT = 4295
+ SYS_LINKAT = 4296
+ SYS_SYMLINKAT = 4297
+ SYS_READLINKAT = 4298
+ SYS_FCHMODAT = 4299
+ SYS_FACCESSAT = 4300
+ SYS_PSELECT6 = 4301
+ SYS_PPOLL = 4302
+ SYS_UNSHARE = 4303
+ SYS_SPLICE = 4304
+ SYS_SYNC_FILE_RANGE = 4305
+ SYS_TEE = 4306
+ SYS_VMSPLICE = 4307
+ SYS_MOVE_PAGES = 4308
+ SYS_SET_ROBUST_LIST = 4309
+ SYS_GET_ROBUST_LIST = 4310
+ SYS_KEXEC_LOAD = 4311
+ SYS_GETCPU = 4312
+ SYS_EPOLL_PWAIT = 4313
+ SYS_IOPRIO_SET = 4314
+ SYS_IOPRIO_GET = 4315
+ SYS_UTIMENSAT = 4316
+ SYS_SIGNALFD = 4317
+ SYS_TIMERFD = 4318
+ SYS_EVENTFD = 4319
+ SYS_FALLOCATE = 4320
+ SYS_TIMERFD_CREATE = 4321
+ SYS_TIMERFD_GETTIME = 4322
+ SYS_TIMERFD_SETTIME = 4323
+ SYS_SIGNALFD4 = 4324
+ SYS_EVENTFD2 = 4325
+ SYS_EPOLL_CREATE1 = 4326
+ SYS_DUP3 = 4327
+ SYS_PIPE2 = 4328
+ SYS_INOTIFY_INIT1 = 4329
+ SYS_PREADV = 4330
+ SYS_PWRITEV = 4331
+ SYS_RT_TGSIGQUEUEINFO = 4332
+ SYS_PERF_EVENT_OPEN = 4333
+ SYS_ACCEPT4 = 4334
+ SYS_RECVMMSG = 4335
+ SYS_FANOTIFY_INIT = 4336
+ SYS_FANOTIFY_MARK = 4337
+ SYS_PRLIMIT64 = 4338
+ SYS_NAME_TO_HANDLE_AT = 4339
+ SYS_OPEN_BY_HANDLE_AT = 4340
+ SYS_CLOCK_ADJTIME = 4341
+ SYS_SYNCFS = 4342
+ SYS_SENDMMSG = 4343
+ SYS_SETNS = 4344
+ SYS_PROCESS_VM_READV = 4345
+ SYS_PROCESS_VM_WRITEV = 4346
+ SYS_LINUX_SYSCALLS = 4346
+ SYS_O32_LINUX_SYSCALLS = 4346
+ SYS_64_LINUX_SYSCALLS = 4305
+ SYS_N32_LINUX_SYSCALLS = 4310
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index f3ddf53..a363105 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -203,6 +203,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -326,6 +333,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index a923bef..0573e6c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -205,6 +205,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 35f11bd..0578b53 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index e786add..808e046 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -331,6 +338,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
new file mode 100644
index 0000000..2eaff57
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -0,0 +1,642 @@
+// +build mips,linux
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go | go run mkpost.go
+
+package unix
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ PathMax = 0x1000
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+type Timex struct {
+ Modes uint32
+ Offset int32
+ Freq int32
+ Maxerror int32
+ Esterror int32
+ Status int32
+ Constant int32
+ Precision int32
+ Tolerance int32
+ Time Timeval
+ Tick int32
+ Ppsfreq int32
+ Jitter int32
+ Shift int32
+ Stabil int32
+ Jitcnt int32
+ Calcnt int32
+ Errcnt int32
+ Stbcnt int32
+ Tai int32
+ Pad_cgo_0 [44]byte
+}
+
+type Time_t int32
+
+type Tms struct {
+ Utime int32
+ Stime int32
+ Cutime int32
+ Cstime int32
+}
+
+type Utimbuf struct {
+ Actime int32
+ Modtime int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Dev uint32
+ Pad1 [3]int32
+ Ino uint64
+ Mode uint32
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Pad2 [3]int32
+ Size int64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Blksize int32
+ Pad4 int32
+ Blocks int64
+ Pad5 [14]int32
+}
+
+type Statfs_t struct {
+ Type int32
+ Bsize int32
+ Frsize int32
+ Pad_cgo_0 [4]byte
+ Blocks uint64
+ Bfree uint64
+ Files uint64
+ Ffree uint64
+ Bavail uint64
+ Fsid Fsid
+ Namelen int32
+ Flags int32
+ Spare [5]int32
+ Pad_cgo_1 [4]byte
+}
+
+type Dirent struct {
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ Pad_cgo_0 [5]byte
+}
+
+type Fsid struct {
+ X__val [2]int32
+}
+
+type Flock_t struct {
+ Type int16
+ Whence int16
+ Pad_cgo_0 [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ Pad_cgo_1 [4]byte
+}
+
+const (
+ FADV_NORMAL = 0x0
+ FADV_RANDOM = 0x1
+ FADV_SEQUENTIAL = 0x2
+ FADV_WILLNEED = 0x3
+ FADV_DONTNEED = 0x4
+ FADV_NOREUSE = 0x5
+)
+
+type RawSockaddrInet4 struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Family uint16
+ Path [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+ Family uint16
+ Protocol uint16
+ Ifindex int32
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
+type RawSockaddrHCI struct {
+ Family uint16
+ Dev uint16
+ Channel uint16
+}
+
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
+type RawSockaddr struct {
+ Family uint16
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [96]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint32
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Iov *Iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type Inet4Pktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+type TCPInfo struct {
+ State uint8
+ Ca_state uint8
+ Retransmits uint8
+ Probes uint8
+ Backoff uint8
+ Options uint8
+ Pad_cgo_0 [2]byte
+ Rto uint32
+ Ato uint32
+ Snd_mss uint32
+ Rcv_mss uint32
+ Unacked uint32
+ Sacked uint32
+ Lost uint32
+ Retrans uint32
+ Fackets uint32
+ Last_data_sent uint32
+ Last_ack_sent uint32
+ Last_data_recv uint32
+ Last_ack_recv uint32
+ Pmtu uint32
+ Rcv_ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Advmss uint32
+ Reordering uint32
+ Rcv_rtt uint32
+ Rcv_space uint32
+ Total_retrans uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x70
+ SizeofSockaddrUnix = 0x6e
+ SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
+ SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPMreqn = 0xc
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x1c
+ SizeofCmsghdr = 0xc
+ SizeofInet4Pktinfo = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+ SizeofUcred = 0xc
+ SizeofTCPInfo = 0x68
+)
+
+const (
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x1d
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+)
+
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ X__ifi_pad uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
+type RtNexthop struct {
+ Len uint16
+ Flags uint8
+ Hops uint8
+ Ifindex int32
+}
+
+const (
+ SizeofSockFilter = 0x8
+ SizeofSockFprog = 0x8
+)
+
+type SockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type SockFprog struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *SockFilter
+}
+
+type InotifyEvent struct {
+ Wd int32
+ Mask uint32
+ Cookie uint32
+ Len uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+ Regs [109]uint32
+ U_tsize uint32
+ U_dsize uint32
+ U_ssize uint32
+ Start_code uint32
+ Start_data uint32
+ Start_stack uint32
+ Signal int32
+ U_ar0 *byte
+ Magic uint32
+ U_comm [32]int8
+}
+
+type ptracePsw struct {
+}
+
+type ptraceFpregs struct {
+}
+
+type ptracePer struct {
+}
+
+type FdSet struct {
+ Bits [32]int32
+}
+
+type Sysinfo_t struct {
+ Uptime int32
+ Loads [3]uint32
+ Totalram uint32
+ Freeram uint32
+ Sharedram uint32
+ Bufferram uint32
+ Totalswap uint32
+ Freeswap uint32
+ Procs uint16
+ Pad uint16
+ Totalhigh uint32
+ Freehigh uint32
+ Unit uint32
+ X_f [8]int8
+}
+
+type Utsname struct {
+ Sysname [65]int8
+ Nodename [65]int8
+ Release [65]int8
+ Version [65]int8
+ Machine [65]int8
+ Domainname [65]int8
+}
+
+type Ustat_t struct {
+ Tfree int32
+ Tinode uint32
+ Fname [6]int8
+ Fpack [6]int8
+}
+
+type EpollEvent struct {
+ Events uint32
+ PadFd int32
+ Fd int32
+ Pad int32
+}
+
+const (
+ AT_FDCWD = -0x64
+ AT_REMOVEDIR = 0x200
+ AT_SYMLINK_FOLLOW = 0x400
+ AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type PollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
+const (
+ POLLIN = 0x1
+ POLLPRI = 0x2
+ POLLOUT = 0x4
+ POLLRDHUP = 0x2000
+ POLLERR = 0x8
+ POLLHUP = 0x10
+ POLLNVAL = 0x20
+)
+
+type Sigset_t struct {
+ X__val [32]uint32
+}
+
+const _SC_PAGESIZE = 0x1e
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Line uint8
+ Cc [23]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index b29894d..73e4b76 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index d9af71b..479ca3e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
new file mode 100644
index 0000000..7617a69
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -0,0 +1,642 @@
+// +build mipsle,linux
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go | go run mkpost.go
+
+package unix
+
+const (
+ sizeofPtr = 0x4
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x4
+ sizeofLongLong = 0x8
+ PathMax = 0x1000
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int32
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int32
+ Nsec int32
+}
+
+type Timeval struct {
+ Sec int32
+ Usec int32
+}
+
+type Timex struct {
+ Modes uint32
+ Offset int32
+ Freq int32
+ Maxerror int32
+ Esterror int32
+ Status int32
+ Constant int32
+ Precision int32
+ Tolerance int32
+ Time Timeval
+ Tick int32
+ Ppsfreq int32
+ Jitter int32
+ Shift int32
+ Stabil int32
+ Jitcnt int32
+ Calcnt int32
+ Errcnt int32
+ Stbcnt int32
+ Tai int32
+ Pad_cgo_0 [44]byte
+}
+
+type Time_t int32
+
+type Tms struct {
+ Utime int32
+ Stime int32
+ Cutime int32
+ Cstime int32
+}
+
+type Utimbuf struct {
+ Actime int32
+ Modtime int32
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int32
+ Ixrss int32
+ Idrss int32
+ Isrss int32
+ Minflt int32
+ Majflt int32
+ Nswap int32
+ Inblock int32
+ Oublock int32
+ Msgsnd int32
+ Msgrcv int32
+ Nsignals int32
+ Nvcsw int32
+ Nivcsw int32
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Dev uint32
+ Pad1 [3]int32
+ Ino uint64
+ Mode uint32
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Rdev uint32
+ Pad2 [3]int32
+ Size int64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Blksize int32
+ Pad4 int32
+ Blocks int64
+ Pad5 [14]int32
+}
+
+type Statfs_t struct {
+ Type int32
+ Bsize int32
+ Frsize int32
+ Pad_cgo_0 [4]byte
+ Blocks uint64
+ Bfree uint64
+ Files uint64
+ Ffree uint64
+ Bavail uint64
+ Fsid Fsid
+ Namelen int32
+ Flags int32
+ Spare [5]int32
+ Pad_cgo_1 [4]byte
+}
+
+type Dirent struct {
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]int8
+ Pad_cgo_0 [5]byte
+}
+
+type Fsid struct {
+ X__val [2]int32
+}
+
+type Flock_t struct {
+ Type int16
+ Whence int16
+ Pad_cgo_0 [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ Pad_cgo_1 [4]byte
+}
+
+const (
+ FADV_NORMAL = 0x0
+ FADV_RANDOM = 0x1
+ FADV_SEQUENTIAL = 0x2
+ FADV_WILLNEED = 0x3
+ FADV_DONTNEED = 0x4
+ FADV_NOREUSE = 0x5
+)
+
+type RawSockaddrInet4 struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Family uint16
+ Path [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+ Family uint16
+ Protocol uint16
+ Ifindex int32
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
+type RawSockaddrHCI struct {
+ Family uint16
+ Dev uint16
+ Channel uint16
+}
+
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
+type RawSockaddr struct {
+ Family uint16
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [96]int8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint32
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ Iov *Iovec
+ Iovlen uint32
+ Control *byte
+ Controllen uint32
+ Flags int32
+}
+
+type Cmsghdr struct {
+ Len uint32
+ Level int32
+ Type int32
+}
+
+type Inet4Pktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+type TCPInfo struct {
+ State uint8
+ Ca_state uint8
+ Retransmits uint8
+ Probes uint8
+ Backoff uint8
+ Options uint8
+ Pad_cgo_0 [2]byte
+ Rto uint32
+ Ato uint32
+ Snd_mss uint32
+ Rcv_mss uint32
+ Unacked uint32
+ Sacked uint32
+ Lost uint32
+ Retrans uint32
+ Fackets uint32
+ Last_data_sent uint32
+ Last_ack_sent uint32
+ Last_data_recv uint32
+ Last_ack_recv uint32
+ Pmtu uint32
+ Rcv_ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Advmss uint32
+ Reordering uint32
+ Rcv_rtt uint32
+ Rcv_space uint32
+ Total_retrans uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x70
+ SizeofSockaddrUnix = 0x6e
+ SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
+ SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPMreqn = 0xc
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x1c
+ SizeofCmsghdr = 0xc
+ SizeofInet4Pktinfo = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+ SizeofUcred = 0xc
+ SizeofTCPInfo = 0x68
+)
+
+const (
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x2a
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+)
+
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ X__ifi_pad uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
+type RtNexthop struct {
+ Len uint16
+ Flags uint8
+ Hops uint8
+ Ifindex int32
+}
+
+const (
+ SizeofSockFilter = 0x8
+ SizeofSockFprog = 0x8
+)
+
+type SockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type SockFprog struct {
+ Len uint16
+ Pad_cgo_0 [2]byte
+ Filter *SockFilter
+}
+
+type InotifyEvent struct {
+ Wd int32
+ Mask uint32
+ Cookie uint32
+ Len uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+ Regs [109]uint32
+ U_tsize uint32
+ U_dsize uint32
+ U_ssize uint32
+ Start_code uint32
+ Start_data uint32
+ Start_stack uint32
+ Signal int32
+ U_ar0 *byte
+ Magic uint32
+ U_comm [32]int8
+}
+
+type ptracePsw struct {
+}
+
+type ptraceFpregs struct {
+}
+
+type ptracePer struct {
+}
+
+type FdSet struct {
+ Bits [32]int32
+}
+
+type Sysinfo_t struct {
+ Uptime int32
+ Loads [3]uint32
+ Totalram uint32
+ Freeram uint32
+ Sharedram uint32
+ Bufferram uint32
+ Totalswap uint32
+ Freeswap uint32
+ Procs uint16
+ Pad uint16
+ Totalhigh uint32
+ Freehigh uint32
+ Unit uint32
+ X_f [8]int8
+}
+
+type Utsname struct {
+ Sysname [65]int8
+ Nodename [65]int8
+ Release [65]int8
+ Version [65]int8
+ Machine [65]int8
+ Domainname [65]int8
+}
+
+type Ustat_t struct {
+ Tfree int32
+ Tinode uint32
+ Fname [6]int8
+ Fpack [6]int8
+}
+
+type EpollEvent struct {
+ Events uint32
+ PadFd int32
+ Fd int32
+ Pad int32
+}
+
+const (
+ AT_FDCWD = -0x64
+ AT_REMOVEDIR = 0x200
+ AT_SYMLINK_FOLLOW = 0x400
+ AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type PollFd struct {
+ Fd int32
+ Events int16
+ Revents int16
+}
+
+const (
+ POLLIN = 0x1
+ POLLPRI = 0x2
+ POLLOUT = 0x4
+ POLLRDHUP = 0x2000
+ POLLERR = 0x8
+ POLLHUP = 0x10
+ POLLNVAL = 0x20
+)
+
+type Sigset_t struct {
+ X__val [32]uint32
+}
+
+const _SC_PAGESIZE = 0x1e
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Line uint8
+ Cc [23]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 4218170..2db548b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -332,6 +339,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 7db4c78..4bfdcc0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -207,6 +207,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]uint8
@@ -332,6 +339,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 76ee57c..435cd79 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -206,6 +206,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -330,6 +337,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 7d18b70..439f969 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -211,6 +211,13 @@ type RawSockaddrHCI struct {
Channel uint16
}
+type RawSockaddrCAN struct {
+ Family uint16
+ Pad_cgo_0 [2]byte
+ Ifindex int32
+ Addr [8]byte
+}
+
type RawSockaddr struct {
Family uint16
Data [14]int8
@@ -335,6 +342,7 @@ const (
SizeofSockaddrLinklayer = 0x14
SizeofSockaddrNetlink = 0xc
SizeofSockaddrHCI = 0x6
+ SizeofSockaddrCAN = 0x10
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPMreqn = 0xc
diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go
index 2202c9f..d7031b6 100644
--- a/vendor/golang.org/x/text/internal/gen/code.go
+++ b/vendor/golang.org/x/text/internal/gen/code.go
@@ -117,13 +117,7 @@ func (w *CodeWriter) WriteConst(name string, x interface{}) {
switch v.Type().Kind() {
case reflect.String:
- // See golang.org/issue/13145.
- const arbitraryCutoff = 16
- if v.Len() > arbitraryCutoff {
- w.printf("var %s %s = ", name, typeName(x))
- } else {
- w.printf("const %s %s = ", name, typeName(x))
- }
+ w.printf("const %s %s = ", name, typeName(x))
w.WriteString(v.String())
w.printf("\n")
default:
@@ -203,16 +197,27 @@ func (w *CodeWriter) WriteString(s string) {
// When starting on its own line, go fmt indents line 2+ an extra level.
n, max := maxWidth, maxWidth-4
+ // As per https://golang.org/issue/18078, the compiler has trouble
+ // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
+ // for large N. We insert redundant, explicit parentheses to work around
+ // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
+ // ... + s127) + etc + (etc + ... + sN).
+ explicitParens, extraComment := len(s) > 128*1024, ""
+ if explicitParens {
+ w.printf(`(`)
+ extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
+ }
+
// Print "" +\n, if a string does not start on its own line.
b := w.buf.Bytes()
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
- w.printf("\"\" + // Size: %d bytes\n", len(s))
+ w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
n, max = maxWidth, maxWidth
}
w.printf(`"`)
- for sz, p := 0, 0; p < len(s); {
+ for sz, p, nLines := 0, 0, 0; p < len(s); {
var r rune
r, sz = utf8.DecodeRuneInString(s[p:])
out := s[p : p+sz]
@@ -229,6 +234,10 @@ func (w *CodeWriter) WriteString(s string) {
chars = len(out)
}
if n -= chars; n < 0 {
+ nLines++
+ if explicitParens && nLines&63 == 63 {
+ w.printf("\") + (\"")
+ }
w.printf("\" +\n\"")
n = max - len(out)
}
@@ -236,6 +245,9 @@ func (w *CodeWriter) WriteString(s string) {
p += sz
}
w.printf(`"`)
+ if explicitParens {
+ w.printf(`)`)
+ }
}
// WriteSlice writes a slice value.
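A minimal, hypothetical sketch of the output shape this change produces (the constant name and data are placeholders): roughly every 64 wrapped lines the generator closes one parenthesized group and opens the next, so the compiler never sees a single flat chain of thousands of concatenations (golang.org/issue/18078).

package main

import "fmt"

// Illustrative only: generated string constants are now emitted in
// parenthesized groups of roughly 64 wrapped lines each.
const sample = ("" + // Size: 12 bytes; the redundant, explicit parens are for https://golang.org/issue/18078
	"abc" +
	"def") + ("" +
	"ghi" +
	"jkl")

func main() {
	fmt.Println(sample) // abcdefghijkl
}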
diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go
index 695e365..dcd591f 100644
--- a/vendor/google.golang.org/api/gensupport/resumable.go
+++ b/vendor/google.golang.org/api/gensupport/resumable.go
@@ -5,6 +5,7 @@
package gensupport
import (
+ "errors"
"fmt"
"io"
"net/http"
@@ -15,10 +16,6 @@ import (
)
const (
- // statusResumeIncomplete is the code returned by the Google uploader
- // when the transfer is not yet complete.
- statusResumeIncomplete = 308
-
// statusTooManyRequests is returned by the storage API if the
// per-project limits have been temporarily exceeded. The request
// should be retried.
@@ -79,8 +76,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader,
req.Header.Set("Content-Range", contentRange)
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", rx.UserAgent)
+
+ // Google's upload endpoint uses status code 308 for a
+ // different purpose than the "308 Permanent Redirect"
+ // since-standardized in RFC 7238. Because of the conflict in
+ // semantics, Google added this new request header which
+ // causes it to not use "308" and instead reply with 200 OK
+ // and sets the upload-specific "X-HTTP-Status-Code-Override:
+ // 308" response header.
+ req.Header.Set("X-GUploader-No-308", "yes")
+
return SendRequest(ctx, rx.Client, req)
+}
+func statusResumeIncomplete(resp *http.Response) bool {
+ // This is how the server signals "status resume incomplete"
+ // when X-GUploader-No-308 is set to "yes":
+ return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
}
// reportProgress calls a user-supplied callback to report upload progress.
@@ -111,11 +123,17 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e
return res, err
}
- if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
+ // We sent "X-GUploader-No-308: yes" (see comment elsewhere in
+ // this file), so we don't expect to get a 308.
+ if res.StatusCode == 308 {
+ return nil, errors.New("unexpected 308 response status code")
+ }
+
+ if res.StatusCode == http.StatusOK {
rx.reportProgress(off, off+int64(size))
}
- if res.StatusCode == statusResumeIncomplete {
+ if statusResumeIncomplete(res) {
rx.Media.Next()
}
return res, nil
@@ -177,7 +195,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
// If the chunk was uploaded successfully, but there's still
// more to go, upload the next chunk without any delay.
- if status == statusResumeIncomplete {
+ if statusResumeIncomplete(resp) {
pause = 0
backoff.Reset()
resp.Body.Close()
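A self-contained sketch of the new 308-avoidance check; the helper is copied from the hunk above, and the response here is constructed by hand rather than received from Google's upload endpoint.

package main

import (
	"fmt"
	"net/http"
)

// statusResumeIncomplete mirrors the helper added above: when the request
// carried "X-GUploader-No-308: yes", an incomplete chunk is reported as
// 200 OK plus an override header instead of a bare 308.
func statusResumeIncomplete(resp *http.Response) bool {
	return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Header:     http.Header{"X-Http-Status-Code-Override": {"308"}},
	}
	fmt.Println(statusResumeIncomplete(resp)) // true
}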
diff --git a/vendor/google.golang.org/api/gensupport/retry.go b/vendor/google.golang.org/api/gensupport/retry.go
index 7f83d1d..9023368 100644
--- a/vendor/google.golang.org/api/gensupport/retry.go
+++ b/vendor/google.golang.org/api/gensupport/retry.go
@@ -55,23 +55,17 @@ func DefaultBackoffStrategy() BackoffStrategy {
// shouldRetry returns true if the HTTP response / error indicates that the
// request should be attempted again.
func shouldRetry(status int, err error) bool {
- // Retry for 5xx response codes.
- if 500 <= status && status < 600 {
+ if 500 <= status && status <= 599 {
return true
}
-
- // Retry on statusTooManyRequests{
if status == statusTooManyRequests {
return true
}
-
- // Retry on unexpected EOFs and temporary network errors.
if err == io.ErrUnexpectedEOF {
return true
}
if err, ok := err.(net.Error); ok {
return err.Temporary()
}
-
return false
}
diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go
new file mode 100644
index 0000000..eca1ea2
--- /dev/null
+++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go
@@ -0,0 +1,38 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package transport contains HTTP transports used to make
+// authenticated API requests.
+package transport
+
+import (
+ "errors"
+ "net/http"
+)
+
+// APIKey is an HTTP Transport which wraps an underlying transport and
+// appends an API Key "key" parameter to the URL of outgoing requests.
+type APIKey struct {
+ // Key is the API Key to set on requests.
+ Key string
+
+ // Transport is the underlying HTTP transport.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+}
+
+func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
+ rt := t.Transport
+ if rt == nil {
+ rt = http.DefaultTransport
+ if rt == nil {
+ return nil, errors.New("googleapi/transport: no Transport specified or available")
+ }
+ }
+ newReq := *req
+ args := newReq.URL.Query()
+ args.Set("key", t.Key)
+ newReq.URL.RawQuery = args.Encode()
+ return rt.RoundTrip(&newReq)
+}
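A hypothetical usage sketch of the new transport.APIKey round-tripper (the key and URL are placeholders, not values from this patch):

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/api/googleapi/transport"
)

func main() {
	// Wrap the default transport so every outgoing request gets a ?key=... parameter.
	client := &http.Client{
		Transport: &transport.APIKey{Key: "YOUR_API_KEY"},
	}
	resp, err := client.Get("https://www.googleapis.com/oauth2/v2/tokeninfo")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}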
diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go
index a02b4b0..c8fdd54 100644
--- a/vendor/google.golang.org/api/googleapi/types.go
+++ b/vendor/google.golang.org/api/googleapi/types.go
@@ -6,6 +6,7 @@ package googleapi
import (
"encoding/json"
+ "errors"
"strconv"
)
@@ -149,6 +150,25 @@ func (s Float64s) MarshalJSON() ([]byte, error) {
})
}
+// RawMessage is a raw encoded JSON value.
+// It is identical to json.RawMessage, except it does not suffer from
+// https://golang.org/issue/14493.
+type RawMessage []byte
+
+// MarshalJSON returns m.
+func (m RawMessage) MarshalJSON() ([]byte, error) {
+ return m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[:0], data...)
+ return nil
+}
+
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
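A small, self-contained sketch of how the new RawMessage behaves; the type is re-declared locally here purely for illustration of the round trip.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// RawMessage mirrors the type added above: the bytes are kept verbatim on
// unmarshal and returned as-is on marshal, sidestepping golang.org/issue/14493.
type RawMessage []byte

func (m RawMessage) MarshalJSON() ([]byte, error) { return m, nil }

func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[:0], data...)
	return nil
}

func main() {
	var m RawMessage
	_ = json.Unmarshal([]byte(`{"a":1}`), &m)
	out, _ := json.Marshal(m)
	fmt.Println(string(out)) // {"a":1}
}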
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 976280b..6e60e48 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -16,6 +16,7 @@ type DialSettings struct {
ServiceAccountJSONFilename string // if set, TokenSource is ignored.
TokenSource oauth2.TokenSource
UserAgent string
+ APIKey string
HTTPClient *http.Client
GRPCDialOpts []grpc.DialOption
GRPCConn *grpc.ClientConn
diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json
index fb63f08..f1f8beb 100644
--- a/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json
+++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-api.json
@@ -1,18 +1,18 @@
{
"kind": "discovery#restDescription",
- "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/VY5CxwtmcPmH-ruiSL2amW9TN0Q\"",
+ "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/DLGEtypjIuIXh77Iqrmfcan50ew\"",
"discoveryVersion": "v1",
"id": "oauth2:v2",
"name": "oauth2",
"version": "v2",
- "revision": "20160330",
+ "revision": "20161103",
"title": "Google OAuth2 API",
"description": "Obtains end-user authorization grants for use with other Google APIs.",
"ownerDomain": "google.com",
"ownerName": "Google",
"icons": {
- "x16": "http://www.google.com/images/icons/product/search-16.gif",
- "x32": "http://www.google.com/images/icons/product/search-32.gif"
+ "x16": "https://www.gstatic.com/images/branding/product/1x/googleg_16dp.png",
+ "x32": "https://www.gstatic.com/images/branding/product/1x/googleg_32dp.png"
},
"documentationLink": "https://developers.google.com/accounts/docs/OAuth2",
"protocol": "rest",
diff --git a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go
index b1a7694..7440468 100644
--- a/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go
+++ b/vendor/google.golang.org/api/oauth2/v2/oauth2-gen.go
@@ -317,6 +317,7 @@ type GetCertForOpenIdConnectCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// GetCertForOpenIdConnect:
@@ -351,8 +352,20 @@ func (c *GetCertForOpenIdConnectCall) Context(ctx context.Context) *GetCertForOp
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *GetCertForOpenIdConnectCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *GetCertForOpenIdConnectCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -420,6 +433,7 @@ type TokeninfoCall struct {
s *Service
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Tokeninfo:
@@ -462,8 +476,20 @@ func (c *TokeninfoCall) Context(ctx context.Context) *TokeninfoCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *TokeninfoCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *TokeninfoCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -543,6 +569,7 @@ type UserinfoGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get:
@@ -577,8 +604,20 @@ func (c *UserinfoGetCall) Context(ctx context.Context) *UserinfoGetCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *UserinfoGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *UserinfoGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -653,6 +692,7 @@ type UserinfoV2MeGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get:
@@ -687,8 +727,20 @@ func (c *UserinfoV2MeGetCall) Context(ctx context.Context) *UserinfoV2MeGetCall
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *UserinfoV2MeGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *UserinfoV2MeGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go
index b935e6d..f266919 100644
--- a/vendor/google.golang.org/api/option/option.go
+++ b/vendor/google.golang.org/api/option/option.go
@@ -130,3 +130,13 @@ func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o))
o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer))
}
+
+// WithAPIKey returns a ClientOption that specifies an API key to be used
+// as the basis for authentication.
+func WithAPIKey(apiKey string) ClientOption {
+ return withAPIKey(apiKey)
+}
+
+type withAPIKey string
+
+func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 598d1a5..67fe1a2 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -1,11 +1,11 @@
{
"kind": "discovery#restDescription",
- "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/G3kZz5Dv92Y-2NZwaNrcr5jwm4A\"",
+ "etag": "\"tbys6C40o18GZwyMen5GMkdK-3s/sMgjc4eoIFjgub4daTU-MGW0WMA\"",
"discoveryVersion": "v1",
"id": "storage:v1",
"name": "storage",
"version": "v1",
- "revision": "20161019",
+ "revision": "20161109",
"title": "Cloud Storage JSON API",
"description": "Stores and retrieves potentially large, immutable data objects.",
"ownerDomain": "google.com",
@@ -685,6 +685,11 @@
"description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
"format": "date-time"
},
+ "timeStorageClassUpdated": {
+ "type": "string",
+ "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.",
+ "format": "date-time"
+ },
"updated": {
"type": "string",
"description": "The modification time of the object metadata in RFC 3339 format.",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 4c458af..0f1d094 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1034,6 +1034,11 @@ type Object struct {
// deleted.
TimeDeleted string `json:"timeDeleted,omitempty"`
+ // TimeStorageClassUpdated: The time at which the object's storage class
+ // was last changed. When the object is initially created, it will be
+ // set to timeCreated.
+ TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"`
+
// Updated: The modification time of the object metadata in RFC 3339
// format.
Updated string `json:"updated,omitempty"`
@@ -1390,6 +1395,7 @@ type BucketAccessControlsDeleteCall struct {
entity string
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Delete: Permanently deletes the ACL entry for the specified entity on
@@ -1417,8 +1423,20 @@ func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -1485,6 +1503,7 @@ type BucketAccessControlsGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get: Returns the ACL entry for the specified entity on the specified
@@ -1522,8 +1541,20 @@ func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccess
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -1620,6 +1651,7 @@ type BucketAccessControlsInsertCall struct {
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Insert: Creates a new ACL entry on the specified bucket.
@@ -1646,8 +1678,20 @@ func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsInsertCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
@@ -1741,6 +1785,7 @@ type BucketAccessControlsListCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// List: Retrieves ACL entries on the specified bucket.
@@ -1776,8 +1821,20 @@ func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAcces
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -1867,6 +1924,7 @@ type BucketAccessControlsPatchCall struct {
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Patch: Updates an ACL entry on the specified bucket. This method
@@ -1895,8 +1953,20 @@ func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAcce
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
@@ -1999,6 +2069,7 @@ type BucketAccessControlsUpdateCall struct {
bucketaccesscontrol *BucketAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Update: Updates an ACL entry on the specified bucket.
@@ -2026,8 +2097,20 @@ func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsUpdateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
@@ -2128,6 +2211,7 @@ type BucketsDeleteCall struct {
bucket string
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Delete: Permanently deletes an empty bucket.
@@ -2169,8 +2253,20 @@ func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -2241,6 +2337,7 @@ type BucketsGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get: Returns metadata for the specified bucket.
@@ -2305,8 +2402,20 @@ func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -2422,6 +2531,7 @@ type BucketsInsertCall struct {
bucket *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Insert: Creates a new bucket.
@@ -2500,8 +2610,20 @@ func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsInsertCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
@@ -2645,6 +2767,7 @@ type BucketsListCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// List: Retrieves a list of buckets for a given project.
@@ -2713,8 +2836,20 @@ func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -2854,6 +2989,7 @@ type BucketsPatchCall struct {
bucket2 *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Patch: Updates a bucket. Changes to the bucket will be readable
@@ -2950,8 +3086,20 @@ func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
@@ -3110,6 +3258,7 @@ type BucketsUpdateCall struct {
bucket2 *Bucket
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Update: Updates a bucket. Changes to the bucket will be readable
@@ -3206,8 +3355,20 @@ func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsUpdateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
@@ -3365,6 +3526,7 @@ type ChannelsStopCall struct {
channel *Channel
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Stop: Stop watching resources through this channel
@@ -3390,8 +3552,20 @@ func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ChannelsStopCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
@@ -3447,6 +3621,7 @@ type DefaultObjectAccessControlsDeleteCall struct {
entity string
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Delete: Permanently deletes the default object ACL entry for the
@@ -3474,8 +3649,20 @@ func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *De
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -3542,6 +3729,7 @@ type DefaultObjectAccessControlsGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get: Returns the default object ACL entry for the specified entity on
@@ -3579,8 +3767,20 @@ func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *Defau
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -3677,6 +3877,7 @@ type DefaultObjectAccessControlsInsertCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Insert: Creates a new default object ACL entry on the specified
@@ -3704,8 +3905,20 @@ func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *De
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -3799,6 +4012,7 @@ type DefaultObjectAccessControlsListCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// List: Retrieves default object ACL entries on the specified bucket.
@@ -3851,8 +4065,20 @@ func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *Defa
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -3954,6 +4180,7 @@ type DefaultObjectAccessControlsPatchCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Patch: Updates a default object ACL entry on the specified bucket.
@@ -3982,8 +4209,20 @@ func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *Def
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -4086,6 +4325,7 @@ type DefaultObjectAccessControlsUpdateCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Update: Updates a default object ACL entry on the specified bucket.
@@ -4113,8 +4353,20 @@ func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *De
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -4217,6 +4469,7 @@ type ObjectAccessControlsDeleteCall struct {
entity string
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Delete: Permanently deletes the ACL entry for the specified entity on
@@ -4253,8 +4506,20 @@ func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -4336,6 +4601,7 @@ type ObjectAccessControlsGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get: Returns the ACL entry for the specified entity on the specified
@@ -4382,8 +4648,20 @@ func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccess
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -4495,6 +4773,7 @@ type ObjectAccessControlsInsertCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Insert: Creates a new ACL entry on the specified object.
@@ -4530,8 +4809,20 @@ func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsInsertCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -4640,6 +4931,7 @@ type ObjectAccessControlsListCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// List: Retrieves ACL entries on the specified object.
@@ -4684,8 +4976,20 @@ func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAcces
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -4790,6 +5094,7 @@ type ObjectAccessControlsPatchCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Patch: Updates an ACL entry on the specified object. This method
@@ -4827,8 +5132,20 @@ func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAcce
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -4946,6 +5263,7 @@ type ObjectAccessControlsUpdateCall struct {
objectaccesscontrol *ObjectAccessControl
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Update: Updates an ACL entry on the specified object.
@@ -4982,8 +5300,20 @@ func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAcc
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
@@ -5100,6 +5430,7 @@ type ObjectsComposeCall struct {
composerequest *ComposeRequest
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Compose: Concatenates a list of existing objects into a new object in
@@ -5165,8 +5496,20 @@ func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsComposeCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)
@@ -5323,6 +5666,7 @@ type ObjectsCopyCall struct {
object *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Copy: Copies a source object to a destination object. Optionally
@@ -5464,8 +5808,20 @@ func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsCopyCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
@@ -5690,6 +6046,7 @@ type ObjectsDeleteCall struct {
object string
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Delete: Deletes an object and its metadata. Deletions are permanent
@@ -5759,8 +6116,20 @@ func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsDeleteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
c.urlParams_.Set("alt", alt)
@@ -5858,6 +6227,7 @@ type ObjectsGetCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// Get: Retrieves an object or its metadata.
@@ -5946,8 +6316,20 @@ func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsGetCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -6113,6 +6495,7 @@ type ObjectsInsertCall struct {
mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_.
progressUpdater_ googleapi.ProgressUpdater
ctx_ context.Context
+ header_ http.Header
}
// Insert: Stores a new object and metadata.
@@ -6275,8 +6658,20 @@ func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsInsertCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
@@ -6505,6 +6900,7 @@ type ObjectsListCall struct {
urlParams_ gensupport.URLParams
ifNoneMatch_ string
ctx_ context.Context
+ header_ http.Header
}
// List: Retrieves a list of objects matching the criteria.
@@ -6594,8 +6990,20 @@ func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsListCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
if c.ifNoneMatch_ != "" {
reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
@@ -6750,6 +7158,7 @@ type ObjectsPatchCall struct {
object2 *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Patch: Updates an object's metadata. This method supports patch
@@ -6850,8 +7259,20 @@ func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsPatchCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
@@ -7020,6 +7441,7 @@ type ObjectsRewriteCall struct {
object *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Rewrite: Rewrites a source object to a destination object. Optionally
@@ -7186,8 +7608,20 @@ func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsRewriteCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
@@ -7406,6 +7840,7 @@ type ObjectsUpdateCall struct {
object2 *Object
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// Update: Updates an object's metadata.
@@ -7505,8 +7940,20 @@ func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsUpdateCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
@@ -7690,6 +8137,7 @@ type ObjectsWatchAllCall struct {
channel *Channel
urlParams_ gensupport.URLParams
ctx_ context.Context
+ header_ http.Header
}
// WatchAll: Watch for changes on all objects in a bucket.
@@ -7770,8 +8218,20 @@ func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall
return c
}
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ObjectsWatchAllCall) Header() http.Header {
+ if c.header_ == nil {
+ c.header_ = make(http.Header)
+ }
+ return c.header_
+}
+
func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
+ for k, v := range c.header_ {
+ reqHeaders[k] = v
+ }
reqHeaders.Set("User-Agent", c.s.userAgent())
var body io.Reader = nil
body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
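Every generated call type in this file now gains the same Header() accessor, and doRequest copies it into reqHeaders before the User-Agent is set. As a rough sketch of how a caller might use it (the bucket, object, and header names below are illustrative, not part of this change):
```
// Sketch only: attach a custom header to a generated storage call.
package main

import (
	"log"
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	// A real client would be authenticated; http.DefaultClient is a placeholder.
	svc, err := storage.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	call := svc.Objects.Get("example-bucket", "example-object")
	// Header() lazily allocates c.header_; doRequest copies it into the request.
	call.Header().Set("X-Example-Debug", "1")
	if _, err := call.Do(); err != nil {
		log.Fatal(err)
	}
}
```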
diff --git a/vendor/google.golang.org/api/transport/dial.go b/vendor/google.golang.org/api/transport/dial.go
index c054460..9971eb8 100644
--- a/vendor/google.golang.org/api/transport/dial.go
+++ b/vendor/google.golang.org/api/transport/dial.go
@@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
+ gtransport "google.golang.org/api/googleapi/transport"
"google.golang.org/api/internal"
"google.golang.org/api/option"
)
@@ -49,6 +50,15 @@ func NewHTTPClient(ctx context.Context, opts ...option.ClientOption) (*http.Clie
if o.HTTPClient != nil {
return o.HTTPClient, o.Endpoint, nil
}
+ if o.APIKey != "" {
+ hc := &http.Client{
+ Transport: &gtransport.APIKey{
+ Key: o.APIKey,
+ Transport: http.DefaultTransport,
+ },
+ }
+ return hc, o.Endpoint, nil
+ }
if o.ServiceAccountJSONFilename != "" {
ts, err := serviceAcctTokenSource(ctx, o.ServiceAccountJSONFilename, o.Scopes...)
if err != nil {
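The new APIKey branch in NewHTTPClient wraps the default transport with the googleapi/transport round-tripper added to the imports above. A minimal sketch of building such a client by hand (the key value is a placeholder):
```
package main

import (
	"log"
	"net/http"

	gtransport "google.golang.org/api/googleapi/transport"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	// Mirrors what NewHTTPClient now constructs when an API key option is set.
	hc := &http.Client{
		Transport: &gtransport.APIKey{
			Key:       "YOUR_API_KEY", // placeholder
			Transport: http.DefaultTransport,
		},
	}
	svc, err := storage.New(hc)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}
```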
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
index 09562c4..aba0f83 100644
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -576,6 +576,9 @@ var logLevelName = map[int64]string{
}
func logf(c *context, level int64, format string, args ...interface{}) {
+ if c == nil {
+ panic("not an App Engine context")
+ }
s := fmt.Sprintf(format, args...)
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
c.addLogLine(&logpb.UserAppLogLine{
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 110a8cf..39120c2 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -18,6 +18,22 @@ Prerequisites
This requires Go 1.5 or later.
+A note on the Go version used: grpc-go benchmarks show significant
+performance improvements when upgrading from Go 1.5 to the latest
+release, 1.7.1.
+
+From https://golang.org/doc/install, one way to install the latest version of go is:
+```
+$ GO_VERSION=1.7.1
+$ OS=linux
+$ ARCH=amd64
+$ curl -O https://storage.googleapis.com/golang/go${GO_VERSION}.${OS}-${ARCH}.tar.gz
+$ sudo tar -C /usr/local -xzf go$GO_VERSION.$OS-$ARCH.tar.gz
+$ # Put go on the PATH, keep the usual installation dir
+$ sudo ln -s /usr/local/go/bin/go /usr/bin/go
+$ rm go$GO_VERSION.$OS-$ARCH.tar.gz
+```
+
Constraints
-----------
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
@@ -30,3 +46,12 @@ Status
------
GA
+FAQ
+---
+
+#### Compile error: undefined: grpc.SupportPackageIsVersion
+
+Please update the proto and gRPC packages, then rebuild the proto files:
+ - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
+ - `go get -u google.golang.org/grpc`
+ - `protoc --go_out=plugins=grpc:. *.proto`
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 772c817..4d8023d 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -42,6 +42,7 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
@@ -49,7 +50,8 @@ import (
// On error, it returns the error and indicates whether the call should be retried.
//
// TODO(zhaoq): Check whether the received message sequence is valid.
-func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
+// TODO ctx is used for stats collection and processing. It is the context passed from the application.
+func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
// Try to acquire header metadata from the server if there is any.
defer func() {
if err != nil {
@@ -63,14 +65,25 @@ func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, s
return
}
p := &parser{r: stream}
+ var inPayload *stats.InPayload
+ if stats.On() {
+ inPayload = &stats.InPayload{
+ Client: true,
+ }
+ }
for {
- if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil {
+ if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32, inPayload); err != nil {
if err == io.EOF {
break
}
return
}
}
+ if inPayload != nil && err == io.EOF && stream.StatusCode() == codes.OK {
+ // TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
+ // Fix the order if necessary.
+ stats.HandleRPC(ctx, inPayload)
+ }
c.trailerMD = stream.Trailer()
return nil
}
@@ -89,15 +102,27 @@ func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHd
}
}
}()
- var cbuf *bytes.Buffer
+ var (
+ cbuf *bytes.Buffer
+ outPayload *stats.OutPayload
+ )
if compressor != nil {
cbuf = new(bytes.Buffer)
}
- outBuf, err := encode(codec, args, compressor, cbuf)
+ if stats.On() {
+ outPayload = &stats.OutPayload{
+ Client: true,
+ }
+ }
+ outBuf, err := encode(codec, args, compressor, cbuf, outPayload)
if err != nil {
return nil, Errorf(codes.Internal, "grpc: %v", err)
}
err = t.Write(stream, outBuf, opts)
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ stats.HandleRPC(ctx, outPayload)
+ }
// t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
// does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following
// recvResponse to get the final status.
@@ -118,8 +143,16 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
return invoke(ctx, method, args, reply, cc, opts...)
}
-func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
+func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
c := defaultCallInfo
+ if mc, ok := cc.getMethodConfig(method); ok {
+ c.failFast = !mc.WaitForReady
+ if mc.Timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
+ defer cancel()
+ }
+ }
for _, o := range opts {
if err := o.before(&c); err != nil {
return toRPCErr(err)
@@ -140,12 +173,31 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
defer func() {
- if err != nil {
- c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ if e != nil {
+ c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
c.traceInfo.tr.SetError()
}
}()
}
+ if stats.On() {
+ ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: time.Now(),
+ FailFast: c.failFast,
+ }
+ stats.HandleRPC(ctx, begin)
+ }
+ defer func() {
+ if stats.On() {
+ end := &stats.End{
+ Client: true,
+ EndTime: time.Now(),
+ Error: e,
+ }
+ stats.HandleRPC(ctx, end)
+ }
+ }()
topts := &transport.Options{
Last: true,
Delay: false,
@@ -167,6 +219,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
if cc.dopts.cp != nil {
callHdr.SendCompress = cc.dopts.cp.Type()
}
+
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
@@ -205,7 +258,7 @@ func invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
}
return toRPCErr(err)
}
- err = recvResponse(cc.dopts, t, &c, stream, reply)
+ err = recvResponse(ctx, cc.dopts, t, &c, stream, reply)
if err != nil {
if put != nil {
put()
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 6167472..aa6b63d 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -54,6 +54,8 @@ var (
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
// underlying connections within the specified timeout.
+ // DEPRECATED: Please use context.DeadlineExceeded instead. This error will be
+ // removed in Q1 2017.
ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errNoTransportSecurity indicates that there is no transport security
@@ -93,6 +95,7 @@ type dialOptions struct {
block bool
insecure bool
timeout time.Duration
+ scChan <-chan ServiceConfig
copts transport.ConnectOptions
}
@@ -129,6 +132,13 @@ func WithBalancer(b Balancer) DialOption {
}
}
+// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+ return func(o *dialOptions) {
+ o.scChan = c
+ }
+}
+
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
// when backing off after failed connection attempts.
func WithBackoffMaxDelay(md time.Duration) DialOption {
@@ -199,6 +209,8 @@ func WithTimeout(d time.Duration) DialOption {
}
// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
+// Temporary() method to decide if it should try to reconnect to the network address.
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
return func(o *dialOptions) {
o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) {
@@ -210,6 +222,17 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
}
}
+// FailOnNonTempDialError returns a DialOption that specifies whether gRPC fails on non-temporary dial errors.
+// If f is true and the dialer returns a non-temporary error, gRPC will fail the connection to the network
+// address and won't try to reconnect.
+// The default value of FailOnNonTempDialError is false.
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+ return func(o *dialOptions) {
+ o.copts.FailOnNonTempDialError = f
+ }
+}
+
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
func WithUserAgent(s string) DialOption {
return func(o *dialOptions) {
@@ -247,6 +270,15 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
conns: make(map[Address]*addrConn),
}
cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ for _, opt := range opts {
+ opt(&cc.dopts)
+ }
+ if cc.dopts.timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
+ defer cancel()
+ }
+
defer func() {
select {
case <-ctx.Done():
@@ -259,10 +291,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- for _, opt := range opts {
- opt(&cc.dopts)
+ if cc.dopts.scChan != nil {
+ // Wait for the initial service config.
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if ok {
+ cc.sc = sc
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
}
-
// Set defaults.
if cc.dopts.codec == nil {
cc.dopts.codec = protoCodec{}
@@ -284,6 +323,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
waitC := make(chan error, 1)
go func() {
var addrs []Address
+ if cc.dopts.balancer == nil && cc.sc.LB != nil {
+ cc.dopts.balancer = cc.sc.LB
+ }
if cc.dopts.balancer == nil {
// Connect to target directly if balancer is nil.
addrs = append(addrs, Address{Addr: target})
@@ -319,10 +361,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
close(waitC)
}()
- var timeoutCh <-chan time.Time
- if cc.dopts.timeout > 0 {
- timeoutCh = time.After(cc.dopts.timeout)
- }
select {
case <-ctx.Done():
return nil, ctx.Err()
@@ -330,14 +368,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if err != nil {
return nil, err
}
- case <-timeoutCh:
- return nil, ErrClientConnTimeout
}
+
// If balancer is nil or balancer.Notify() is nil, ok will be false here.
// The lbWatcher goroutine will not be created.
if ok {
go cc.lbWatcher()
}
+
+ if cc.dopts.scChan != nil {
+ go cc.scWatcher()
+ }
return cc, nil
}
@@ -384,6 +425,7 @@ type ClientConn struct {
dopts dialOptions
mu sync.RWMutex
+ sc ServiceConfig
conns map[Address]*addrConn
}
@@ -422,6 +464,24 @@ func (cc *ClientConn) lbWatcher() {
}
}
+func (cc *ClientConn) scWatcher() {
+ for {
+ select {
+ case sc, ok := <-cc.dopts.scChan:
+ if !ok {
+ return
+ }
+ cc.mu.Lock()
+ // TODO: load balance policy runtime change is ignored.
+ // We may revisit this decision in the future.
+ cc.sc = sc
+ cc.mu.Unlock()
+ case <-cc.ctx.Done():
+ return
+ }
+ }
+}
+
// resetAddrConn creates an addrConn for addr and adds it to cc.conns.
// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason.
// If tearDownErr is nil, errConnDrain will be used instead.
@@ -509,6 +569,14 @@ func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr err
return nil
}
+// TODO: Avoid the locking here.
+func (cc *ClientConn) getMethodConfig(method string) (m MethodConfig, ok bool) {
+ cc.mu.RLock()
+ defer cc.mu.RUnlock()
+ m, ok = cc.sc.Methods[method]
+ return
+}
+
func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
var (
ac *addrConn
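The dialer-related options above can be combined; a hedged sketch (the target address is a placeholder), using the WithDialer signature and the new FailOnNonTempDialError option from this hunk:
```
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		// Custom dialer with the func(string, time.Duration) signature expected by WithDialer.
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("tcp", addr, timeout)
		}),
		// Give up immediately on non-temporary dial errors instead of retrying.
		grpc.FailOnNonTempDialError(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```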
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
index b009488..4cdc6ba 100755
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# This script serves as an example to demonstrate how to generate the gRPC-Go
# interface and the related messages from .proto file.
diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh
index 1202353..b85f918 100755
--- a/vendor/google.golang.org/grpc/coverage.sh
+++ b/vendor/google.golang.org/grpc/coverage.sh
@@ -1,4 +1,5 @@
-#!/bin/bash
+#!/usr/bin/env bash
+
set -e
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
index 8e68c4d..25393cc 100644
--- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -61,7 +61,7 @@ func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (ma
}, nil
}
-// RequireTransportSecurity indicates whether the credentails requires transport security.
+// RequireTransportSecurity indicates whether the credentials require transport security.
func (ts TokenSource) RequireTransportSecurity() bool {
return true
}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 3c0ca7a..65dc5af 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -141,6 +141,8 @@ func NewContext(ctx context.Context, md MD) context.Context {
}
// FromContext returns the MD in ctx if it exists.
+// The returned md should be treated as immutable; writing to it may cause races.
+// Modifications should be made to a copy of the returned md.
func FromContext(ctx context.Context) (md MD, ok bool) {
md, ok = ctx.Value(mdKey{}).(MD)
return
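Per the clarified FromContext comment, incoming metadata should not be written to directly. A small sketch of the copy-then-modify pattern it suggests (the key name is illustrative):
```
package main

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc/metadata"
)

// withExtraMD copies any metadata already in ctx, adds one key to the copy,
// and returns a context carrying the copy, leaving the original MD untouched.
func withExtraMD(ctx context.Context) context.Context {
	cp := metadata.MD{}
	if md, ok := metadata.FromContext(ctx); ok {
		for k, v := range md {
			cp[k] = append([]string(nil), v...)
		}
	}
	cp["x-example-key"] = []string{"value"}
	return metadata.NewContext(ctx, cp)
}

func main() {
	_ = withExtraMD(context.Background())
}
```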
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 6b60095..2619d39 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -42,11 +42,13 @@ import (
"io/ioutil"
"math"
"os"
+ "time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
@@ -255,9 +257,11 @@ func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err erro
// encode serializes msg and prepends the message header. If msg is nil, it
// generates the message header of 0 message length.
-func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
- var b []byte
- var length uint
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer, outPayload *stats.OutPayload) ([]byte, error) {
+ var (
+ b []byte
+ length uint
+ )
if msg != nil {
var err error
// TODO(zhaoq): optimize to reduce memory alloc and copying.
@@ -265,6 +269,12 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte
if err != nil {
return nil, err
}
+ if outPayload != nil {
+ outPayload.Payload = msg
+ // TODO truncate large payload.
+ outPayload.Data = b
+ outPayload.Length = len(b)
+ }
if cp != nil {
if err := cp.Do(cbuf, b); err != nil {
return nil, err
@@ -295,6 +305,10 @@ func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte
// Copy encoded msg to buf
copy(buf[5:], b)
+ if outPayload != nil {
+ outPayload.WireLength = len(buf)
+ }
+
return buf, nil
}
@@ -311,11 +325,14 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) er
return nil
}
-func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error {
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int, inPayload *stats.InPayload) error {
pf, d, err := p.recvMsg(maxMsgSize)
if err != nil {
return err
}
+ if inPayload != nil {
+ inPayload.WireLength = len(d)
+ }
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
return err
}
@@ -333,6 +350,13 @@ func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{
if err := c.Unmarshal(d, m); err != nil {
return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
}
+ if inPayload != nil {
+ inPayload.RecvTime = time.Now()
+ inPayload.Payload = m
+ // TODO truncate large payload.
+ inPayload.Data = d
+ inPayload.Length = len(d)
+ }
return nil
}
@@ -448,10 +472,48 @@ func convertCode(err error) codes.Code {
return codes.Unknown
}
-// SupportPackageIsVersion3 is referenced from generated protocol buffer files
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+// This is EXPERIMENTAL and subject to change.
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized,
+ // uncompressed payload in bytes. The actual value used is the minimum of the value
+ // specified here and the value set by the application via the gRPC client API. If
+ // either one is not set, then the other will be used. If neither is set, then the
+ // built-in default is used.
+ // TODO: support this.
+ MaxReqSize uint64
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ // TODO: support this.
+ MaxRespSize uint64
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+// This is EXPERIMENTAL and subject to change.
+type ServiceConfig struct {
+ // LB is the load balancer the service providers recommends. The balancer specified
+ // via grpc.WithBalancer will override this.
+ LB Balancer
+ // Methods contains a map for the methods in this service.
+ Methods map[string]MethodConfig
+}
+
+// SupportPackageIsVersion4 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the grpc package.
//
// This constant may be renamed in the future if a change in the generated code
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
// should not be referenced from any other code.
-const SupportPackageIsVersion3 = true
+const SupportPackageIsVersion4 = true
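These MethodConfig/ServiceConfig types pair with the WithServiceConfig dial option added in clientconn.go above; DialContext blocks until the initial config arrives on the channel. A hedged sketch of wiring one up (the target and method name are placeholders):
```
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	sc := grpc.ServiceConfig{
		Methods: map[string]grpc.MethodConfig{
			// Keyed by full method name, e.g. /package.service/method.
			"/example.Greeter/SayHello": {
				WaitForReady: true,
				Timeout:      2 * time.Second,
			},
		},
	}
	ch := make(chan grpc.ServiceConfig, 1)
	ch <- sc // buffered so DialContext can read the initial config immediately
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithServiceConfig(ch))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```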
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index e0bb187..b52a563 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -54,6 +54,8 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/tap"
"google.golang.org/grpc/transport"
)
@@ -110,6 +112,7 @@ type options struct {
maxMsgSize int
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
+ inTapHandle tap.ServerInHandle
maxConcurrentStreams uint32
useHandlerImpl bool // use http.Handler-based server
}
@@ -186,6 +189,17 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption {
}
}
+// InTapHandle returns a ServerOption that sets the tap handle for all server
+// transports to be created. Only one can be installed.
+func InTapHandle(h tap.ServerInHandle) ServerOption {
+ return func(o *options) {
+ if o.inTapHandle != nil {
+ panic("The tap handle has been set.")
+ }
+ o.inTapHandle = h
+ }
+}
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -329,6 +343,7 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
+// Serve always returns a non-nil error.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
@@ -412,17 +427,22 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
if s.opts.useHandlerImpl {
s.serveUsingHandler(conn)
} else {
- s.serveNewHTTP2Transport(conn, authInfo)
+ s.serveHTTP2Transport(conn, authInfo)
}
}
-// serveNewHTTP2Transport sets up a new http/2 transport (using the
+// serveHTTP2Transport sets up an http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go) and
// serves streams on it.
// This is run in its own goroutine (it does network I/O in
// transport.NewServerTransport).
-func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
- st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
+func (s *Server) serveHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
+ config := &transport.ServerConfig{
+ MaxStreams: s.opts.maxConcurrentStreams,
+ AuthInfo: authInfo,
+ InTapHandle: s.opts.inTapHandle,
+ }
+ st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
s.mu.Lock()
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
@@ -448,6 +468,12 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
defer wg.Done()
s.handleStream(st, stream, s.traceInfo(st, stream))
}()
+ }, func(ctx context.Context, method string) context.Context {
+ if !EnableTracing {
+ return ctx
+ }
+ tr := trace.New("grpc.Recv."+methodFamily(method), method)
+ return trace.NewContext(ctx, tr)
})
wg.Wait()
}
@@ -497,15 +523,17 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
// If tracing is not enabled, it returns nil.
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- if !EnableTracing {
+ tr, ok := trace.FromContext(stream.Context())
+ if !ok {
return nil
}
+
trInfo = &traceInfo{
- tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
+ tr: tr,
}
trInfo.firstLine.client = false
trInfo.firstLine.remoteAddr = st.RemoteAddr()
- stream.TraceContext(trInfo.tr)
+
if dl, ok := stream.Context().Deadline(); ok {
trInfo.firstLine.deadline = dl.Sub(time.Now())
}
@@ -532,11 +560,17 @@ func (s *Server) removeConn(c io.Closer) {
}
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
- var cbuf *bytes.Buffer
+ var (
+ cbuf *bytes.Buffer
+ outPayload *stats.OutPayload
+ )
if cp != nil {
cbuf = new(bytes.Buffer)
}
- p, err := encode(s.opts.codec, msg, cp, cbuf)
+ if stats.On() {
+ outPayload = &stats.OutPayload{}
+ }
+ p, err := encode(s.opts.codec, msg, cp, cbuf, outPayload)
if err != nil {
// This typically indicates a fatal issue (e.g., memory
// corruption or hardware faults) the application program
@@ -547,10 +581,32 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
// the optimal option.
grpclog.Fatalf("grpc: Server failed to encode response %v", err)
}
- return t.Write(stream, p, opts)
+ err = t.Write(stream, p, opts)
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ stats.HandleRPC(stream.Context(), outPayload)
+ }
+ return err
}
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+ if stats.On() {
+ begin := &stats.Begin{
+ BeginTime: time.Now(),
+ }
+ stats.HandleRPC(stream.Context(), begin)
+ }
+ defer func() {
+ if stats.On() {
+ end := &stats.End{
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ stats.HandleRPC(stream.Context(), end)
+ }
+ }()
if trInfo != nil {
defer trInfo.tr.Finish()
trInfo.firstLine.client = false
@@ -579,14 +635,14 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err != nil {
switch err := err.(type) {
case *rpcError:
- if err := t.WriteStatus(stream, err.code, err.desc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
case transport.ConnectionError:
// Nothing to do here.
case transport.StreamError:
- if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ if e := t.WriteStatus(stream, err.Code, err.Desc); e != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
@@ -597,20 +653,29 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
switch err := err.(type) {
case *rpcError:
- if err := t.WriteStatus(stream, err.code, err.desc); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ if e := t.WriteStatus(stream, err.code, err.desc); e != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
+ return err
default:
- if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
- grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ if e := t.WriteStatus(stream, codes.Internal, err.Error()); e != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
-
+ // TODO: checkRecvPayload always returns an RPC error. Add a return here if necessary.
+ }
+ }
+ var inPayload *stats.InPayload
+ if stats.On() {
+ inPayload = &stats.InPayload{
+ RecvTime: time.Now(),
}
- return err
}
statusCode := codes.OK
statusDesc := ""
df := func(v interface{}) error {
+ if inPayload != nil {
+ inPayload.WireLength = len(req)
+ }
if pf == compressionMade {
var err error
req, err = s.opts.dc.Do(bytes.NewReader(req))
@@ -618,7 +683,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
}
- return err
+ return Errorf(codes.Internal, err.Error())
}
}
if len(req) > s.opts.maxMsgSize {
@@ -630,6 +695,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if err := s.opts.codec.Unmarshal(req, v); err != nil {
return err
}
+ if inPayload != nil {
+ inPayload.Payload = v
+ inPayload.Data = req
+ inPayload.Length = len(req)
+ stats.HandleRPC(stream.Context(), inPayload)
+ }
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
@@ -650,9 +721,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
- return err
}
- return nil
+ return Errorf(statusCode, statusDesc)
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
@@ -677,11 +747,32 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
- return t.WriteStatus(stream, statusCode, statusDesc)
+ errWrite := t.WriteStatus(stream, statusCode, statusDesc)
+ if statusCode != codes.OK {
+ return Errorf(statusCode, statusDesc)
+ }
+ return errWrite
}
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+ if stats.On() {
+ begin := &stats.Begin{
+ BeginTime: time.Now(),
+ }
+ stats.HandleRPC(stream.Context(), begin)
+ }
+ defer func() {
+ if stats.On() {
+ end := &stats.End{
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ stats.HandleRPC(stream.Context(), end)
+ }
+ }()
if s.opts.cp != nil {
stream.SetSendCompress(s.opts.cp.Type())
}
@@ -744,7 +835,11 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
}
ss.mu.Unlock()
}
- return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
+ errWrite := t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
+ if ss.statusCode != codes.OK {
+ return Errorf(ss.statusCode, ss.statusDesc)
+ }
+ return errWrite
}
@@ -759,7 +854,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
trInfo.tr.SetError()
}
- if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
+ errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+ if err := t.WriteStatus(stream, codes.InvalidArgument, errDesc); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@@ -779,7 +875,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
trInfo.tr.SetError()
}
- if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
+ errDesc := fmt.Sprintf("unknown service %v", service)
+ if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
@@ -804,7 +901,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
trInfo.tr.SetError()
}
- if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
+ errDesc := fmt.Sprintf("unknown method %v", method)
+ if err := t.WriteStatus(stream, codes.Unimplemented, errDesc); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
trInfo.tr.SetError()
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 0000000..ce47786
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,146 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package stats
+
+import (
+ "net"
+ "sync/atomic"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+)
+
+// ConnTagInfo defines the relevant information needed by the connection context tagger.
+type ConnTagInfo struct {
+ // RemoteAddr is the remote address of the corresponding connection.
+ RemoteAddr net.Addr
+ // LocalAddr is the local address of the corresponding connection.
+ LocalAddr net.Addr
+ // TODO add QOS related fields.
+}
+
+// RPCTagInfo defines the relevant information needed by the RPC context tagger.
+type RPCTagInfo struct {
+ // FullMethodName is the RPC method in the format of /package.service/method.
+ FullMethodName string
+}
+
+var (
+ on = new(int32)
+ rpcHandler func(context.Context, RPCStats)
+ connHandler func(context.Context, ConnStats)
+ connTagger func(context.Context, *ConnTagInfo) context.Context
+ rpcTagger func(context.Context, *RPCTagInfo) context.Context
+)
+
+// HandleRPC processes the RPC stats using the RPC handler registered by the user.
+func HandleRPC(ctx context.Context, s RPCStats) {
+ if rpcHandler == nil {
+ return
+ }
+ rpcHandler(ctx, s)
+}
+
+// RegisterRPCHandler registers the user handler function for RPC stats processing.
+// It should be called only once; later calls overwrite the previously registered handler.
+// The registered function is called to process the RPC stats.
+func RegisterRPCHandler(f func(context.Context, RPCStats)) {
+ rpcHandler = f
+}
+
+// HandleConn processes the conn stats using the callback function registered by the user.
+func HandleConn(ctx context.Context, s ConnStats) {
+ if connHandler == nil {
+ return
+ }
+ connHandler(ctx, s)
+}
+
+// RegisterConnHandler registers the user handler function for conn stats.
+// It should be called only once; later calls overwrite the previously registered handler.
+// The registered function is called to process the conn stats.
+func RegisterConnHandler(f func(context.Context, ConnStats)) {
+ connHandler = f
+}
+
+// TagConn calls user registered connection context tagger.
+func TagConn(ctx context.Context, info *ConnTagInfo) context.Context {
+ if connTagger == nil {
+ return ctx
+ }
+ return connTagger(ctx, info)
+}
+
+// RegisterConnTagger registers the user connection context tagger function.
+// The connection context tagger can attach some information to the given context.
+// The returned context will be used for stats handling.
+// For conn stats handling, the context used in connHandler for this
+// connection will be derived from the context returned.
+// For RPC stats handling,
+// - On server side, the context used in rpcHandler for all RPCs on this
+// connection will be derived from the context returned.
+// - On client side, the context is not derived from the context returned.
+func RegisterConnTagger(t func(context.Context, *ConnTagInfo) context.Context) {
+ connTagger = t
+}
+
+// TagRPC calls the user registered RPC context tagger.
+func TagRPC(ctx context.Context, info *RPCTagInfo) context.Context {
+ if rpcTagger == nil {
+ return ctx
+ }
+ return rpcTagger(ctx, info)
+}
+
+// RegisterRPCTagger registers the user RPC context tagger function.
+// The RPC context tagger can attach some information to the given context.
+// The context used in stats rpcHandler for this RPC will be derived from the
+// context returned.
+func RegisterRPCTagger(t func(context.Context, *RPCTagInfo) context.Context) {
+ rpcTagger = t
+}
+
+// Start starts stats collection and processing if an RPC or conn handler has been registered.
+func Start() {
+ if rpcHandler == nil && connHandler == nil {
+ grpclog.Println("rpcHandler and connHandler are both nil when starting stats. Stats is not started")
+ return
+ }
+ atomic.StoreInt32(on, 1)
+}
+
+// On reports whether stats collection and processing are enabled.
+func On() bool {
+ return atomic.CompareAndSwapInt32(on, 1, 1)
+}
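
The package-level hooks above (taggers, handlers, Start/On) are all that is needed to turn stats reporting on. A minimal sketch of how a caller might wire them up follows; the tagger key, handler bodies, and log output are illustrative and not part of this change.

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

// methodKey is an illustrative context key used by the tagger below.
type methodKey struct{}

func main() {
	// Tag every RPC context so the handler can correlate later events for the same RPC.
	stats.RegisterRPCTagger(func(ctx context.Context, info *stats.RPCTagInfo) context.Context {
		return context.WithValue(ctx, methodKey{}, info.FullMethodName)
	})
	// Receive every RPC-level event (Begin, In/OutPayload, In/OutHeader, End, ...).
	stats.RegisterRPCHandler(func(ctx context.Context, s stats.RPCStats) {
		log.Printf("rpc stats (client=%v): %T", s.IsClient(), s)
	})
	// Receive connection-level events (ConnBegin, ConnEnd).
	stats.RegisterConnHandler(func(ctx context.Context, s stats.ConnStats) {
		log.Printf("conn stats (client=%v): %T", s.IsClient(), s)
	})
	// Nothing is reported until Start flips the switch that On() checks.
	stats.Start()

	// ... construct the gRPC client or server as usual; the transports call
	// stats.HandleRPC / stats.HandleConn only while On() returns true.
}
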
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 0000000..a82448a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,223 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+ "net"
+ "time"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+ isRPCStats()
+ // IsClient returns true if this RPCStats is from client side.
+ IsClient() bool
+}
+
+// Begin contains stats when an RPC begins.
+// FailFast is only valid if Client is true.
+type Begin struct {
+ // Client is true if this Begin is from client side.
+ Client bool
+ // BeginTime is the time when the RPC begins.
+ BeginTime time.Time
+ // FailFast indicates if this RPC is failfast.
+ FailFast bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// InPayload contains the information for an incoming payload.
+type InPayload struct {
+ // Client is true if this InPayload is from client side.
+ Client bool
+ // Payload is the payload with original type.
+ Payload interface{}
+ // Data is the serialized message payload.
+ Data []byte
+ // Length is the length of uncompressed data.
+ Length int
+ // WireLength is the length of data on wire (compressed, signed, encrypted).
+ WireLength int
+ // RecvTime is the time when the payload is received.
+ RecvTime time.Time
+}
+
+// IsClient indicates if this is from client side.
+func (s *InPayload) IsClient() bool { return s.Client }
+
+func (s *InPayload) isRPCStats() {}
+
+// InHeader contains stats when a header is received.
+// FullMethod, addresses and Compression are only valid if Client is false.
+type InHeader struct {
+ // Client is true if this InHeader is from client side.
+ Client bool
+ // WireLength is the wire length of header.
+ WireLength int
+
+ // FullMethod is the full RPC method string, i.e., /package.service/method.
+ FullMethod string
+ // RemoteAddr is the remote address of the corresponding connection.
+ RemoteAddr net.Addr
+ // LocalAddr is the local address of the corresponding connection.
+ LocalAddr net.Addr
+ // Compression is the compression algorithm used for the RPC.
+ Compression string
+}
+
+// IsClient indicates if this is from client side.
+func (s *InHeader) IsClient() bool { return s.Client }
+
+func (s *InHeader) isRPCStats() {}
+
+// InTrailer contains stats when a trailer is received.
+type InTrailer struct {
+ // Client is true if this InTrailer is from client side.
+ Client bool
+ // WireLength is the wire length of trailer.
+ WireLength int
+}
+
+// IsClient indicates if this is from client side.
+func (s *InTrailer) IsClient() bool { return s.Client }
+
+func (s *InTrailer) isRPCStats() {}
+
+// OutPayload contains the information for an outgoing payload.
+type OutPayload struct {
+ // Client is true if this OutPayload is from client side.
+ Client bool
+ // Payload is the payload with original type.
+ Payload interface{}
+ // Data is the serialized message payload.
+ Data []byte
+ // Length is the length of uncompressed data.
+ Length int
+ // WireLength is the length of data on wire (compressed, signed, encrypted).
+ WireLength int
+ // SentTime is the time when the payload is sent.
+ SentTime time.Time
+}
+
+// IsClient indicates if this is from client side.
+func (s *OutPayload) IsClient() bool { return s.Client }
+
+func (s *OutPayload) isRPCStats() {}
+
+// OutHeader contains stats when a header is sent.
+// FullMethod, addresses and Compression are only valid if Client is true.
+type OutHeader struct {
+ // Client is true if this OutHeader is from client side.
+ Client bool
+ // WireLength is the wire length of header.
+ WireLength int
+
+ // FullMethod is the full RPC method string, i.e., /package.service/method.
+ FullMethod string
+ // RemoteAddr is the remote address of the corresponding connection.
+ RemoteAddr net.Addr
+ // LocalAddr is the local address of the corresponding connection.
+ LocalAddr net.Addr
+ // Compression is the compression algorithm used for the RPC.
+ Compression string
+}
+
+// IsClient indicates if this is from client side.
+func (s *OutHeader) IsClient() bool { return s.Client }
+
+func (s *OutHeader) isRPCStats() {}
+
+// OutTrailer contains stats when a trailer is sent.
+type OutTrailer struct {
+ // Client is true if this OutTrailer is from client side.
+ Client bool
+ // WireLength is the wire length of trailer.
+ WireLength int
+}
+
+// IsClient indicates if this is from client side.
+func (s *OutTrailer) IsClient() bool { return s.Client }
+
+func (s *OutTrailer) isRPCStats() {}
+
+// End contains stats when an RPC ends.
+type End struct {
+ // Client is true if this End is from client side.
+ Client bool
+ // EndTime is the time when the RPC ends.
+ EndTime time.Time
+ // Error is the error that occurred during the RPC. Its type is a gRPC error.
+ Error error
+}
+
+// IsClient indicates if this is from client side.
+func (s *End) IsClient() bool { return s.Client }
+
+func (s *End) isRPCStats() {}
+
+// ConnStats contains stats information about connections.
+type ConnStats interface {
+ isConnStats()
+ // IsClient returns true if this ConnStats is from client side.
+ IsClient() bool
+}
+
+// ConnBegin contains the stats of a connection when it is established.
+type ConnBegin struct {
+ // Client is true if this ConnBegin is from client side.
+ Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnBegin) IsClient() bool { return s.Client }
+
+func (s *ConnBegin) isConnStats() {}
+
+// ConnEnd contains the stats of a connection when it ends.
+type ConnEnd struct {
+ // Client is true if this ConnEnd is from client side.
+ Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnEnd) IsClient() bool { return s.Client }
+
+func (s *ConnEnd) isConnStats() {}
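
Because every event type above implements RPCStats, a registered handler normally type-switches on the concrete type it receives. A rough sketch of such a consumer — the function name and log lines are illustrative — which could be passed to stats.RegisterRPCHandler from the previous file:

package rpcstats

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/stats"
)

// handleRPC consumes the RPC stats events defined above.
func handleRPC(ctx context.Context, s stats.RPCStats) {
	switch ev := s.(type) {
	case *stats.Begin:
		log.Printf("RPC began at %v (client=%v, failfast=%v)", ev.BeginTime, ev.Client, ev.FailFast)
	case *stats.OutPayload:
		log.Printf("sent %d bytes (%d on the wire) at %v", ev.Length, ev.WireLength, ev.SentTime)
	case *stats.InPayload:
		log.Printf("received %d bytes (%d on the wire) at %v", ev.Length, ev.WireLength, ev.RecvTime)
	case *stats.End:
		log.Printf("RPC ended at %v, err=%v", ev.EndTime, ev.Error)
	}
}
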
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 4681054..d3a4deb 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -45,6 +45,7 @@ import (
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/transport"
)
@@ -97,7 +98,7 @@ type ClientStream interface {
// NewClientStream creates a new Stream for the client side. This is called
// by generated code.
-func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
@@ -106,11 +107,18 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
var (
- t transport.ClientTransport
- s *transport.Stream
- put func()
+ t transport.ClientTransport
+ s *transport.Stream
+ put func()
+ cancel context.CancelFunc
)
c := defaultCallInfo
+ if mc, ok := cc.getMethodConfig(method); ok {
+ c.failFast = !mc.WaitForReady
+ if mc.Timeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, mc.Timeout)
+ }
+ }
for _, o := range opts {
if err := o.before(&c); err != nil {
return nil, toRPCErr(err)
@@ -143,6 +151,25 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
}()
}
+ if stats.On() {
+ ctx = stats.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method})
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: time.Now(),
+ FailFast: c.failFast,
+ }
+ stats.HandleRPC(ctx, begin)
+ }
+ defer func() {
+ if err != nil && stats.On() {
+ // Only handle end stats if err != nil.
+ end := &stats.End{
+ Client: true,
+ Error: err,
+ }
+ stats.HandleRPC(ctx, end)
+ }
+ }()
gopts := BalancerGetOptions{
BlockingWait: !c.failFast,
}
@@ -180,12 +207,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
break
}
cs := &clientStream{
- opts: opts,
- c: c,
- desc: desc,
- codec: cc.dopts.codec,
- cp: cc.dopts.cp,
- dc: cc.dopts.dc,
+ opts: opts,
+ c: c,
+ desc: desc,
+ codec: cc.dopts.codec,
+ cp: cc.dopts.cp,
+ dc: cc.dopts.dc,
+ cancel: cancel,
put: put,
t: t,
@@ -194,6 +222,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
tracing: EnableTracing,
trInfo: trInfo,
+
+ statsCtx: ctx,
}
if cc.dopts.cp != nil {
cs.cbuf = new(bytes.Buffer)
@@ -227,16 +257,17 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
// clientStream implements a client side Stream.
type clientStream struct {
- opts []CallOption
- c callInfo
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- desc *StreamDesc
- codec Codec
- cp Compressor
- cbuf *bytes.Buffer
- dc Decompressor
+ opts []CallOption
+ c callInfo
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ desc *StreamDesc
+ codec Codec
+ cp Compressor
+ cbuf *bytes.Buffer
+ dc Decompressor
+ cancel context.CancelFunc
tracing bool // set to EnableTracing when the clientStream is created.
@@ -246,6 +277,11 @@ type clientStream struct {
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
// and is set to nil when the clientStream's finish method is called.
trInfo traceInfo
+
+ // statsCtx keeps the user context for stats handling.
+ // All stats collection should use the statsCtx (instead of the stream context)
+ // so that all the generated stats for a particular RPC can be associated in the processing phase.
+ statsCtx context.Context
}
func (cs *clientStream) Context() context.Context {
@@ -274,6 +310,8 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
cs.mu.Unlock()
}
+ // TODO Investigate how to signal the stats handling party.
+ // generate error stats if err != nil && err != io.EOF?
defer func() {
if err != nil {
cs.finish(err)
@@ -296,7 +334,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
}
err = toRPCErr(err)
}()
- out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
+ var outPayload *stats.OutPayload
+ if stats.On() {
+ outPayload = &stats.OutPayload{
+ Client: true,
+ }
+ }
+ out, err := encode(cs.codec, m, cs.cp, cs.cbuf, outPayload)
defer func() {
if cs.cbuf != nil {
cs.cbuf.Reset()
@@ -305,11 +349,37 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
if err != nil {
return Errorf(codes.Internal, "grpc: %v", err)
}
- return cs.t.Write(cs.s, out, &transport.Options{Last: false})
+ err = cs.t.Write(cs.s, out, &transport.Options{Last: false})
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ stats.HandleRPC(cs.statsCtx, outPayload)
+ }
+ return err
}
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
+ defer func() {
+ if err != nil && stats.On() {
+ // Only generate End if err != nil.
+ // If err == nil, it's not the last RecvMsg.
+ // The last RecvMsg gets either an RPC error or io.EOF.
+ end := &stats.End{
+ Client: true,
+ EndTime: time.Now(),
+ }
+ if err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ stats.HandleRPC(cs.statsCtx, end)
+ }
+ }()
+ var inPayload *stats.InPayload
+ if stats.On() {
+ inPayload = &stats.InPayload{
+ Client: true,
+ }
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, inPayload)
defer func() {
// err != nil indicates the termination of the stream.
if err != nil {
@@ -324,11 +394,15 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
}
cs.mu.Unlock()
}
+ if inPayload != nil {
+ stats.HandleRPC(cs.statsCtx, inPayload)
+ }
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
return
}
// Special handling for client streaming rpc.
- err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32)
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32, nil)
cs.closeTransportStream(err)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
@@ -384,6 +458,11 @@ func (cs *clientStream) closeTransportStream(err error) {
}
func (cs *clientStream) finish(err error) {
+ defer func() {
+ if cs.cancel != nil {
+ cs.cancel()
+ }
+ }()
cs.mu.Lock()
defer cs.mu.Unlock()
for _, o := range cs.opts {
@@ -482,7 +561,11 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
ss.mu.Unlock()
}
}()
- out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
+ var outPayload *stats.OutPayload
+ if stats.On() {
+ outPayload = &stats.OutPayload{}
+ }
+ out, err := encode(ss.codec, m, ss.cp, ss.cbuf, outPayload)
defer func() {
if ss.cbuf != nil {
ss.cbuf.Reset()
@@ -495,6 +578,10 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if err := ss.t.Write(ss.s, out, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
+ if outPayload != nil {
+ outPayload.SentTime = time.Now()
+ stats.HandleRPC(ss.s.Context(), outPayload)
+ }
return nil
}
@@ -513,7 +600,11 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
ss.mu.Unlock()
}
}()
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize); err != nil {
+ var inPayload *stats.InPayload
+ if stats.On() {
+ inPayload = &stats.InPayload{}
+ }
+ if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize, inPayload); err != nil {
if err == io.EOF {
return err
}
@@ -522,5 +613,8 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
}
return toRPCErr(err)
}
+ if inPayload != nil {
+ stats.HandleRPC(ss.s.Context(), inPayload)
+ }
return nil
}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 0000000..0f36647
--- /dev/null
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package tap defines the function handles that are executed on the transport
+// layer of gRPC-Go, and the related information. Everything here is EXPERIMENTAL.
+package tap
+
+import (
+ "golang.org/x/net/context"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+ // FullMethodName is the string of grpc method (in the format of
+ // /package.service/method).
+ FullMethodName string
+ // TODO: More to be added.
+}
+
+// ServerInHandle defines the function that runs when a new stream is created
+// on the server side. Note that it is executed in the per-connection I/O goroutine(s)
+// rather than in a per-RPC goroutine, so users should NOT do any blocking or
+// time-consuming work in this handle; otherwise all the RPCs would slow down.
+type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
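A minimal sketch of a ServerInHandle follows; the function name and the method it rejects are hypothetical, and how the handle gets installed (via transport.ServerConfig.InTapHandle, introduced further below) is left to the server wiring.

package taps

import (
	"errors"

	"golang.org/x/net/context"
	"google.golang.org/grpc/tap"
)

// rejectShutdown must not block: the transport runs it on the connection's
// I/O goroutine before any per-RPC goroutine is spawned.
func rejectShutdown(ctx context.Context, info *tap.Info) (context.Context, error) {
	// Hypothetical method name, for illustration only.
	if info.FullMethodName == "/admin.Admin/Shutdown" {
		// A non-nil error makes the server transport refuse the stream
		// (RST_STREAM with REFUSED_STREAM; see operateHeaders below).
		return ctx, errors.New("method not allowed on this listener")
	}
	return ctx, nil
}

// Ensure the sketch matches the exported handle type.
var _ tap.ServerInHandle = rejectShutdown
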
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go
index 4ef0830..2586cba 100644
--- a/vendor/google.golang.org/grpc/transport/control.go
+++ b/vendor/google.golang.org/grpc/transport/control.go
@@ -111,35 +111,9 @@ func newQuotaPool(q int) *quotaPool {
return qb
}
-// add adds n to the available quota and tries to send it on acquire.
-func (qb *quotaPool) add(n int) {
- qb.mu.Lock()
- defer qb.mu.Unlock()
- qb.quota += n
- if qb.quota <= 0 {
- return
- }
- select {
- case qb.c <- qb.quota:
- qb.quota = 0
- default:
- }
-}
-
-// cancel cancels the pending quota sent on acquire, if any.
-func (qb *quotaPool) cancel() {
- qb.mu.Lock()
- defer qb.mu.Unlock()
- select {
- case n := <-qb.c:
- qb.quota += n
- default:
- }
-}
-
-// reset cancels the pending quota sent on acquired, incremented by v and sends
+// add cancels the pending quota sent on acquire, increments it by v, and sends
// it back on acquire.
-func (qb *quotaPool) reset(v int) {
+func (qb *quotaPool) add(v int) {
qb.mu.Lock()
defer qb.mu.Unlock()
select {
@@ -151,6 +125,10 @@ func (qb *quotaPool) reset(v int) {
if qb.quota <= 0 {
return
}
+ // After the pool has been created, this is the only place that sends on
+ // the channel. Since mu is held at this point and any quota that was sent
+ // on the channel has been retrieved, we know that this code will always
+ // place any positive quota value on the channel.
select {
case qb.c <- qb.quota:
qb.quota = 0
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go
index 114e349..10b6dc0 100644
--- a/vendor/google.golang.org/grpc/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -268,7 +268,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
})
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var ctx context.Context
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
index 2b0f680..605b1e5 100644
--- a/vendor/google.golang.org/grpc/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -51,16 +51,20 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
)
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
- target string // server name/addr
- userAgent string
- md interface{}
- conn net.Conn // underlying communication channel
- authInfo credentials.AuthInfo // auth info about the connection
- nextID uint32 // the next stream ID to be used
+ ctx context.Context
+ target string // server name/addr
+ userAgent string
+ md interface{}
+ conn net.Conn // underlying communication channel
+ remoteAddr net.Addr
+ localAddr net.Addr
+ authInfo credentials.AuthInfo // auth info about the connection
+ nextID uint32 // the next stream ID to be used
// writableChan synchronizes write access to the transport.
// A writer acquires the write lock by sending a value on writableChan
@@ -150,6 +154,9 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
scheme := "http"
conn, err := dial(ctx, opts.Dialer, addr.Addr)
if err != nil {
+ if opts.FailOnNonTempDialError {
+ return nil, connectionErrorf(isTemporary(err), err, "transport: %v", err)
+ }
return nil, connectionErrorf(true, err, "transport: %v", err)
}
// Any further errors will close the underlying connection
@@ -175,11 +182,14 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
}
var buf bytes.Buffer
t := &http2Client{
- target: addr.Addr,
- userAgent: ua,
- md: addr.Metadata,
- conn: conn,
- authInfo: authInfo,
+ ctx: ctx,
+ target: addr.Addr,
+ userAgent: ua,
+ md: addr.Metadata,
+ conn: conn,
+ remoteAddr: conn.RemoteAddr(),
+ localAddr: conn.LocalAddr(),
+ authInfo: authInfo,
// The client initiated stream id is odd starting from 1.
nextID: 1,
writableChan: make(chan int, 1),
@@ -199,6 +209,16 @@ func newHTTP2Client(ctx context.Context, addr TargetInfo, opts ConnectOptions) (
maxStreams: math.MaxInt32,
streamSendQuota: defaultWindowSize,
}
+ if stats.On() {
+ t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ })
+ connBegin := &stats.ConnBegin{
+ Client: true,
+ }
+ stats.HandleConn(t.ctx, connBegin)
+ }
// Start the reader goroutine for incoming message. Each transport has
// a dedicated goroutine which reads HTTP2 frame from network. Then it
// dispatches the frame to the corresponding stream entity.
@@ -270,12 +290,13 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
// streams.
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{
- Addr: t.conn.RemoteAddr(),
+ Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
+ userCtx := ctx
ctx = peer.NewContext(ctx, pr)
authData := make(map[string]string)
for _, c := range t.creds {
@@ -347,6 +368,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, ErrConnClosing
}
s := t.newStream(ctx, callHdr)
+ s.clientStatsCtx = userCtx
t.activeStreams[s.id] = s
// This stream is not counted when applySetings(...) initialize t.streamsQuota.
@@ -357,7 +379,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
t.mu.Unlock()
if reset {
- t.streamsQuota.reset(-1)
+ t.streamsQuota.add(-1)
}
// HPACK encodes various headers. Note that once WriteField(...) is
@@ -413,6 +435,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
}
}
first := true
+ bufLen := t.hBuf.Len()
// Sends the headers in a single batch even when they span multiple frames.
for !endHeaders {
size := t.hBuf.Len()
@@ -447,6 +470,17 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
return nil, connectionErrorf(true, err, "transport: %v", err)
}
}
+ if stats.On() {
+ outHeader := &stats.OutHeader{
+ Client: true,
+ WireLength: bufLen,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ }
+ stats.HandleRPC(s.clientStatsCtx, outHeader)
+ }
t.writableChan <- 0
return s, nil
}
@@ -525,6 +559,12 @@ func (t *http2Client) Close() (err error) {
s.mu.Unlock()
s.write(recvMsg{err: ErrConnClosing})
}
+ if stats.On() {
+ connEnd := &stats.ConnEnd{
+ Client: true,
+ }
+ stats.HandleConn(t.ctx, connEnd)
+ }
return
}
@@ -582,19 +622,14 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
var p []byte
if r.Len() > 0 {
size := http2MaxFrameLen
- s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil {
return err
}
- t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil {
- if _, ok := err.(StreamError); ok || err == io.EOF {
- t.sendQuotaPool.cancel()
- }
return err
}
if sq < size {
@@ -874,6 +909,24 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
endStream := frame.StreamEnded()
+ var isHeader bool
+ defer func() {
+ if stats.On() {
+ if isHeader {
+ inHeader := &stats.InHeader{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ }
+ stats.HandleRPC(s.clientStatsCtx, inHeader)
+ } else {
+ inTrailer := &stats.InTrailer{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ }
+ stats.HandleRPC(s.clientStatsCtx, inTrailer)
+ }
+ }
+ }()
s.mu.Lock()
if !endStream {
@@ -885,6 +938,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
close(s.headerChan)
s.headerDone = true
+ isHeader = true
}
if !endStream || s.state == streamDone {
s.mu.Unlock()
@@ -994,13 +1048,13 @@ func (t *http2Client) applySettings(ss []http2.Setting) {
t.maxStreams = int(s.Val)
t.mu.Unlock()
if reset {
- t.streamsQuota.reset(int(s.Val) - ms)
+ t.streamsQuota.add(int(s.Val) - ms)
}
case http2.SettingInitialWindowSize:
t.mu.Lock()
for _, stream := range t.activeStreams {
// Adjust the sending quota for each stream.
- stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
}
t.streamSendQuota = s.Val
t.mu.Unlock()
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
index a62fb7c..316188e 100644
--- a/vendor/google.golang.org/grpc/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -50,6 +50,8 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/tap"
)
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
@@ -58,9 +60,13 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
+ ctx context.Context
conn net.Conn
+ remoteAddr net.Addr
+ localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
+ inTapHandle tap.ServerInHandle
// writableChan synchronizes write access to the transport.
// A writer acquires the write lock by receiving a value on writableChan
// and releases it by sending on writableChan.
@@ -91,12 +97,13 @@ type http2Server struct {
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
+func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
framer := newFramer(conn)
// Send initial settings as connection preface to client.
var settings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
+ maxStreams := config.MaxStreams
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
@@ -121,12 +128,16 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI
}
var buf bytes.Buffer
t := &http2Server{
+ ctx: context.Background(),
conn: conn,
- authInfo: authInfo,
+ remoteAddr: conn.RemoteAddr(),
+ localAddr: conn.LocalAddr(),
+ authInfo: config.AuthInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
+ inTapHandle: config.InTapHandle,
controlBuf: newRecvBuffer(),
fc: &inFlow{limit: initialConnWindowSize},
sendQuotaPool: newQuotaPool(defaultWindowSize),
@@ -136,13 +147,21 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthI
activeStreams: make(map[uint32]*Stream),
streamSendQuota: defaultWindowSize,
}
+ if stats.On() {
+ t.ctx = stats.TagConn(t.ctx, &stats.ConnTagInfo{
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ })
+ connBegin := &stats.ConnBegin{}
+ stats.HandleConn(t.ctx, connBegin)
+ }
go t.controller()
t.writableChan <- 0
return t, nil
}
// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
buf := newRecvBuffer()
s := &Stream{
id: frame.Header().StreamID,
@@ -168,12 +187,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.recvCompress = state.encoding
if state.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
+ s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout)
} else {
- s.ctx, s.cancel = context.WithCancel(context.TODO())
+ s.ctx, s.cancel = context.WithCancel(t.ctx)
}
pr := &peer.Peer{
- Addr: t.conn.RemoteAddr(),
+ Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
@@ -195,6 +214,18 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
s.recvCompress = state.encoding
s.method = state.method
+ if t.inTapHandle != nil {
+ var err error
+ info := &tap.Info{
+ FullMethodName: state.method,
+ }
+ s.ctx, err = t.inTapHandle(s.ctx, info)
+ if err != nil {
+ // TODO: Log the real error.
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+ return
+ }
+ }
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
@@ -218,13 +249,26 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
}
+ s.ctx = traceCtx(s.ctx, s.method)
+ if stats.On() {
+ s.ctx = stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+ inHeader := &stats.InHeader{
+ FullMethod: s.method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: s.recvCompress,
+ WireLength: int(frame.Header().Length),
+ }
+ stats.HandleRPC(s.ctx, inHeader)
+ }
handle(s)
return
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
-func (t *http2Server) HandleStreams(handle func(*Stream)) {
+// traceCtx attaches trace to ctx and returns the new context.
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
@@ -279,7 +323,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if t.operateHeaders(frame, handle) {
+ if t.operateHeaders(frame, handle, traceCtx) {
t.Close()
break
}
@@ -492,9 +536,16 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
+ bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err
}
+ if stats.On() {
+ outHeader := &stats.OutHeader{
+ WireLength: bufLen,
+ }
+ stats.HandleRPC(s.Context(), outHeader)
+ }
t.writableChan <- 0
return nil
}
@@ -547,10 +598,17 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
+ bufLen := t.hBuf.Len()
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close()
return err
}
+ if stats.On() {
+ outTrailer := &stats.OutTrailer{
+ WireLength: bufLen,
+ }
+ stats.HandleRPC(s.Context(), outTrailer)
+ }
t.closeStream(s)
t.writableChan <- 0
return nil
@@ -579,19 +637,14 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
return nil
}
size := http2MaxFrameLen
- s.sendQuotaPool.add(0)
// Wait until the stream has some quota to send the data.
sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire())
if err != nil {
return err
}
- t.sendQuotaPool.add(0)
// Wait until the transport has some quota to send the data.
tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire())
if err != nil {
- if _, ok := err.(StreamError); ok {
- t.sendQuotaPool.cancel()
- }
return err
}
if sq < size {
@@ -659,7 +712,7 @@ func (t *http2Server) applySettings(ss []http2.Setting) {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
- stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ stream.sendQuotaPool.add(int(s.Val - t.streamSendQuota))
}
t.streamSendQuota = s.Val
}
@@ -736,6 +789,10 @@ func (t *http2Server) Close() (err error) {
for _, s := range streams {
s.cancel()
}
+ if stats.On() {
+ connEnd := &stats.ConnEnd{}
+ stats.HandleConn(t.ctx, connEnd)
+ }
return
}
@@ -767,7 +824,7 @@ func (t *http2Server) closeStream(s *Stream) {
}
func (t *http2Server) RemoteAddr() net.Addr {
- return t.conn.RemoteAddr()
+ return t.remoteAddr
}
func (t *http2Server) Drain() {
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
index 413f749..4726bb2 100644
--- a/vendor/google.golang.org/grpc/transport/transport.go
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -45,10 +45,10 @@ import (
"sync"
"golang.org/x/net/context"
- "golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/tap"
)
// recvMsg represents the received msg from the transport. All transport
@@ -167,6 +167,11 @@ type Stream struct {
id uint32
// nil for client side Stream.
st ServerTransport
+ // clientStatsCtx keeps the user context for stats handling.
+ // It's only valid on the client side. The server-side stats context is the same as s.ctx.
+ // All client side stats collection should use the clientStatsCtx (instead of the stream context)
+ // so that all the generated stats for a particular RPC can be associated in the processing phase.
+ clientStatsCtx context.Context
// ctx is the associated context of the stream.
ctx context.Context
// cancel is always nil for client side Stream.
@@ -266,11 +271,6 @@ func (s *Stream) Context() context.Context {
return s.ctx
}
-// TraceContext recreates the context of s with a trace.Trace.
-func (s *Stream) TraceContext(tr trace.Trace) {
- s.ctx = trace.NewContext(s.ctx, tr)
-}
-
// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
@@ -355,10 +355,17 @@ const (
draining
)
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+ MaxStreams uint32
+ AuthInfo credentials.AuthInfo
+ InTapHandle tap.ServerInHandle
+}
+
// NewServerTransport creates a ServerTransport with conn or non-nil error
// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
- return newHTTP2Server(conn, maxStreams, authInfo)
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+ return newHTTP2Server(conn, config)
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -367,6 +374,8 @@ type ConnectOptions struct {
UserAgent string
// Dialer specifies how to dial a network address.
Dialer func(context.Context, string) (net.Conn, error)
+ // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+ FailOnNonTempDialError bool
// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
PerRPCCredentials []credentials.PerRPCCredentials
// TransportCredentials stores the Authenticator required to setup a client connection.
@@ -466,7 +475,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream))
+ HandleStreams(func(*Stream), func(context.Context, string) context.Context)
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
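
To see how the reshaped transport API fits together, here is a rough sketch of driving it directly with the new ServerConfig and the two-argument HandleStreams. The function name, MaxStreams value, and handler bodies are placeholders; grpc.Server performs this wiring internally.

package transports

import (
	"net"

	"golang.org/x/net/context"
	"google.golang.org/grpc/transport"
)

// serveConn builds a server transport from a raw connection and hands every
// incoming stream to a placeholder handler.
func serveConn(conn net.Conn) error {
	st, err := transport.NewServerTransport("http2", conn, &transport.ServerConfig{
		MaxStreams: 100, // illustrative limit; AuthInfo and InTapHandle are optional
	})
	if err != nil {
		return err
	}
	defer st.Close()
	// HandleStreams now also takes a traceCtx function that can attach
	// tracing to each stream's context; a no-op is used here.
	st.HandleStreams(func(s *transport.Stream) {
		// Dispatch s to the RPC layer (placeholder).
	}, func(ctx context.Context, method string) context.Context {
		return ctx
	})
	return nil
}
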
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 1412468..f8af349 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -21,176 +21,176 @@
{
"checksumSHA1": "ZLRh6zW4/DnVsGpgtt+ZiIaEFKc=",
"path": "cloud.google.com/go/compute/metadata",
- "revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
- "revisionTime": "2016-10-31T01:45:47Z"
+ "revision": "4e436992a513aa1dec28f8adda5c8173724d493e",
+ "revisionTime": "2016-12-26T12:39:03Z"
},
{
- "checksumSHA1": "hiJXjkFEGy+sDFf6O58Ocdy9Rnk=",
+ "checksumSHA1": "4iounbuF7SMZdx/MlKSUuhnV848=",
"path": "cloud.google.com/go/internal",
- "revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
- "revisionTime": "2016-10-31T01:45:47Z"
+ "revision": "4e436992a513aa1dec28f8adda5c8173724d493e",
+ "revisionTime": "2016-12-26T12:39:03Z"
},
{
"checksumSHA1": "W2xJ0+fvugRhRi1PMi64bYofBbU=",
"path": "cloud.google.com/go/internal/optional",
- "revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
- "revisionTime": "2016-10-31T01:45:47Z"
+ "revision": "4e436992a513aa1dec28f8adda5c8173724d493e",
+ "revisionTime": "2016-12-26T12:39:03Z"
},
{
- "checksumSHA1": "0l3lg9DRQ16WhfwQhhT3Q/dQdMU=",
+ "checksumSHA1": "cHbMVt14iTMFZHeZMM9Q06dSV+M=",
"path": "cloud.google.com/go/storage",
- "revision": "7ee19e74f7fd11897ae3b7c4782d8205e810faa7",
- "revisionTime": "2016-10-31T01:45:47Z"
+ "revision": "4e436992a513aa1dec28f8adda5c8173724d493e",
+ "revisionTime": "2016-12-26T12:39:03Z"
},
{
- "checksumSHA1": "NeKH+twA+3z7EzaKQQdN5FIhJP4=",
+ "checksumSHA1": "ViE0bliPiLZR8jWVm/lkDay45C4=",
"path": "github.com/aws/aws-sdk-go/aws",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=",
"path": "github.com/aws/aws-sdk-go/aws/awserr",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "+q4vdl3l1Wom8K1wfIpJ4jlFsbY=",
+ "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=",
"path": "github.com/aws/aws-sdk-go/aws/awsutil",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "/232RBWA3KnT7U+wciPS2+wmvR0=",
+ "checksumSHA1": "7cQU8tU9hBgsG23XZmko1GePqjQ=",
"path": "github.com/aws/aws-sdk-go/aws/client",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
"path": "github.com/aws/aws-sdk-go/aws/client/metadata",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "c1N3Loy3AS9zD+m5CzpPNAED39U=",
+ "checksumSHA1": "Fl8vRSCY0MbM04cmiz/0MID+goA=",
"path": "github.com/aws/aws-sdk-go/aws/corehandlers",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=",
"path": "github.com/aws/aws-sdk-go/aws/credentials",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=",
+ "checksumSHA1": "u3GOAJLmdvbuNUeUEcZSEAOeL/0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=",
"path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "DwhFsNluCFEwqzyp3hbJR3q2Wqs=",
+ "checksumSHA1": "lqh3fG7wCochvB4iHAZJuhhEJW0=",
"path": "github.com/aws/aws-sdk-go/aws/defaults",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "8E0fEBUJY/1lJOyVxzTxMGQGInk=",
+ "checksumSHA1": "/EXbk/z2TWjWc1Hvb4QYs3Wmhb8=",
"path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "5Ac22YMTBmrX/CXaEIXzWljr8UY=",
+ "checksumSHA1": "6R/PDscI/I+Vd/xHdcLUrTz56gc=",
+ "path": "github.com/aws/aws-sdk-go/aws/endpoints",
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
+ },
+ {
+ "checksumSHA1": "M78rTxU55Qagqr3MYj91im2031E=",
"path": "github.com/aws/aws-sdk-go/aws/request",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "eOo6evLMAxQfo7Qkc5/h5euN1Sw=",
+ "checksumSHA1": "HynfxYegMG8sq9MpFfPu7h1EOvI=",
"path": "github.com/aws/aws-sdk-go/aws/session",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "diXvBs1LRC0RJ9WK6sllWKdzC04=",
+ "checksumSHA1": "0FvPLvkBUpTElfUc/FZtPsJfuV0=",
"path": "github.com/aws/aws-sdk-go/aws/signer/v4",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
- },
- {
- "checksumSHA1": "Esab5F8KswqkTdB4TtjSvZgs56k=",
- "path": "github.com/aws/aws-sdk-go/private/endpoints",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
"path": "github.com/aws/aws-sdk-go/private/protocol",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "ZqY5RWavBLWTo6j9xqdyBEaNFRk=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=",
+ "checksumSHA1": "hqTEmgtchF9SwVTW0IQId2eLUKM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=",
+ "checksumSHA1": "szZSLm3BlYkL3vqlZhNAlYk8iwM=",
"path": "github.com/aws/aws-sdk-go/private/protocol/rest",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "ODo+ko8D6unAxZuN1jGzMcN4QCc=",
"path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=",
+ "checksumSHA1": "lZ1z4xAbT8euCzKoAsnEYic60VE=",
"path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
"path": "github.com/aws/aws-sdk-go/private/waiter",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "qePzsxMg/6Y4mf02KUl8mnJJWFE=",
+ "checksumSHA1": "T7mlZERNJ2RpbazA5BvvXKA7MJc=",
"path": "github.com/aws/aws-sdk-go/service/s3",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
- "checksumSHA1": "eOOeajRwz6FG6QF+F4LQvbm3Ilk=",
+ "checksumSHA1": "kZvAnT2KFYZBxgUHXzV0kv3KccA=",
"path": "github.com/aws/aws-sdk-go/service/sts",
- "revision": "4742f4bc1f93f618a2298544cb5f44a5fe91652c",
- "revisionTime": "2016-10-27T22:14:16Z"
+ "revision": "34836244f5c4e694b99d1a81898af5559a3c9c6d",
+ "revisionTime": "2016-12-27T23:41:05Z"
},
{
"checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=",
@@ -211,28 +211,28 @@
"revisionTime": "2016-10-26T20:31:22Z"
},
{
- "checksumSHA1": "cVyhKIRI2gQrgpn5qrBeAqErmWM=",
+ "checksumSHA1": "w4ksomRMCZjbwnPupNvmTt5E6v8=",
"path": "github.com/go-ini/ini",
- "revision": "6e4869b434bd001f6983749881c7ead3545887d8",
- "revisionTime": "2016-08-27T06:11:18Z"
+ "revision": "6f66b0e091edb3c7b380f7c4f0f884274d550b67",
+ "revisionTime": "2016-12-21T12:03:40Z"
},
{
- "checksumSHA1": "++NbuL6/cpFR712EXGnWusURqC0=",
+ "checksumSHA1": "QD6LqgLz2JMxXqns8TaxtK9AuHs=",
"path": "github.com/go-sql-driver/mysql",
- "revision": "2a6c6079c7eff49a7e9d641e109d922f124a3e4c",
- "revisionTime": "2016-10-26T16:05:54Z"
+ "revision": "2e00b5cd70399450106cec6431c2e2ce3cae5034",
+ "revisionTime": "2016-12-24T12:10:19Z"
},
{
- "checksumSHA1": "SVXOQdpDBh0ihdZ5aIflgdA+Rpw=",
+ "checksumSHA1": "kBeNcaKk56FguvPSUCEaH6AxpRc=",
"path": "github.com/golang/protobuf/proto",
- "revision": "98fa357170587e470c5f27d3c3ea0947b71eb455",
- "revisionTime": "2016-10-12T20:53:35Z"
+ "revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
+ "revisionTime": "2016-11-17T03:31:26Z"
},
{
- "checksumSHA1": "TSS9EqiOoJcVUMNb46zVngVrAO8=",
+ "checksumSHA1": "YoBBqJws4yiZkuyRNkie90Qvc4o=",
"path": "github.com/google/go-github/github",
- "revision": "f7fcf6f52ff94adf1cc0ded41e7768d2ad729972",
- "revisionTime": "2016-10-28T15:10:40Z"
+ "revision": "4b026b323c199177ad865434c0266e8bcd5ec83d",
+ "revisionTime": "2016-12-27T18:43:37Z"
},
{
"checksumSHA1": "yyAzHoiVLu+xywYI2BDyRq6sOqE=",
@@ -241,10 +241,10 @@
"revisionTime": "2016-03-11T01:20:12Z"
},
{
- "checksumSHA1": "aOMWp7Ut7soV3u/pf5DgBOuZ+3E=",
+ "checksumSHA1": "V/53BpqgOkSDZCX6snQCAkdO2fM=",
"path": "github.com/googleapis/gax-go",
- "revision": "4f7da601ca02aa546c5ef911a0b6595fdadf4e18",
- "revisionTime": "2016-10-18T00:53:01Z"
+ "revision": "da06d194a00e19ce00d9011a13931c3f6f6887c7",
+ "revisionTime": "2016-11-07T00:24:06Z"
},
{
"checksumSHA1": "g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=",
@@ -253,16 +253,16 @@
"revisionTime": "2016-08-17T18:46:32Z"
},
{
- "checksumSHA1": "h6Nxp2ashaaMJYWE//+X83Hpi4A=",
+ "checksumSHA1": "2ieXR7YGRBDmQWVeUpjk5ay8iCE=",
"path": "github.com/gorilla/csrf",
- "revision": "fdae182b1882857ae6a246467084c30af79be824",
- "revisionTime": "2016-10-23T17:09:07Z"
+ "revision": "69581736821c33d85bbf378f42f6ad864dbd85de",
+ "revisionTime": "2016-11-22T16:45:00Z"
},
{
- "checksumSHA1": "wu3jCY1Ny2tPdsj36W6YH1Bm2No=",
+ "checksumSHA1": "I+GdpjQMP4tAYwF4CAklylRWeYI=",
"path": "github.com/gorilla/handlers",
- "revision": "e1b2144f2167de0e1042d1d35e5cba5119d4fb5d",
- "revisionTime": "2016-10-28T13:32:15Z"
+ "revision": "3a5767ca75ece5f7f1440b1d16975247f8d8b221",
+ "revisionTime": "2016-12-06T05:51:44Z"
},
{
"checksumSHA1": "urMd7A9QPAJYY0GZJL9qBhlUmD8=",
@@ -277,10 +277,10 @@
"revisionTime": "2016-10-03T05:16:01Z"
},
{
- "checksumSHA1": "f1dfF3vmO95G/hbwMYlFru/FfMo=",
+ "checksumSHA1": "vYfmVqC/UFKA66ZZdUJsnIcXR4E=",
"path": "github.com/gorilla/sessions",
- "revision": "ca9ada44574153444b00d3fd9c8559e4cc95f896",
- "revisionTime": "2016-09-22T14:58:04Z"
+ "revision": "83c8db3bdc9be789e57e3756ffbcffd2d7d40176",
+ "revisionTime": "2016-12-22T01:33:42Z"
},
{
"checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=",
@@ -295,10 +295,10 @@
"revisionTime": "2016-04-07T17:41:26Z"
},
{
- "checksumSHA1": "KS9lmJV8Z7KHdtSIhbafQLU1hC4=",
+ "checksumSHA1": "4fgV2vzKKVyIlQ9nMyXIP5muq9E=",
"path": "github.com/hashicorp/go-multierror",
- "revision": "8c5f0ad9360406a3807ce7de6bc73269a91a6e51",
- "revisionTime": "2016-08-11T01:57:21Z"
+ "revision": "ed905158d87462226a13fe39ddf685ea65f1c11f",
+ "revisionTime": "2016-12-16T18:43:04Z"
},
{
"checksumSHA1": "A1PcINvF3UiwHRKn8UcgARgvGRs=",
@@ -307,76 +307,76 @@
"revisionTime": "2016-05-03T14:34:40Z"
},
{
- "checksumSHA1": "8OPDk+bKyRGJoKcS4QNw9F7dpE8=",
+ "checksumSHA1": "Ok3Csn6Voou7pQT6Dv2mkwpqFtw=",
"path": "github.com/hashicorp/hcl",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
"checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=",
"path": "github.com/hashicorp/hcl/hcl/ast",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
- "checksumSHA1": "croNloscHsjX87X+4/cKOURf1EY=",
+ "checksumSHA1": "vF6LLywGDoAaccTcAGrcY7mYvZc=",
"path": "github.com/hashicorp/hcl/hcl/parser",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
- "checksumSHA1": "lgR7PSAZ0RtvAc9OCtCnNsF/x8g=",
+ "checksumSHA1": "z6wdP4mRw4GVjShkNHDaOWkbxS0=",
"path": "github.com/hashicorp/hcl/hcl/scanner",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
- "checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=",
+ "checksumSHA1": "oS3SCN9Wd6D8/LG0Yx1fu84a7gI=",
"path": "github.com/hashicorp/hcl/hcl/strconv",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
"checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
"path": "github.com/hashicorp/hcl/hcl/token",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
"checksumSHA1": "138aCV5n8n7tkGYMsMVQQnnLq+0=",
"path": "github.com/hashicorp/hcl/json/parser",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
"checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=",
"path": "github.com/hashicorp/hcl/json/scanner",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
"checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
"path": "github.com/hashicorp/hcl/json/token",
- "revision": "8fa153c5b4e9d1ccecda7075821ffc7c1f6d912b",
- "revisionTime": "2016-10-28T23:32:40Z"
+ "revision": "80e628d796135357b3d2e33a985c666b9f35eee1",
+ "revisionTime": "2016-12-15T22:58:39Z"
},
{
- "checksumSHA1": "2fkVZIzvxIGBLhSiVnkTgGiqpQ4=",
+ "checksumSHA1": "31yBeS6U3xm7VJ7ZvDxRgBxXP0A=",
"path": "github.com/hashicorp/vault/api",
- "revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
- "revisionTime": "2016-10-30T17:09:56Z"
+ "revision": "e9519ebd2692409e85c6ad13a0dc2d2c3db488bb",
+ "revisionTime": "2016-12-28T17:41:50Z"
},
{
"checksumSHA1": "ft77GtqeZEeCXioGpF/s6DlGm/U=",
"path": "github.com/hashicorp/vault/helper/compressutil",
- "revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
- "revisionTime": "2016-10-30T17:09:56Z"
+ "revision": "e9519ebd2692409e85c6ad13a0dc2d2c3db488bb",
+ "revisionTime": "2016-12-28T17:41:50Z"
},
{
"checksumSHA1": "yUiSTPf0QUuL2r/81sjuytqBoeQ=",
"path": "github.com/hashicorp/vault/helper/jsonutil",
- "revision": "e78065ec4ea0f022789e60f0c10c8a996f6d23d4",
- "revisionTime": "2016-10-30T17:09:56Z"
+ "revision": "e9519ebd2692409e85c6ad13a0dc2d2c3db488bb",
+ "revisionTime": "2016-12-28T17:41:50Z"
},
{
"checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
@@ -385,34 +385,28 @@
"revisionTime": "2016-08-03T19:07:31Z"
},
{
- "checksumSHA1": "KQhA4EQp4Ldwj9nJZnEURlE6aQw=",
- "path": "github.com/kr/fs",
- "revision": "2788f0dbd16903de03cb8186e5c7d97b69ad387b",
- "revisionTime": "2013-11-06T22:25:44Z"
- },
- {
- "checksumSHA1": "S6PDDQMYaKwLDIP/NsRYb4FRAqQ=",
+ "checksumSHA1": "506eXGmFfB7mgzbMcsdT/UAXJgI=",
"path": "github.com/magiconair/properties",
- "revision": "0723e352fa358f9322c938cc2dadda874e9151a9",
- "revisionTime": "2016-09-08T09:36:58Z"
+ "revision": "9c47895dc1ce54302908ab8a43385d1f5df2c11c",
+ "revisionTime": "2016-11-28T00:34:34Z"
},
{
- "checksumSHA1": "9FJUwn3EIgASVki+p8IHgWVC5vQ=",
+ "checksumSHA1": "9NOEwusDSfhREf0ix7ULW9f3hhw=",
"path": "github.com/mattn/go-sqlite3",
- "revision": "86681de00adef4f8040947b7d35f97000fc5a230",
- "revisionTime": "2016-10-28T14:22:18Z"
+ "revision": "2d44decb4941c9cdf72c22297b7890faf7da9bcb",
+ "revisionTime": "2016-12-15T04:15:57Z"
},
{
- "checksumSHA1": "AXacfEchaUqT5RGmPmMXsOWRhv8=",
+ "checksumSHA1": "V/quM7+em2ByJbWBLOsEwnY3j/Q=",
"path": "github.com/mitchellh/go-homedir",
- "revision": "756f7b183b7ab78acdbbee5c7f392838ed459dda",
- "revisionTime": "2016-06-21T17:42:43Z"
+ "revision": "b8bc1bf767474819792c23f32d8286a45736f1c6",
+ "revisionTime": "2016-12-03T19:45:07Z"
},
{
- "checksumSHA1": "UuXgD2dDojfS8AViUEe15gLIWZE=",
+ "checksumSHA1": "LAR/G/IY1GviHYkGAoi6kVXq1Jg=",
"path": "github.com/mitchellh/mapstructure",
- "revision": "f3009df150dadf309fdee4a54ed65c124afad715",
- "revisionTime": "2016-10-20T16:18:36Z"
+ "revision": "bfdb1a85537d60bc7e954e600c250219ea497417",
+ "revisionTime": "2016-12-11T22:23:15Z"
},
{
"checksumSHA1": "8Y05Pz7onrQPcVWW6JStSsYRh6E=",
@@ -421,10 +415,10 @@
"revisionTime": "2016-01-24T19:35:03Z"
},
{
- "checksumSHA1": "CtI2rBQw2rxHNIU+a0rcAf29Sco=",
+ "checksumSHA1": "8HO0U5Iblu0bhhmmp6kSGwHBBak=",
"path": "github.com/pelletier/go-toml",
- "revision": "45932ad32dfdd20826f5671da37a5f3ce9f26a8d",
- "revisionTime": "2016-09-20T07:07:15Z"
+ "revision": "017119f7a78a0b5fc0ea39ef6be09f03acf3345d",
+ "revisionTime": "2016-12-13T14:20:06Z"
},
{
"checksumSHA1": "F6aWtRaqjbiLqtReuNIgl08JqhI=",
@@ -439,12 +433,6 @@
"revisionTime": "2016-10-29T09:36:37Z"
},
{
- "checksumSHA1": "k9SlQdp/DTB72G/u4aNecX/fFIg=",
- "path": "github.com/pkg/sftp",
- "revision": "4d0e916071f68db74f8a73926335f809396d6b42",
- "revisionTime": "2016-09-30T22:07:58Z"
- },
- {
"checksumSHA1": "LuFv4/jlrmFNnDb/5SCSEPAM9vU=",
"path": "github.com/pmezard/go-difflib/difflib",
"revision": "792786c7400a136282c1664665ae0a8db921c6c2",
@@ -453,8 +441,8 @@
{
"checksumSHA1": "Qm7DuiE3Cn0+CelfV9tggSPNG0k=",
"path": "github.com/sethgrid/pester",
- "revision": "2a102734c18c43c74fd0664e06cd414cf9602b93",
- "revisionTime": "2016-09-16T18:34:45Z"
+ "revision": "ab3a58f3fbc32058b25cf9e416272cf0097be523",
+ "revisionTime": "2016-12-02T16:40:13Z"
},
{
"checksumSHA1": "wwt9oTMyWLdPZhkTipVJnZcamFU=",
@@ -469,28 +457,22 @@
"revisionTime": "2016-08-27T17:29:50Z"
},
{
- "checksumSHA1": "59wTbS4fE2282Q88NrBYImbFGbo=",
+ "checksumSHA1": "iGmLeUVcmiFgd4/iK2Tmx7WIoa4=",
"path": "github.com/spf13/afero",
- "revision": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78",
- "revisionTime": "2016-09-19T21:01:14Z"
+ "revision": "90dd71edc4d0a8b3511dc12ea15d617d03be09e0",
+ "revisionTime": "2016-12-26T09:19:39Z"
},
{
"checksumSHA1": "u6B0SEgZ/TUEfIvF6w/HnFVQbII=",
"path": "github.com/spf13/afero/mem",
- "revision": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78",
- "revisionTime": "2016-09-19T21:01:14Z"
- },
- {
- "checksumSHA1": "sLyAUiIT7V0DNVp6yBhW4Ms5BEs=",
- "path": "github.com/spf13/afero/sftp",
- "revision": "52e4a6cfac46163658bd4f123c49b6ee7dc75f78",
- "revisionTime": "2016-09-19T21:01:14Z"
+ "revision": "90dd71edc4d0a8b3511dc12ea15d617d03be09e0",
+ "revisionTime": "2016-12-26T09:19:39Z"
},
{
- "checksumSHA1": "M4qI7Ul0vf2uj3fF69DvRnwqfd0=",
+ "checksumSHA1": "WASQjQVYHiVtAOxvRbl6I23etGE=",
"path": "github.com/spf13/cast",
- "revision": "2580bc98dc0e62908119e4737030cc2fdfc45e4c",
- "revisionTime": "2016-09-26T08:42:49Z"
+ "revision": "56a7ecbeb18dde53c6db4bd96b541fd9741b8d44",
+ "revisionTime": "2016-12-24T17:25:03Z"
},
{
"checksumSHA1": "dkruahfhuLXXuyeCuRpsWlcRK+8=",
@@ -499,22 +481,22 @@
"revisionTime": "2016-03-01T12:00:06Z"
},
{
- "checksumSHA1": "GxPD7A0NjMDom1xte0mghkpzr0E=",
+ "checksumSHA1": "AxfxmmBpbjQoaQKXbERcEw9pv+U=",
"path": "github.com/spf13/pflag",
- "revision": "5ccb023bc27df288a957c5e994cd44fd19619465",
- "revisionTime": "2016-10-24T13:13:51Z"
+ "revision": "25f8b5b07aece3207895bf19f7ab517eb3b22a40",
+ "revisionTime": "2016-12-14T04:49:49Z"
},
{
- "checksumSHA1": "802GjFNHMmnFXEIkQ137ucUUacI=",
+ "checksumSHA1": "Ru5RGygreAp5OW9B6kO5Zawjt08=",
"path": "github.com/spf13/viper",
- "revision": "651d9d916abc3c3d6a91a12549495caba5edffd2",
- "revisionTime": "2016-10-29T21:33:52Z"
+ "revision": "5ed0fc31f7f453625df314d8e66b9791e8d13003",
+ "revisionTime": "2016-12-13T09:38:49Z"
},
{
- "checksumSHA1": "Q2V7Zs3diLmLfmfbiuLpSxETSuY=",
+ "checksumSHA1": "JXUVA1jky8ZX8w09p2t5KLs97Nc=",
"path": "github.com/stretchr/testify/assert",
- "revision": "976c720a22c8eb4eb6a0b4348ad85ad12491a506",
- "revisionTime": "2016-09-25T22:06:09Z"
+ "revision": "2402e8e7a02fc811447d11f881aa9746cdc57983",
+ "revisionTime": "2016-12-17T20:04:45Z"
},
{
"checksumSHA1": "Mqb+Cn1S2ZN/nOfn8FDaAiXTdhU=",
@@ -525,14 +507,14 @@
{
"checksumSHA1": "BS9oue0y6JjMzz3spKlMTVmxZxo=",
"path": "go4.org/wkfs",
- "revision": "399a9d7bfe85437346a5ec4ef0450fc2f1084e61",
- "revisionTime": "2016-09-23T17:06:11Z"
+ "revision": "09d86de304dc27e636298361bbfee4ac6ab04f21",
+ "revisionTime": "2016-11-18T21:00:15Z"
},
{
"checksumSHA1": "VcZWSieqrSxETQY2EP97rg4kLAw=",
"path": "go4.org/wkfs/gcs",
- "revision": "399a9d7bfe85437346a5ec4ef0450fc2f1084e61",
- "revisionTime": "2016-09-23T17:06:11Z"
+ "revision": "09d86de304dc27e636298361bbfee4ac6ab04f21",
+ "revisionTime": "2016-11-18T21:00:15Z"
},
{
"checksumSHA1": "TK1Yr8BbwionaaAvM+77lwAAx/8=",
@@ -549,344 +531,362 @@
{
"checksumSHA1": "dwOedwBJ1EIK9+S3t108Bx054Y8=",
"path": "golang.org/x/crypto/curve25519",
- "revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
- "revisionTime": "2016-10-28T17:07:08Z"
+ "revision": "f6b343c37ca80bfa8ea539da67a0b621f84fab1d",
+ "revisionTime": "2016-12-21T04:54:10Z"
},
{
"checksumSHA1": "wGb//LjBPNxYHqk+dcLo7BjPXK8=",
"path": "golang.org/x/crypto/ed25519",
- "revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
- "revisionTime": "2016-10-28T17:07:08Z"
+ "revision": "f6b343c37ca80bfa8ea539da67a0b621f84fab1d",
+ "revisionTime": "2016-12-21T04:54:10Z"
},
{
"checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=",
"path": "golang.org/x/crypto/ed25519/internal/edwards25519",
- "revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
- "revisionTime": "2016-10-28T17:07:08Z"
+ "revision": "f6b343c37ca80bfa8ea539da67a0b621f84fab1d",
+ "revisionTime": "2016-12-21T04:54:10Z"
},
{
- "checksumSHA1": "LlElMHeTC34ng8eHzjvtUhAgrr8=",
+ "checksumSHA1": "RnJfaFwKpC0h06J2MZ8UZX2eohc=",
"path": "golang.org/x/crypto/ssh",
- "revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
- "revisionTime": "2016-10-28T17:07:08Z"
+ "revision": "f6b343c37ca80bfa8ea539da67a0b621f84fab1d",
+ "revisionTime": "2016-12-21T04:54:10Z"
},
{
"checksumSHA1": "SJ3Ma3Ozavxpbh1usZWBCnzMKIc=",
"path": "golang.org/x/crypto/ssh/agent",
- "revision": "b2fa06b6af4b7c9bfeb8569ab7b17f04550717bf",
- "revisionTime": "2016-10-28T17:07:08Z"
+ "revision": "f6b343c37ca80bfa8ea539da67a0b621f84fab1d",
+ "revisionTime": "2016-12-21T04:54:10Z"
},
{
- "checksumSHA1": "4hQNaJUg67lF/QcO0NKzUeqlaew=",
+ "checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=",
"path": "golang.org/x/net/context",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
"path": "golang.org/x/net/context/ctxhttp",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
- "checksumSHA1": "9h3b5EBNNgpvZbn6B+AbdRAmcIg=",
+ "checksumSHA1": "xeNBHVPCJv8N8DzrYJQbzRNRZbs=",
"path": "golang.org/x/net/http2",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "HzuGD7AwgC0p1az1WAQnEFnEk98=",
"path": "golang.org/x/net/http2/hpack",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "GIGmSrYACByf5JDIP9ByBZksY80=",
"path": "golang.org/x/net/idna",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
"path": "golang.org/x/net/internal/timeseries",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "3xyuaSNmClqG4YWC7g0isQIbUTc=",
"path": "golang.org/x/net/lex/httplex",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
- "checksumSHA1": "4MMbG0LI3ghvWooRn36RmDrFIB0=",
+ "checksumSHA1": "P9qTIn8a6L6Q9wd1IJBCuhno1Q8=",
"path": "golang.org/x/net/trace",
- "revision": "4cfeeeb61ae8daa2a4b06572024786e56f23504c",
- "revisionTime": "2016-10-30T07:22:20Z"
+ "revision": "45e771701b814666a7eb299e6c7a57d0b1799e91",
+ "revisionTime": "2016-12-15T19:42:18Z"
},
{
"checksumSHA1": "XH7CgbL5Z8COUc+MKrYqS3FFosY=",
"path": "golang.org/x/oauth2",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
"checksumSHA1": "Yokz/Wl4zeuOZG2ev8LuaLtMotE=",
"path": "golang.org/x/oauth2/github",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
- "checksumSHA1": "92TBjKPPMEcAfNqc2xWF8fSfZMg=",
+ "checksumSHA1": "1TbkNRLtD4BFSBT1+dZ19RbGd/g=",
"path": "golang.org/x/oauth2/google",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
- "checksumSHA1": "D3v/aqfB9swlaZcSksCoF+lbOqo=",
+ "checksumSHA1": "Wqm34oALxi3GkTSHIBa/EcfE37Y=",
"path": "golang.org/x/oauth2/internal",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
"checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=",
"path": "golang.org/x/oauth2/jws",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
- "checksumSHA1": "McqNj0/805YfYQJQGomeB0s+EcU=",
+ "checksumSHA1": "/eV4E08BY+f1ZikiR7OOMJAj3m0=",
"path": "golang.org/x/oauth2/jwt",
- "revision": "25b4fb1468cb89700c7c060cb99f30581a61f5e3",
- "revisionTime": "2016-10-25T17:59:40Z"
+ "revision": "314dd2c0bf3ebd592ec0d20847d27e79d0dbe8dd",
+ "revisionTime": "2016-12-14T09:25:55Z"
},
{
- "checksumSHA1": "aVgPDgwY3/t4J/JOw9H3FVMHqh0=",
+ "checksumSHA1": "uTQtOqR0ePMMcvuvAIksiIZxhqU=",
"path": "golang.org/x/sys/unix",
- "revision": "c200b10b5d5e122be351b67af224adc6128af5bf",
- "revisionTime": "2016-10-22T18:22:21Z"
+ "revision": "d75a52659825e75fff6158388dddc6a5b04f9ba5",
+ "revisionTime": "2016-12-14T18:38:57Z"
},
{
- "checksumSHA1": "B3d1je1cb+pDWpniE8CCuC6xJQI=",
+ "checksumSHA1": "kv3jbPJGCczHVQ7g51am1MxlD1c=",
"path": "golang.org/x/text/internal/gen",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
"checksumSHA1": "47nwiUyVBY2RKoEGXmCSvusY4Js=",
"path": "golang.org/x/text/internal/triegen",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
"checksumSHA1": "Yd5wMObzagIfCiKLpZbtBIrOUA4=",
"path": "golang.org/x/text/internal/ucd",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
"checksumSHA1": "ziMb9+ANGRJSSIuxYdRbA+cDRBQ=",
"path": "golang.org/x/text/transform",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
"checksumSHA1": "i14IZXKECObKRUNvTr7xivSL1IU=",
"path": "golang.org/x/text/unicode/cldr",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
"checksumSHA1": "Vircurgvsnt4k26havmxPM67PUA=",
"path": "golang.org/x/text/unicode/norm",
- "revision": "a8b38433e35b65ba247bb267317037dee1b70cea",
- "revisionTime": "2016-10-19T13:35:53Z"
+ "revision": "a49bea13b776691cb1b49873e5d8df96ec74831a",
+ "revisionTime": "2016-12-16T03:07:57Z"
},
{
- "checksumSHA1": "TFmTijDJtWhpUwG86ER0n2Xp+/U=",
+ "checksumSHA1": "I1JSeU5OMapl+4s2VrnBkMon3Bw=",
"path": "google.golang.org/api/gensupport",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "okBeNPclS/nvIkPEH/tZa3cxDnM=",
+ "checksumSHA1": "BWKmb7kGYbfbvXO6E7tCpTh9zKE=",
"path": "google.golang.org/api/googleapi",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
"checksumSHA1": "1K0JxrUfDqAB3MyRiU1LKjfHyf4=",
"path": "google.golang.org/api/googleapi/internal/uritemplates",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "kFHmY+4xvvjiLwhkuFg7ZIaGHmE=",
+ "checksumSHA1": "Mr2fXhMRzlQCgANFm91s536pG7E=",
+ "path": "google.golang.org/api/googleapi/transport",
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
+ },
+ {
+ "checksumSHA1": "nefIfmUzE2DJD4Tpodz+WHQLfeE=",
"path": "google.golang.org/api/internal",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
"checksumSHA1": "slcGOTGSdukEPPSN81Q5WZGmhog=",
"path": "google.golang.org/api/iterator",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "2veoulDfsbD5iUCMhnXAnPNHDNM=",
+ "checksumSHA1": "H/eWxTktOkG5X/1JcIIwSi/DnFg=",
"path": "google.golang.org/api/oauth2/v2",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "Lc4yBmQAP9J3f8XYa9nH/D0FVhE=",
+ "checksumSHA1": "kEQSHsbkLxyIijEvp5W2SF3uqsU=",
"path": "google.golang.org/api/option",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "GLBNLgjn9wWpwkPPmjZaYk5auAI=",
+ "checksumSHA1": "xygm9BwoCg7vc0PPgAPdxNKJ38c=",
"path": "google.golang.org/api/storage/v1",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
- "checksumSHA1": "Ms24PFj1glBAb69TtGuq9qeqcYA=",
+ "checksumSHA1": "zRukwFw3vpQ1QXjf0ryq7fLoSCY=",
"path": "google.golang.org/api/transport",
- "revision": "037d03010933c9fa8e57559c1b7faeec971688d1",
- "revisionTime": "2016-10-27T16:31:49Z"
+ "revision": "55146ba61254fdb1c26d65ff3c04bc1611ad73fb",
+ "revisionTime": "2016-12-12T20:09:13Z"
},
{
"checksumSHA1": "BYNXzv50n5HWU4OpKBw9JlrKIRI=",
"path": "google.golang.org/appengine",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
- "checksumSHA1": "vIZ71Qe81RHec1vNHpKG+CSx/es=",
+ "checksumSHA1": "gYHoPrPncGO926bN0jr1rzDxBQU=",
"path": "google.golang.org/appengine/internal",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
"path": "google.golang.org/appengine/internal/app_identity",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
"path": "google.golang.org/appengine/internal/base",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
"path": "google.golang.org/appengine/internal/datastore",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=",
"path": "google.golang.org/appengine/internal/log",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
"path": "google.golang.org/appengine/internal/modules",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
"path": "google.golang.org/appengine/internal/remote_api",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "VA88sOHmVuIslrbHaWx9yEvjGjM=",
"path": "google.golang.org/appengine/internal/socket",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
"path": "google.golang.org/appengine/internal/urlfetch",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "MharNMGnQusRPdmBYXDxz2cCHPU=",
"path": "google.golang.org/appengine/socket",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
"checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
"path": "google.golang.org/appengine/urlfetch",
- "revision": "46239ca616842c00f41b8cbc6bbf2bd6ffbfcdad",
- "revisionTime": "2016-10-25T16:43:32Z"
+ "revision": "08a149cfaee099e6ce4be01c0113a78c85ee1dee",
+ "revisionTime": "2016-12-16T21:42:33Z"
},
{
- "checksumSHA1": "r2jmzn/o6RN6PT9FRRtBeAfgNEk=",
+ "checksumSHA1": "oRN8PJBvyiuz7EpA3lkBo3zFNko=",
"path": "google.golang.org/grpc",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=",
"path": "google.golang.org/grpc/codes",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "Vd1MU+Ojs7GeS6jE52vlxtXvIrI=",
"path": "google.golang.org/grpc/credentials",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
- "checksumSHA1": "5R1jXc9mAXky9tPEuWMikTrwCgE=",
+ "checksumSHA1": "bg3wIPzajKt3QZfTG70EPaxDtpk=",
"path": "google.golang.org/grpc/credentials/oauth",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=",
"path": "google.golang.org/grpc/grpclog",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=",
"path": "google.golang.org/grpc/internal",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
- "checksumSHA1": "P64GkSdsTZ8Nxop5HYqZJ6e+iHs=",
+ "checksumSHA1": "XXpD8+S3gLrfmCLOf+RbxblOQkU=",
"path": "google.golang.org/grpc/metadata",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=",
"path": "google.golang.org/grpc/naming",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=",
"path": "google.golang.org/grpc/peer",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
+ },
+ {
+ "checksumSHA1": "4zzPK1BUgnOcugiN2vnkhUal4ls=",
+ "path": "google.golang.org/grpc/stats",
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
+ },
+ {
+ "checksumSHA1": "N0TftT6/CyWqp6VRi2DqDx60+Fo=",
+ "path": "google.golang.org/grpc/tap",
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
- "checksumSHA1": "HQJrtiTtr5eiRsXQLut2R1Q9kuY=",
+ "checksumSHA1": "x+eyD2YGMYn973r3dQwGOMdi4mA=",
"path": "google.golang.org/grpc/transport",
- "revision": "396f8ba2a6b9039070f4ff97561f5e408828fccd",
- "revisionTime": "2016-10-26T23:20:24Z"
+ "revision": "9d682f9293b408c42d17c587d0e2a31237ac3f10",
+ "revisionTime": "2016-12-22T00:19:25Z"
},
{
"checksumSHA1": "1D8GzeoFGUs5FZOoyC2DpQg8c5Y=",