From 921818bca208f0c70e85ec670074cb3905cbbc82 Mon Sep 17 00:00:00 2001
From: Niall Sheridan
Date: Sat, 27 Aug 2016 01:32:30 +0100
Subject: Update dependencies

---
 vendor/cloud.google.com/go/LICENSE | 202 +
 .../go/compute/metadata/metadata.go | 438 +
 vendor/cloud.google.com/go/internal/cloud.go | 64 +
 vendor/github.com/BurntSushi/toml/COMPATIBLE | 3 -
 vendor/github.com/BurntSushi/toml/COPYING | 14 -
 vendor/github.com/BurntSushi/toml/Makefile | 19 -
 vendor/github.com/BurntSushi/toml/README.md | 220 -
 vendor/github.com/BurntSushi/toml/decode.go | 509 -
 vendor/github.com/BurntSushi/toml/decode_meta.go | 121 -
 vendor/github.com/BurntSushi/toml/doc.go | 27 -
 vendor/github.com/BurntSushi/toml/encode.go | 568 -
 .../github.com/BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 vendor/github.com/BurntSushi/toml/lex.go | 858 --
 vendor/github.com/BurntSushi/toml/parse.go | 557 -
 vendor/github.com/BurntSushi/toml/session.vim | 1 -
 vendor/github.com/BurntSushi/toml/type_check.go | 91 -
 vendor/github.com/BurntSushi/toml/type_fields.go | 242 -
 .../github.com/aws/aws-sdk-go/aws/awserr/error.go | 4 +-
 .../github.com/aws/aws-sdk-go/aws/awserr/types.go | 2 +-
 .../github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 10 +-
 .../github.com/aws/aws-sdk-go/aws/client/client.go | 23 +-
 vendor/github.com/aws/aws-sdk-go/aws/config.go | 129 +-
 .../aws-sdk-go/aws/credentials/static_provider.go | 11 +-
 .../credentials/stscreds/assume_role_provider.go | 161 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 8 +-
 .../github.com/aws/aws-sdk-go/aws/session/doc.go | 223 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 188 +
 .../aws/aws-sdk-go/aws/session/session.go | 344 +-
 .../aws/aws-sdk-go/aws/session/shared_config.go | 294 +
 .../github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 2 +-
 vendor/github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/private/endpoints/endpoints.go | 19 +-
 .../aws-sdk-go/private/endpoints/endpoints.json | 3 +
 .../aws-sdk-go/private/endpoints/endpoints_map.go | 3 +
 .../aws/aws-sdk-go/private/protocol/query/build.go | 2 +-
 .../aws-sdk-go/private/protocol/restxml/restxml.go | 2 +-
 .../private/protocol/xml/xmlutil/build.go | 2 +-
 vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 2 +-
 .../aws/aws-sdk-go/service/s3/host_style_bucket.go | 8 +
 .../aws/aws-sdk-go/service/s3/unmarshal_error.go | 36 +-
 .../github.com/aws/aws-sdk-go/service/sts/api.go | 1653 ++
 .../aws/aws-sdk-go/service/sts/customizations.go | 12 +
 .../aws/aws-sdk-go/service/sts/service.go | 130 +
 vendor/github.com/davecgh/go-spew/LICENSE | 2 +
 vendor/github.com/go-ini/ini/Makefile | 12 +
 vendor/github.com/go-ini/ini/README.md | 167 +-
 vendor/github.com/go-ini/ini/README_ZH.md | 161 +-
 vendor/github.com/go-ini/ini/error.go | 32 +
 vendor/github.com/go-ini/ini/ini.go | 957 +-
 vendor/github.com/go-ini/ini/key.go | 633 +
 vendor/github.com/go-ini/ini/parser.go | 325 +
 vendor/github.com/go-ini/ini/section.go | 206 +
 vendor/github.com/go-ini/ini/struct.go | 186 +-
 vendor/github.com/go-sql-driver/mysql/packets.go | 19 +-
 .../github.com/golang/protobuf/proto/extensions.go | 3 +
 vendor/github.com/golang/protobuf/proto/lib.go | 2 +-
 .../golang/protobuf/proto/text_parser.go | 11 +
 .../go-github/github/activity_notifications.go | 21 +-
 .../google/go-github/github/authorizations.go | 4 +-
 vendor/github.com/google/go-github/github/doc.go | 4 +-
 .../google/go-github/github/event_types.go | 5 +-
 .../github.com/google/go-github/github/github.go | 27 +-
 .../go-github/github/migrations_source_import.go | 2 +-
 vendor/github.com/google/go-github/github/orgs.go | 3 +
 vendor/github.com/google/go-github/github/pulls.go | 3 +-
 vendor/github.com/google/go-github/github/repos.go | 2 +
 .../google/go-github/github/repos_pages.go | 26 +
 .../google/go-github/github/repos_traffic.go | 173 +
 .../github.com/google/go-github/github/search.go | 20 +-
 vendor/github.com/gorilla/context/README.md | 3 +
 vendor/github.com/gorilla/context/doc.go | 6 +
 vendor/github.com/gorilla/csrf/context.go | 4 +
 vendor/github.com/gorilla/csrf/context_legacy.go | 4 +
 vendor/github.com/gorilla/csrf/csrf.go | 15 +-
 vendor/github.com/gorilla/csrf/helpers.go | 6 +-
 .../github.com/gorilla/handlers/proxy_headers.go | 23 +-
 vendor/github.com/gorilla/mux/README.md | 65 +-
 vendor/github.com/gorilla/mux/doc.go | 25 +
 vendor/github.com/gorilla/mux/mux.go | 29 +-
 vendor/github.com/gorilla/mux/regexp.go | 11 +-
 vendor/github.com/gorilla/sessions/README.md | 2 +-
 vendor/github.com/hashicorp/go-multierror/Makefile | 31 +
 .../github.com/hashicorp/go-multierror/README.md | 6 +
 vendor/github.com/hashicorp/hcl/decoder.go | 2 +-
 vendor/github.com/kr/fs/LICENSE | 27 +
 vendor/github.com/kr/fs/Readme | 3 +
 vendor/github.com/kr/fs/filesystem.go | 36 +
 vendor/github.com/kr/fs/walk.go | 95 +
 vendor/github.com/mattn/go-sqlite3/callback.go | 4 +
 .../github.com/mattn/go-sqlite3/sqlite3-binding.c | 15321 +++++++++++++++----
 .../github.com/mattn/go-sqlite3/sqlite3-binding.h | 1701 +-
 .../mattn/go-sqlite3/sqlite3_load_extension.go | 4 +
 vendor/github.com/mattn/go-sqlite3/sqlite3ext.h | 32 +-
 .../mitchellh/mapstructure/mapstructure.go | 19 +
 .../github.com/pelletier/go-buffruneio/README.md | 62 +
 .../pelletier/go-buffruneio/buffruneio.go | 110 +
 vendor/github.com/pelletier/go-toml/LICENSE | 22 +
 vendor/github.com/pelletier/go-toml/README.md | 120 +
 vendor/github.com/pelletier/go-toml/clean.sh | 6 +
 vendor/github.com/pelletier/go-toml/doc.go | 250 +
 .../github.com/pelletier/go-toml/example-crlf.toml | 29 +
 vendor/github.com/pelletier/go-toml/example.toml | 29 +
 vendor/github.com/pelletier/go-toml/keysparsing.go | 81 +
 vendor/github.com/pelletier/go-toml/lexer.go | 655 +
 vendor/github.com/pelletier/go-toml/match.go | 234 +
 vendor/github.com/pelletier/go-toml/parser.go | 392 +
 vendor/github.com/pelletier/go-toml/position.go | 29 +
 vendor/github.com/pelletier/go-toml/query.go | 153 +
 vendor/github.com/pelletier/go-toml/querylexer.go | 356 +
 vendor/github.com/pelletier/go-toml/queryparser.go | 275 +
 vendor/github.com/pelletier/go-toml/test.sh | 79 +
 vendor/github.com/pelletier/go-toml/token.go | 139 +
 vendor/github.com/pelletier/go-toml/toml.go | 285 +
 .../pelletier/go-toml/tomltree_conversions.go | 146 +
 vendor/github.com/pkg/errors/README.md | 2 +
 vendor/github.com/pkg/errors/errors.go | 134 +-
 vendor/github.com/pkg/errors/stack.go | 13 +
 vendor/github.com/pkg/sftp/CONTRIBUTORS | 2 +
 vendor/github.com/pkg/sftp/LICENSE | 9 +
 vendor/github.com/pkg/sftp/README.md | 27 +
 vendor/github.com/pkg/sftp/attrs.go | 237 +
 vendor/github.com/pkg/sftp/attrs_stubs.go | 11 +
 vendor/github.com/pkg/sftp/attrs_unix.go | 17 +
 vendor/github.com/pkg/sftp/client.go | 1133 ++
 vendor/github.com/pkg/sftp/conn.go | 122 +
 vendor/github.com/pkg/sftp/debug.go | 9 +
 vendor/github.com/pkg/sftp/packet.go | 901 ++
 vendor/github.com/pkg/sftp/release.go | 5 +
 vendor/github.com/pkg/sftp/server.go | 607 +
 .../github.com/pkg/sftp/server_statvfs_darwin.go | 21 +
 vendor/github.com/pkg/sftp/server_statvfs_impl.go | 25 +
 vendor/github.com/pkg/sftp/server_statvfs_linux.go | 23 +
 vendor/github.com/pkg/sftp/server_statvfs_stubs.go | 11 +
 vendor/github.com/pkg/sftp/server_stubs.go | 12 +
 vendor/github.com/pkg/sftp/server_unix.go | 143 +
 vendor/github.com/pkg/sftp/sftp.go | 217 +
 .../pmezard/go-difflib/difflib/difflib.go | 54 +-
 vendor/github.com/spf13/afero/LICENSE.txt | 174 +
 vendor/github.com/spf13/afero/README.md | 449 +
 vendor/github.com/spf13/afero/afero.go | 108 +
 vendor/github.com/spf13/afero/appveyor.yml | 15 +
 vendor/github.com/spf13/afero/basepath.go | 145 +
 vendor/github.com/spf13/afero/cacheOnReadFs.go | 296 +
 vendor/github.com/spf13/afero/const_bsds.go | 22 +
 vendor/github.com/spf13/afero/const_win_unix.go | 25 +
 vendor/github.com/spf13/afero/copyOnWriteFs.go | 253 +
 vendor/github.com/spf13/afero/httpFs.go | 110 +
 vendor/github.com/spf13/afero/ioutil.go | 230 +
 vendor/github.com/spf13/afero/mem/dir.go | 37 +
 vendor/github.com/spf13/afero/mem/dirmap.go | 43 +
 vendor/github.com/spf13/afero/mem/file.go | 285 +
 vendor/github.com/spf13/afero/memmap.go | 349 +
 vendor/github.com/spf13/afero/memradix.go | 14 +
 vendor/github.com/spf13/afero/os.go | 94 +
 vendor/github.com/spf13/afero/path.go | 108 +
 vendor/github.com/spf13/afero/readonlyfs.go | 70 +
 vendor/github.com/spf13/afero/regexpfs.go | 214 +
 vendor/github.com/spf13/afero/sftp.go | 128 +
 vendor/github.com/spf13/afero/sftp/file.go | 95 +
 vendor/github.com/spf13/afero/sftp_test_go | 286 +
 vendor/github.com/spf13/afero/unionFile.go | 274 +
 vendor/github.com/spf13/afero/util.go | 331 +
 vendor/github.com/spf13/cast/caste.go | 28 +-
 vendor/github.com/spf13/pflag/flag.go | 51 +-
 vendor/github.com/spf13/pflag/string_slice.go | 23 +-
 vendor/github.com/spf13/viper/README.md | 2 +-
 vendor/github.com/spf13/viper/util.go | 11 +-
 vendor/github.com/spf13/viper/viper.go | 32 +-
 vendor/golang.org/x/crypto/ssh/channel.go | 2 +
 vendor/golang.org/x/crypto/ssh/connection.go | 1 -
 vendor/golang.org/x/crypto/ssh/server.go | 1 -
 vendor/golang.org/x/net/http2/client_conn_pool.go | 7 +-
 vendor/golang.org/x/net/http2/errors.go | 8 +
 vendor/golang.org/x/net/http2/frame.go | 44 +-
 vendor/golang.org/x/net/http2/http2.go | 13 +-
 vendor/golang.org/x/net/http2/server.go | 79 +-
 vendor/golang.org/x/net/http2/transport.go | 374 +-
 vendor/golang.org/x/oauth2/README.md | 1 +
 vendor/golang.org/x/oauth2/google/default.go | 2 +-
 vendor/golang.org/x/oauth2/google/google.go | 2 +-
 vendor/golang.org/x/oauth2/jws/jws.go | 44 +-
 vendor/golang.org/x/oauth2/oauth2.go | 6 +-
 vendor/golang.org/x/text/LICENSE | 27 +
 vendor/golang.org/x/text/PATENTS | 22 +
 vendor/golang.org/x/text/internal/gen/code.go | 338 +
 vendor/golang.org/x/text/internal/gen/gen.go | 226 +
 .../golang.org/x/text/internal/triegen/compact.go | 58 +
 vendor/golang.org/x/text/internal/triegen/print.go | 251 +
 .../golang.org/x/text/internal/triegen/triegen.go | 494 +
 vendor/golang.org/x/text/internal/ucd/ucd.go | 363 +
 vendor/golang.org/x/text/transform/transform.go | 661 +
 vendor/golang.org/x/text/unicode/cldr/base.go | 110 +
 vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 +
 vendor/golang.org/x/text/unicode/cldr/collate.go | 359 +
 vendor/golang.org/x/text/unicode/cldr/decode.go | 171 +
 vendor/golang.org/x/text/unicode/cldr/makexml.go | 400 +
 vendor/golang.org/x/text/unicode/cldr/resolve.go | 602 +
 vendor/golang.org/x/text/unicode/cldr/slice.go | 144 +
 vendor/golang.org/x/text/unicode/cldr/xml.go | 1440 ++
 .../golang.org/x/text/unicode/norm/composition.go | 514 +
 vendor/golang.org/x/text/unicode/norm/forminfo.go | 256 +
 vendor/golang.org/x/text/unicode/norm/input.go | 105 +
 vendor/golang.org/x/text/unicode/norm/iter.go | 450 +
 .../golang.org/x/text/unicode/norm/maketables.go | 978 ++
 vendor/golang.org/x/text/unicode/norm/normalize.go | 576 +
 .../golang.org/x/text/unicode/norm/readwriter.go | 126 +
 vendor/golang.org/x/text/unicode/norm/tables.go | 7627 +++++++++
 vendor/golang.org/x/text/unicode/norm/transform.go | 88 +
 vendor/golang.org/x/text/unicode/norm/trie.go | 54 +
 vendor/golang.org/x/text/unicode/norm/triegen.go | 117 +
 .../google.golang.org/api/gensupport/resumable.go | 5 +-
 vendor/google.golang.org/api/gensupport/send.go | 55 +
 vendor/google.golang.org/api/internal/pool.go | 59 +
 .../google.golang.org/api/oauth2/v2/oauth2-gen.go | 20 +-
 vendor/google.golang.org/api/option/option.go | 14 +
 .../api/storage/v1/storage-api.json | 42 +-
 .../api/storage/v1/storage-gen.go | 265 +-
 vendor/google.golang.org/appengine/appengine.go | 36 +
 vendor/google.golang.org/appengine/appengine_vm.go | 36 -
 .../appengine/internal/internal.go | 34 -
 .../google.golang.org/appengine/internal/main.go | 15 +
 .../appengine/internal/main_vm.go | 44 +
 vendor/google.golang.org/cloud/CONTRIBUTORS | 1 +
 vendor/google.golang.org/cloud/README.md | 21 +-
 .../cloud/compute/metadata/metadata.go | 3 +-
 vendor/google.golang.org/cloud/option.go | 7 +
 vendor/google.golang.org/cloud/storage/storage.go | 251 +-
 vendor/google.golang.org/grpc/README.md | 2 +-
 vendor/google.golang.org/grpc/call.go | 29 +-
 vendor/google.golang.org/grpc/clientconn.go | 359 +-
 .../grpc/credentials/credentials.go | 60 +-
 .../grpc/credentials/credentials_util_go17.go | 76 +
 .../grpc/credentials/credentials_util_pre_go17.go | 74 +
 vendor/google.golang.org/grpc/metadata/metadata.go | 14 +-
 vendor/google.golang.org/grpc/rpc_util.go | 18 +-
 vendor/google.golang.org/grpc/server.go | 100 +-
 vendor/google.golang.org/grpc/stream.go | 115 +-
 vendor/google.golang.org/grpc/transport/control.go | 5 +
 vendor/google.golang.org/grpc/transport/go16.go | 46 +
 vendor/google.golang.org/grpc/transport/go17.go | 46 +
 .../grpc/transport/handler_server.go | 10 +-
 .../grpc/transport/http2_client.go | 237 +-
 .../grpc/transport/http2_server.go | 76 +-
 .../google.golang.org/grpc/transport/http_util.go | 85 +-
 .../google.golang.org/grpc/transport/pre_go16.go | 51 +
 .../google.golang.org/grpc/transport/transport.go | 93 +-
 vendor/gopkg.in/mgo.v2/Makefile | 4 +-
 vendor/gopkg.in/mgo.v2/bson/bson.go | 23 +-
 vendor/gopkg.in/mgo.v2/bson/decimal.go | 310 +
 vendor/gopkg.in/mgo.v2/bson/decode.go | 5 +
 vendor/gopkg.in/mgo.v2/bson/encode.go | 71 +-
 vendor/gopkg.in/mgo.v2/bson/json.go | 380 +
 vendor/gopkg.in/mgo.v2/cluster.go | 5 +-
 vendor/gopkg.in/mgo.v2/gridfs.go | 2 +-
 vendor/gopkg.in/mgo.v2/internal/json/LICENSE | 27 +
 vendor/gopkg.in/mgo.v2/internal/json/decode.go | 1685 ++
 vendor/gopkg.in/mgo.v2/internal/json/encode.go | 1256 ++
 vendor/gopkg.in/mgo.v2/internal/json/extension.go | 95 +
 vendor/gopkg.in/mgo.v2/internal/json/fold.go | 143 +
 vendor/gopkg.in/mgo.v2/internal/json/indent.go | 141 +
 vendor/gopkg.in/mgo.v2/internal/json/scanner.go | 697 +
 vendor/gopkg.in/mgo.v2/internal/json/stream.go | 510 +
 vendor/gopkg.in/mgo.v2/internal/json/tags.go | 44 +
 .../gopkg.in/mgo.v2/internal/sasl/sasl_windows.c | 20 +-
 .../gopkg.in/mgo.v2/internal/sasl/sasl_windows.go | 12 +-
 .../gopkg.in/mgo.v2/internal/sasl/sasl_windows.h | 2 +-
 vendor/gopkg.in/mgo.v2/server.go | 11 +
 vendor/gopkg.in/mgo.v2/session.go | 137 +-
 vendor/vendor.json | 680 +-
 270 files changed, 56004 insertions(+), 9144 deletions(-)
 create mode 100644 vendor/cloud.google.com/go/LICENSE
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go
 create mode 100644 vendor/cloud.google.com/go/internal/cloud.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
 delete mode 100644 vendor/github.com/BurntSushi/toml/COPYING
 delete mode 100644 vendor/github.com/BurntSushi/toml/Makefile
 delete mode 100644 vendor/github.com/BurntSushi/toml/README.md
 delete mode 100644 vendor/github.com/BurntSushi/toml/decode.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/doc.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/encode.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/lex.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/parse.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/session.vim
 delete mode 100644 vendor/github.com/BurntSushi/toml/type_check.go
 delete mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go
 create mode 100644 vendor/github.com/go-ini/ini/Makefile
 create mode 100644 vendor/github.com/go-ini/ini/error.go
 create mode 100644 vendor/github.com/go-ini/ini/key.go
 create mode 100644 vendor/github.com/go-ini/ini/parser.go
 create mode 100644 vendor/github.com/go-ini/ini/section.go
 create mode 100644 vendor/github.com/google/go-github/github/repos_traffic.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
 create mode 100644 vendor/github.com/kr/fs/LICENSE
 create mode 100644 vendor/github.com/kr/fs/Readme
 create mode 100644 vendor/github.com/kr/fs/filesystem.go
 create mode 100644 vendor/github.com/kr/fs/walk.go
 create mode 100644 vendor/github.com/pelletier/go-buffruneio/README.md
 create mode 100644 vendor/github.com/pelletier/go-buffruneio/buffruneio.go
 create mode 100644 vendor/github.com/pelletier/go-toml/LICENSE
 create mode 100644 vendor/github.com/pelletier/go-toml/README.md
 create mode 100755 vendor/github.com/pelletier/go-toml/clean.sh
 create mode 100644 vendor/github.com/pelletier/go-toml/doc.go
 create mode 100644 vendor/github.com/pelletier/go-toml/example-crlf.toml
 create mode 100644 vendor/github.com/pelletier/go-toml/example.toml
 create mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go
 create mode 100644 vendor/github.com/pelletier/go-toml/lexer.go
 create mode 100644 vendor/github.com/pelletier/go-toml/match.go
 create mode 100644 vendor/github.com/pelletier/go-toml/parser.go
 create mode 100644 vendor/github.com/pelletier/go-toml/position.go
 create mode 100644 vendor/github.com/pelletier/go-toml/query.go
 create mode 100644 vendor/github.com/pelletier/go-toml/querylexer.go
 create mode 100644 vendor/github.com/pelletier/go-toml/queryparser.go
 create mode 100755 vendor/github.com/pelletier/go-toml/test.sh
 create mode 100644 vendor/github.com/pelletier/go-toml/token.go
 create mode 100644 vendor/github.com/pelletier/go-toml/toml.go
 create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_conversions.go
 create mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS
 create mode 100644 vendor/github.com/pkg/sftp/LICENSE
 create mode 100644 vendor/github.com/pkg/sftp/README.md
 create mode 100644 vendor/github.com/pkg/sftp/attrs.go
 create mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go
 create mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go
 create mode 100644 vendor/github.com/pkg/sftp/client.go
 create mode 100644 vendor/github.com/pkg/sftp/conn.go
 create mode 100644 vendor/github.com/pkg/sftp/debug.go
 create mode 100644 vendor/github.com/pkg/sftp/packet.go
 create mode 100644 vendor/github.com/pkg/sftp/release.go
 create mode 100644 vendor/github.com/pkg/sftp/server.go
 create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go
 create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go
 create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go
 create mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go
 create mode 100644 vendor/github.com/pkg/sftp/server_stubs.go
 create mode 100644 vendor/github.com/pkg/sftp/server_unix.go
 create mode 100644 vendor/github.com/pkg/sftp/sftp.go
 create mode 100644 vendor/github.com/spf13/afero/LICENSE.txt
 create mode 100644 vendor/github.com/spf13/afero/README.md
 create mode 100644 vendor/github.com/spf13/afero/afero.go
 create mode 100644 vendor/github.com/spf13/afero/appveyor.yml
 create mode 100644 vendor/github.com/spf13/afero/basepath.go
 create mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go
 create mode 100644 vendor/github.com/spf13/afero/const_bsds.go
 create mode 100644 vendor/github.com/spf13/afero/const_win_unix.go
 create mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go
 create mode 100644 vendor/github.com/spf13/afero/httpFs.go
 create mode 100644 vendor/github.com/spf13/afero/ioutil.go
 create mode 100644 vendor/github.com/spf13/afero/mem/dir.go
 create mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go
 create mode 100644 vendor/github.com/spf13/afero/mem/file.go
 create mode 100644 vendor/github.com/spf13/afero/memmap.go
 create mode 100644 vendor/github.com/spf13/afero/memradix.go
 create mode 100644 vendor/github.com/spf13/afero/os.go
 create mode 100644 vendor/github.com/spf13/afero/path.go
 create mode 100644 vendor/github.com/spf13/afero/readonlyfs.go
 create mode 100644 vendor/github.com/spf13/afero/regexpfs.go
 create mode 100644 vendor/github.com/spf13/afero/sftp.go
 create mode 100644 vendor/github.com/spf13/afero/sftp/file.go
 create mode 100644 vendor/github.com/spf13/afero/sftp_test_go
 create mode 100644 vendor/github.com/spf13/afero/unionFile.go
 create mode 100644 vendor/github.com/spf13/afero/util.go
 create mode 100644 vendor/golang.org/x/text/LICENSE
 create mode 100644 vendor/golang.org/x/text/PATENTS
 create mode 100644 vendor/golang.org/x/text/internal/gen/code.go
 create mode 100644 vendor/golang.org/x/text/internal/gen/gen.go
 create mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go
 create mode 100644 vendor/golang.org/x/text/internal/triegen/print.go
 create mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go
 create mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go
 create mode 100644 vendor/golang.org/x/text/transform/transform.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go
 create mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/tables.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go
 create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go
 create mode 100644 vendor/google.golang.org/api/gensupport/send.go
 create mode 100644 vendor/google.golang.org/api/internal/pool.go
 create mode 100644 vendor/google.golang.org/appengine/internal/main.go
 create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
 create mode 100644 vendor/google.golang.org/grpc/transport/go16.go
 create mode 100644 vendor/google.golang.org/grpc/transport/go17.go
 create mode 100644 vendor/google.golang.org/grpc/transport/pre_go16.go
 create mode 100644 vendor/gopkg.in/mgo.v2/bson/decimal.go
 create mode 100644 vendor/gopkg.in/mgo.v2/bson/json.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/LICENSE
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/decode.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/encode.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/extension.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/fold.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/indent.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/scanner.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/stream.go
 create mode 100644 vendor/gopkg.in/mgo.v2/internal/json/tags.go

diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100644
index 0000000..a4c5efd
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
+      For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 Google Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
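The next hunk vendors the GCE metadata client, `cloud.google.com/go/compute/metadata`. For orientation, here is a minimal sketch of how a consumer typically calls the accessors defined in that file (OnGCE, ProjectID, Zone, InstanceAttributeValue); this example is not part of the patch, and the attribute name "my-key" is an invented placeholder:

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// The metadata service only exists on GCE; probe for it first.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; metadata service unavailable")
	}
	proj, err := metadata.ProjectID() // cached after the first lookup
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(proj, zone)
	// An undefined attribute comes back as a NotDefinedError, so check err.
	if v, err := metadata.InstanceAttributeValue("my-key"); err == nil {
		fmt.Println("my-key =", v)
	}
}
```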
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
new file mode 100644
index 0000000..b5d6091
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -0,0 +1,438 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+
+	"cloud.google.com/go/internal"
+)
+
+const (
+	// metadataIP is the documented metadata server IP address.
+	metadataIP = "169.254.169.254"
+
+	// metadataHostEnv is the environment variable specifying the
+	// GCE metadata hostname. If empty, the default value of
+	// metadataIP ("169.254.169.254") is used instead.
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+				ResponseHeaderTimeout: 2 * time.Second,
+			},
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &internal.Transport{
+			Base: &http.Transport{
+				Dial: (&net.Dialer{
+					Timeout:   2 * time.Second,
+					KeepAlive: 30 * time.Second,
+				}).Dial,
+			},
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+		host = metadataIP
+	}
+	url := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	res, err := client.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var (
+	onGCEOnce sync.Once
+	onGCE     bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	onGCEOnce.Do(initOnGCE)
+	return onGCE
+}
+
+func initOnGCE() {
+	onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+	// The user explicitly said they're on GCE, so trust them.
+	if os.Getenv(metadataHostEnv) != "" {
+		return true
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	resc := make(chan bool, 2)
+
+	// Try two strategies in parallel.
+	// See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194
+	go func() {
+		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
+		if err != nil {
+			resc <- false
+			return
+		}
+		defer res.Body.Close()
+		resc <- res.Header.Get("Metadata-Flavor") == "Google"
+	}()
+
+	go func() {
+		addrs, err := net.LookupHost("metadata.google.internal")
+		if err != nil || len(addrs) == 0 {
+			resc <- false
+			return
+		}
+		resc <- strsContains(addrs, metadataIP)
+	}()
+
+	tryHarder := systemInfoSuggestsGCE()
+	if tryHarder {
+		res := <-resc
+		if res {
+			// The first strategy succeeded, so let's use it.
+			return true
+		}
+		// Wait for either the DNS or metadata server probe to
+		// contradict the other one and say we are running on
+		// GCE. Give it a lot of time to do so, since the system
+		// info already suggests we're running on a GCE BIOS.
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case res = <-resc:
+			return res
+		case <-timer.C:
+			// Too slow. Who knows what this system is.
+			return false
+		}
+	}
+
+	// There's no hint from the system info that we're running on
+	// GCE, so use the first probe's result as truth, whether it's
+	// true or false. The goal here is to optimize for speed for
+	// users who are NOT running on GCE. We can't assume that
+	// either a DNS lookup or an HTTP request to a blackholed IP
+	// address is fast. Worst case this should return when the
+	// metaClient's Transport.ResponseHeaderTimeout or
+	// Transport.Dial.Timeout fires (in two seconds).
+	return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance ID string.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go
new file mode 100644
index 0000000..8e0c8f8
--- /dev/null
+++ b/vendor/cloud.google.com/go/internal/cloud.go
@@ -0,0 +1,64 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const userAgent = "gcloud-golang/0.1"
+
+// Transport is an http.RoundTripper that appends Google Cloud client's
+// user-agent to the original request's user-agent header.
+type Transport struct {
+	// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+	// Do User-Agent some other way.
+
+	// Base is the actual http.RoundTripper
+	// requests will use. It must not be nil.
+	Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = cloneRequest(req)
+	ua := req.Header.Get("User-Agent")
+	if ua == "" {
+		ua = userAgent
+	} else {
+		ua = fmt.Sprintf("%s %s", ua, userAgent)
+	}
+	req.Header.Set("User-Agent", ua)
+	return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 21e0938..0000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-                    Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
-            DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848..0000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
-	go install ./...
-
-test: install
-	go test -v
-	toml-test toml-test-decoder
-	toml-test -encoder toml-test-encoder
-
-fmt:
-	gofmt -w *.go */*.go
-	colcheck *.go */*.go
-
-tags:
-	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
-	git push origin master
-	git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 5a5df63..0000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,220 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/mojombo/toml
-
-Compatible with TOML version
-[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
-
-Documentation: http://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
-
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
-  Age int
-  Cats []string
-  Pi float64
-  Perfection []int
-  DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
-  // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
-  ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
-  Name string
-  Duration duration
-}
-type songs struct {
-  Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-  log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
-  fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
-	time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
-	var err error
-	d.Duration, err = time.ParseDuration(string(text))
-	return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-  "alpha",
-  "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title string
-	Owner ownerInfo
-	DB database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-
-type ownerInfo struct {
-	Name string
-	Org string `toml:"organization"`
-	Bio string
-	DOB time.Time
-}
-
-type database struct {
-	Server string
-	Ports []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data [][]interface{}
-	Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
-
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b0fd51d..0000000
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package toml
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math"
-	"reflect"
-	"strings"
-	"time"
-)
-
-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
-	UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
-	_, err := Decode(string(p), v)
-	return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
-	undecoded interface{}
-	context   Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md := MetaData{decoded: make(map[string]bool)}
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md.context = primValue.context
-	defer func() { md.context = nil }()
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
-	rv := reflect.ValueOf(v)
-	if rv.Kind() != reflect.Ptr {
-		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
-	}
-	if rv.IsNil() {
-		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
-	}
-	p, err := parse(data)
-	if err != nil {
-		return MetaData{}, err
-	}
-	md := MetaData{
-		p.mapping, p.types, p.ordered,
-		make(map[string]bool, len(p.ordered)), nil,
-	}
-	return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadFile(fpath)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
-	bs, err := ioutil.ReadAll(r)
-	if err != nil {
-		return MetaData{}, err
-	}
-	return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
-	// Special case. Look for a `Primitive` value.
-	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
-		// Save the undecoded data and the key context into the primitive
-		// value.
-		context := make(Key, len(md.context))
-		copy(context, md.context)
-		rv.Set(reflect.ValueOf(Primitive{
-			undecoded: data,
-			context:   context,
-		}))
-		return nil
-	}
-
-	// Special case. Unmarshaler Interface support.
-	if rv.CanAddr() {
-		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
-			return v.UnmarshalTOML(data)
-		}
-	}
-
-	// Special case. Handle time.Time values specifically.
-	// TODO: Remove this code when we decide to drop support for Go 1.1.
-	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
-	// interfaces.
-	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
-		return md.unifyDatetime(data, rv)
-	}
-
-	// Special case. Look for a value satisfying the TextUnmarshaler interface.
-	if v, ok := rv.Interface().(TextUnmarshaler); ok {
-		return md.unifyText(data, v)
-	}
-	// BUG(burntsushi)
-	// The behavior here is incorrect whenever a Go type satisfies the
-	// encoding.TextUnmarshaler interface but also corresponds to a TOML
-	// hash or array. In particular, the unmarshaler should only be applied
-	// to primitive TOML values. But at this point, it will be applied to
-	// all kinds of values and produce an incorrect error whenever those values
-	// are hashes or arrays (including arrays of tables).
-
-	k := rv.Kind()
-
-	// laziness
-	if k >= reflect.Int && k <= reflect.Uint64 {
-		return md.unifyInt(data, rv)
-	}
-	switch k {
-	case reflect.Ptr:
-		elem := reflect.New(rv.Type().Elem())
-		err := md.unify(data, reflect.Indirect(elem))
-		if err != nil {
-			return err
-		}
-		rv.Set(elem)
-		return nil
-	case reflect.Struct:
-		return md.unifyStruct(data, rv)
-	case reflect.Map:
-		return md.unifyMap(data, rv)
-	case reflect.Array:
-		return md.unifyArray(data, rv)
-	case reflect.Slice:
-		return md.unifySlice(data, rv)
-	case reflect.String:
-		return md.unifyString(data, rv)
-	case reflect.Bool:
-		return md.unifyBool(data, rv)
-	case reflect.Interface:
-		// we only support empty interfaces.
-		if rv.NumMethod() > 0 {
-			return e("unsupported type %s", rv.Type())
-		}
-		return md.unifyAnything(data, rv)
-	case reflect.Float32:
-		fallthrough
-	case reflect.Float64:
-		return md.unifyFloat64(data, rv)
-	}
-	return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
-	tmap, ok := mapping.(map[string]interface{})
-	if !ok {
-		if mapping == nil {
-			return nil
-		}
-		return e("type mismatch for %s: expected table but found %T",
-			rv.Type().String(), mapping)
-	}
-
-	for key, datum := range tmap {
-		var f *field
-		fields := cachedTypeFields(rv.Type())
-		for i := range fields {
-			ff := &fields[i]
-			if ff.name == key {
-				f = ff
-				break
-			}
-			if f == nil && strings.EqualFold(ff.name, key) {
-				f = ff
-			}
-		}
-		if f != nil {
-			subv := rv
-			for _, i := range f.index {
-				subv = indirect(subv.Field(i))
-			}
-			if isUnifiable(subv) {
-				md.decoded[md.context.add(key).String()] = true
-				md.context = append(md.context, key)
-				if err := md.unify(datum, subv); err != nil {
-					return err
-				}
-				md.context = md.context[0 : len(md.context)-1]
-			} else if f.name != "" {
-				// Bad user! No soup for you!
- return e("cannot write unexported field %s.%s", - rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - if tmap == nil { - return nil - } - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - if !datav.IsValid() { - return nil - } - return badtype("slice", data) - } - n := datav.Len() - if rv.IsNil() || rv.Cap() < n { - rv.Set(reflect.MakeSlice(rv.Type(), n, n)) - } - rv.SetLen(n) - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanSet() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("cannot load TOML value of type %T into a Go %s", data, expected) -} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index b9914a6..0000000 --- a/vendor/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,121 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. 
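A short usage sketch, assuming an illustrative `blob` containing a `[database]` table; `toml.Decode` returns the `MetaData` value these queries run against:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
[database]
server = "192.168.1.1"
`
	var conf map[string]interface{}
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("database", "server")) // true
	fmt.Println(md.IsDefined("database", "ports"))  // false: key absent
	fmt.Println(md.IsDefined())                     // false: empty key
}
```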
-func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } - return k[i] -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. -func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe26800..0000000 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. 
These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 0f2558b..0000000 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,568 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "toml: cannot encode array with mixed element types") - errArrayNilElement = errors.New( - "toml: cannot encode array with nil element") - errNonString = errors.New( - "toml: cannot encode a map with non-string key type") - errAnonNonStruct = errors.New( - "toml: cannot encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "toml: TOML array element cannot contain a table") - errNoKey = errors.New( - "toml: top-level values must be Go maps or structs") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) 
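A small encoding sketch under those rules, using an illustrative `config` type; since none of the fields are tables, each comes out as a plain `key = value` line in field order:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Age  int
	Cats []string
	Pi   float64
}

func main() {
	var buf bytes.Buffer
	c := config{Age: 25, Cats: []string{"Cauchy", "Plato"}, Pi: 3.14}
	if err := toml.NewEncoder(&buf).Encode(c); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Age = 25
	// Cats = ["Cauchy", "Plato"]
	// Pi = 3.14
}
```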
-func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
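For illustration, the rule this helper enforces, restated with the standard library (the value 3.0 is an arbitrary example):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// strconv renders 3.0 as "3", but a bare "3" would be a TOML
	// integer, so a ".0" suffix is added when no '.' is present.
	s := strconv.FormatFloat(3.0, 'f', -1, 64)
	if !strings.Contains(s, ".") {
		s += ".0"
	}
	fmt.Println(s) // 3.0
}
```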
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexported fields - if f.PkgPath != "" && !f.Anonymous { - continue - } - frv := rv.Field(i) - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - // Treat anonymous struct fields with - // tag names as though they are not - // anonymous, like encoding/json does. - if getOptions(f.Tag).name == "" { - addFields(t, frv, f.Index) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && - getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), f.Index) - } - continue - } - // Fall through to the normal field encoding logic below - // for non-struct anonymous fields. 
- } - } - - if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. - continue - } - - opts := getOptions(sft.Tag) - if opts.skip { - continue - } - keyName := sft.Name - if opts.name != "" { - keyName = opts.name - } - if opts.omitempty && isEmpty(sf) { - continue - } - if opts.omitzero && isZero(sf) { - continue - } - - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } - return tomlArray - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -type tagOptions struct { - skip bool // "-" - name string - omitempty bool - omitzero bool -} - -func getOptions(tag reflect.StructTag) tagOptions { - t := tag.Get("toml") - if t == "-" { - return tagOptions{skip: true} - } - var opts tagOptions - parts := strings.Split(t, ",") - opts.name = parts[0] - for _, s := range parts[1:] { - switch s { - case "omitempty": - opts.omitempty = true - case "omitzero": - opts.omitzero = true - } - } - return opts -} - -func isZero(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint() == 0 - case reflect.Float32, reflect.Float64: - return rv.Float() == 0.0 - } - return false -} - -func isEmpty(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return rv.Len() == 0 - case reflect.Bool: - return !rv.Bool() - } - return false -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. 
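One consequence of the alias, sketched with a hypothetical `level` type: any value satisfying `TextMarshaler` is encoded as a TOML string rather than by its underlying kind:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// level is a hypothetical example type. Because it satisfies
// TextMarshaler, the encoder quotes it instead of writing an integer.
type level int

func (l level) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("L%d", int(l))), nil
}

func main() {
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(struct{ Verbosity level }{3}); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String()) // Verbosity = "L3"
}
```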
-type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d..0000000 --- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 104ebda..0000000 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,858 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. 
- stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// skip ignores all input that matches the given predicate. -func (lx *lexer) skip(pred func(rune) bool) { - for { - r := lx.next() - if pred(r) { - continue - } - lx.backup() - lx.ignore() - return - } -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. 
- lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("Unexpected end of table name. (Table names cannot " + - "be empty.)") - case r == tableSep: - return lx.errorf("Unexpected table separator. (Table names cannot " + - "be empty.)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - default: - return lexBareTableName - } -} - -// lexBareTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexBareTableName(lx *lexer) stateFn { - r := lx.next() - if isBareKeyChar(r) { - return lexBareTableName - } - lx.backup() - lx.emit(itemText) - return lexTableNameEnd -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - lx.skip(isWhitespace) - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ - "instead.", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("Unexpected key separator %q.", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. 
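For orientation, a sketch of which keys may stay bare, mirroring the `isBareKeyChar` predicate defined near the bottom of this file (the sample keys are illustrative):

```go
package main

import "fmt"

// isBareKeyChar restates the predicate from lex.go: bare keys may use
// ASCII letters, digits, '_' and '-'; anything else must be quoted.
func isBareKeyChar(r rune) bool {
	return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9') || r == '_' || r == '-'
}

func main() {
	for _, key := range []string{"server-1", "conn_max", "key with spaces"} {
		bare := true
		for _, r := range key {
			if !isBareKeyChar(r) {
				bare = false
				break
			}
		}
		fmt.Printf("%q bare=%v\n", key, bare) // only the last needs quotes
	}
}
```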
-func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.backup() - lx.emit(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emit(itemText) - return lexKeyEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT new lines. - // In array syntax, the array states are responsible for ignoring new - // lines. - r := lx.next() - switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - } - switch r { - case arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case '+', '-': - return lexNumberStart - case '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - if unicode.IsLetter(r) { - // Be permissive here; lexBool will give a nice error if the - // user wrote something like - // x = foo - // (i.e. not 'true' or 'false' but is something else word-like.) - lx.backup() - return lexBool - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. 
It assumes that a ']' has -// just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '\\': - return lexMultilineStringEscape - case r == stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. -// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'" has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - return lexMultilineString - } - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("Invalid escape character %q. 
Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ - "\\uXXXX and \\UXXXXXXXX.", r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\u', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either an integer, a float, or datetime. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '_': - return lexNumber - case 'e', 'E': - return lexFloat - case '.': - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected a digit but got %q.", r) -} - -// lexNumberOrDate consumes either an integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumberOrDate - } - switch r { - case '-': - return lexDatetime - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDatetime consumes a Datetime, to a first approximation. -// The parser validates that it matches one of the accepted formats. -func lexDatetime(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexDatetime - } - switch r { - case '-', 'T', ':', '.', 'Z': - return lexDatetime - } - - lx.backup() - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that a sign -// has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // We MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected a digit but got %q.", r) - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexNumber - } - switch r { - case '_': - return lexNumber - case '.', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloat consumes the elements of a float. It allows any sequence of -// float-like characters, so floats emitted by the lexer are only a first -// approximation and must be validated by the parser. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - switch r { - case '_', '.', '-', '+', 'e', 'E': - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexBool consumes a bool string: 'true' or 'false. -func lexBool(lx *lexer) stateFn { - var rs []rune - for { - r := lx.next() - if r == eof || isWhitespace(r) || isNL(r) { - lx.backup() - break - } - rs = append(rs, r) - } - s := string(rs) - switch s { - case "true", "false": - lx.emit(itemBool) - return lx.pop() - } - return lx.errorf("Expected value but found %q instead.", s) -} - -// lexCommentStart begins the lexing of a comment. 
It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. -func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index a562555..0000000 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,557 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
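A sketch of the concrete types this hands back, visible when decoding into an untyped map (the blob contents are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
n = 1
pi = 3.14
ok = true
name = "toml"
when = 1987-07-05T05:45:00Z
`
	var m map[string]interface{}
	if _, err := toml.Decode(blob, &m); err != nil {
		log.Fatal(err)
	}
	// Integers arrive as int64, floats as float64, datetimes as time.Time.
	fmt.Printf("%T %T %T %T %T\n",
		m["n"], m["pi"], m["ok"], m["name"], m["when"])
	// int64 float64 bool string time.Time
}
```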
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - if !numUnderscoresOK(it.val) { - p.panicf("Invalid integer %q: underscores must be surrounded by digits", - it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseInt(val, 10, 64) - if err != nil { - // Distinguish integer values. Normally, it'd be a bug if the lexer - // provides an invalid integer, but it's possible that the number is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - parts := strings.FieldsFunc(it.val, func(r rune) bool { - switch r { - case '.', 'e', 'E': - return true - } - return false - }) - for _, part := range parts { - if !numUnderscoresOK(part) { - p.panicf("Invalid float %q: underscores must be "+ - "surrounded by digits", it.val) - } - } - if !numPeriodsOK(it.val) { - // As a special case, numbers like '123.' or '1.e2', - // which are valid as far as Go/strconv are concerned, - // must be rejected because TOML says that a fractional - // part consists of '.' followed by 1+ digits. - p.panicf("Invalid float %q: '.' must be followed "+ - "by one or more digits", it.val) - } - val := strings.Replace(it.val, "_", "", -1) - num, err := strconv.ParseFloat(val, 64) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.panicf("Invalid float value: %q", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - var t time.Time - var ok bool - var err error - for _, format := range []string{ - "2006-01-02T15:04:05Z07:00", - "2006-01-02T15:04:05", - "2006-01-02", - } { - t, err = time.ParseInLocation(format, it.val, time.Local) - if err == nil { - ok = true - break - } - } - if !ok { - p.panicf("Invalid TOML Datetime: %q.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// numUnderscoresOK checks whether each underscore in s is surrounded by -// characters that are not underscores. 
-func numUnderscoresOK(s string) bool { - accept := false - for _, r := range s { - if r == '_' { - if !accept { - return false - } - accept = false - continue - } - accept = true - } - return accept -} - -// numPeriodsOK checks whether every period in s is followed by a digit. -func numPeriodsOK(s string) bool { - period := false - for _, r := range s { - if period && !isDigit(r) { - return false - } - period = r == '.' - } - return !period -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. 
However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) 
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - if !utf8.ValidRune(rune(hex)) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164b..0000000 --- a/vendor/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8af..0000000 --- a/vendor/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. 
-// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. - if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 608997c..0000000 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,242 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. 
- for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" && !sf.Anonymous { // unexported - continue - } - opts := getOptions(sf.Tag) - if opts.skip { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := opts.name != "" - name := opts.name - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
-func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index e50771f..56fdfc2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -44,7 +44,7 @@ type Error interface { // BatchError is a batch of errors which also wraps lower level errors with // code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. +// that occurred in the batch. // // Deprecated: Replaced with BatchedErrors. Only defined for backwards // compatibility. @@ -64,7 +64,7 @@ type BatchError interface { // BatchedErrors is a batch of errors which also wraps lower level errors with // code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. +// that occurred in the batch. // // Replaces BatchError type BatchedErrors interface { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index e2d333b..0202a00 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -98,7 +98,7 @@ func (b baseError) OrigErr() error { return NewBatchError(err.Code(), err.Message(), b.errs[1:]) } return NewBatchError("BatchedErrors", - "multiple errors occured", b.errs) + "multiple errors occurred", b.errs) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go index 8429470..1a3d106 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -3,6 +3,7 @@ package awsutil import ( "io" "reflect" + "time" ) // Copy deeply copies a src structure to dst. 
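The *time.Time special case introduced in the hunk below is worth a closer look: time.Time keeps its state in unexported fields, which reflection cannot set one field at a time, but assigning the whole pointed-to value with a single Set copies the unexported state intact. A minimal, self-contained sketch of the technique (the helper name is illustrative, not part of the SDK):

    package main

    import (
        "fmt"
        "reflect"
        "time"
    )

    // copyTimePtr deep-copies a *time.Time the same way the hunk below
    // does: allocate a new element, then copy the pointed-to value in one
    // Set call so the unexported fields come along.
    func copyTimePtr(src *time.Time) *time.Time {
        tmp := reflect.New(reflect.TypeOf(src).Elem())
        tmp.Elem().Set(reflect.ValueOf(src).Elem())
        return tmp.Interface().(*time.Time)
    }

    func main() {
        now := time.Now()
        cp := copyTimePtr(&now)
        fmt.Println(cp.Equal(now), cp != &now) // true true
    }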
Useful for copying request and @@ -49,7 +50,14 @@ func rcopy(dst, src reflect.Value, root bool) { } else { e := src.Type().Elem() if dst.CanSet() && !src.IsNil() { - dst.Set(reflect.New(e)) + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } } if src.Elem().IsValid() { // Keep the current root state since the depth hasn't changed diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index c8d0564..4003c04 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -87,9 +87,18 @@ const logReqMsg = `DEBUG: Request %s/%s Details: %s -----------------------------------------------------` +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + func logRequest(r *request.Request) { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } if logBody { // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's @@ -107,11 +116,21 @@ const logRespMsg = `DEBUG: Response %s/%s Details: %s -----------------------------------------------------` +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + func logResponse(r *request.Request) { var msg = "no response data" if r.HTTPResponse != nil { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + msg = string(dumpedBody) } else if r.Error != nil { msg = r.Error.Error() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index da72935..fca9225 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -7,24 +7,36 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" ) -// UseServiceDefaultRetries instructs the config to use the service's own default -// number of retries. This will be the default action if Config.MaxRetries -// is nil also. +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. const UseServiceDefaultRetries = -1 -// RequestRetryer is an alias for a type that implements the request.Retryer interface. +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the {defaults.DefaultConfig} structure. +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients.
+// sess, err := session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// }) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) type Config struct { // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to retreive - // credentials. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. CredentialsChainVerboseErrors *bool - // The credentials object to use when signing requests. Defaults to - // a chain of credential providers to search for credentials in environment + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment // variables, shared credential file, and EC2 Instance Roles. Credentials *credentials.Credentials @@ -63,11 +75,12 @@ type Config struct { Logger Logger // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service specific - // configuration. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. MaxRetries *int - // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, // the request.DefaultRetryer will be used. @@ -82,8 +95,8 @@ type Config struct { // Retryer RequestRetryer - // Disables semantic parameter validation, which validates input for missing - // required fields and/or other semantic request input errors. + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. DisableParamValidation *bool // Disables the computation of request and response checksums, e.g., @@ -91,8 +104,8 @@ type Config struct { DisableComputeChecksums *bool // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will - // use virtual hosted bucket addressing when possible + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // // @note This configuration option is specific to the Amazon S3 service. @@ -109,36 +122,63 @@ type Config struct { // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html // // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. - // https://golang.org/pkg/net/http/#Transport + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experiance issues - // with proxies or thrid party S3 compatible services. + // You should use this flag to disable 100-Continue if you experience issues + // with proxies or third party S3 compatible services. S3Disable100Continue *bool - // Set this to `true` to enable S3 Accelerate feature. For all operations compatible - // with S3 Accelerate will use the accelerate endpoint for requests.
Requests not compatible - will fall back to normal S3 requests. + // Set this to `true` to enable S3 Accelerate feature. All operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. // - // The bucket must be enable for accelerate to be used with S3 client with accelerate - // enabled. If the bucket is not enabled for accelerate an error will be returned. - // The bucket name must be DNS compatible to also work with accelerate. + // The bucket must be enabled for accelerate to be used with the S3 client + // with accelerate enabled. If the bucket is not enabled for accelerate an + // error will be returned. The bucket name must be DNS compatible to also + // work with accelerate. + // + // Not compatible with UseDualStack; requests will fail if both flags are + // specified. S3UseAccelerate *bool // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata - // client to create a new http.Client. This options is only meaningful if you're not - // already using a custom HTTP client with the SDK. Enabled by default. + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This option is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. // - // Must be set and provided to the session.New() in order to disable the EC2Metadata - // overriding the timeout for default credentials chain. + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. // // Example: - // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true)) + // + // svc := s3.New(sess) // EC2MetadataDisableTimeoutOverride *bool + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session, even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided, the UseDualStack flag + // will be ignored. + // + // Example: + // + // sess, err := session.NewSession() + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + // SleepDelay is an override for the func the SDK will call when sleeping // during the lifecycle of a request. Specifically this will be used for // request delays. This value should only be used for testing. To adjust @@ -147,11 +187,19 @@ type Config struct { SleepDelay func(time.Duration) } -// NewConfig returns a new Config pointer that can be chained with builder methods to -// set multiple configuration values inline without using pointers. +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers.
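The UseDualStack flag documented above composes with the same builder style as the rest of Config. A hedged sketch of per-client usage built only from APIs added in this patch (region and service choice are illustrative):

    sess, err := session.NewSession()
    if err != nil {
        // handle Session creation error
    }
    // S3 client resolving the IPv4/IPv6 dual stack endpoint for this one
    // client only, rather than session-wide, per the guidance above.
    svc := s3.New(sess, aws.NewConfig().
        WithRegion("us-west-2").
        WithUseDualStack(true))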
// -// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess, err := session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// ) // +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) func NewConfig() *Config { return &Config{} } @@ -254,6 +302,13 @@ func (c *Config) WithS3UseAccelerate(enable bool) *Config { return c } +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { @@ -340,6 +395,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + if other.EC2MetadataDisableTimeoutOverride != nil { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 6f07560..4f5dab3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -30,13 +30,22 @@ func NewStaticCredentials(id, secret, token string) *Credentials { }}) } +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provided. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields. +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + // Retrieve returns the credentials or error if the credentials are invalid. func (s *StaticProvider) Retrieve() (Value, error) { if s.AccessKeyID == "" || s.SecretAccessKey == "" { return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty } - s.Value.ProviderName = StaticProviderName + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } return s.Value, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 0000000..30c847a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,161 @@ +// Package stscreds provides credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides the name of the AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + TokenCode *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause requests to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation.
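Tying the pieces above together, NewCredentials is typically wired straight into a client's Config; a sketch under this patch's API (the role ARN is a placeholder, and the option closure simply sets the Duration and ExpiryWindow fields documented above):

    sess, err := session.NewSession()
    if err != nil {
        // handle Session creation error
    }
    creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
        func(p *stscreds.AssumeRoleProvider) {
            p.Duration = 30 * time.Minute
            p.ExpiryWindow = 10 * time.Second
        })
    // Service clients sign requests with the assumed role's credentials.
    svc := s3.New(sess, &aws.Config{Credentials: creds})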
+// +// Takes an AssumeRoler which can be satisfied by the STS client. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil && p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } + roleOutput, err := p.Client.AssumeRole(input) + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 570417f..10b7d86 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -90,12 +90,14 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti Providers: []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - remoteCredProvider(*cfg, handlers), + RemoteCredProvider(*cfg, handlers), }, }) } -func remoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") if len(ecsCredURI) > 0 { @@ -118,7 +120,7 @@ func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) cred func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, - aws.StringValue(cfg.Region), true) + aws.StringValue(cfg.Region), true, false) return &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 0000000..097d323 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,223 @@ +/* +Package session provides configuration for the SDK's service clients.
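Before the new session package starts below, note what exporting RemoteCredProvider in the defaults.go hunk above enables: the ECS/EC2 remote provider can now be constructed directly rather than only through the full default chain. A sketch, assuming the defaults package's Config() and Handlers() helpers:

    cfg := defaults.Config().WithRegion("us-east-1")
    // RemoteCredProvider picks the ECS task-role endpoint when
    // AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set, otherwise the EC2
    // instance role via the EC2 metadata service.
    provider := defaults.RemoteCredProvider(*cfg, defaults.Handlers())
    creds := credentials.NewCredentials(provider)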
+ +Sessions can be shared across all service clients that share the same base configuration. The Session is built from the SDK's default configuration and request handlers. + +Sessions should be cached when possible, because creating a new Session will load all configuration values from the environment and config files each time the Session is created. Sharing the Session value across all of your service clients will ensure the configuration is loaded the fewest number of times possible. + +Concurrency + +Sessions are safe to use concurrently as long as the Session is not being modified. The SDK will not modify the Session once the Session has been created. Creating service clients concurrently from a shared Session is safe. + +Sessions from Shared Config + +Sessions can be created using the method above that will only load the additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. Alternatively you can explicitly create a Session with shared config enabled. To do this you can use NewSessionWithOptions to configure how the Session will be created. Using NewSessionWithOptions with SharedConfigState set to SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG environment variable was set. + +Creating Sessions + +When creating Sessions optional aws.Config values can be passed in that will override the default, or loaded config values the Session is being created with. This allows you to provide additional, or case based, configuration as needed. + +By default NewSession will only load credentials from the shared credentials file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value the Session will be created from the configuration values from the shared config (~/.aws/config) and shared credentials (~/.aws/credentials) files. See the section Sessions from Shared Config for more information. + +Create a Session with the default config and request handlers. With credentials, region, and profile loaded from the environment and shared config automatically. Requires the AWS_PROFILE to be set, or "default" is used. + + // Create Session + sess, err := session.NewSession() + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) + + // Create an S3 client instance from a session + sess, err := session.NewSession() + if err != nil { + // Handle Session creation error + } + svc := s3.New(sess) + +Create Session With Option Overrides + +In addition to NewSession, Sessions can be created using NewSessionWithOptions. This func allows you to control and override how the Session will be created through code instead of being driven by environment variables only. + +Use NewSessionWithOptions when you want to provide the config profile, or override the shared config state (AWS_SDK_LOAD_CONFIG).
+ + // Equivalent to session.New + sess, err := session.NewSessionWithOptions(session.Options{}) + + // Specify profile to load for the session's config + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "profile_name", + }) + + // Specify profile for config and region for requests + sess, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{Region: aws.String("us-east-1")}, + Profile: "profile_name", + }) + + // Force enable Shared Config support + sess, err := session.NewSessionWithOptions(session.Options{ + SharedConfigState: SharedConfigEnable, + }) + +Adding Handlers + +You can add handlers to a session for processing HTTP requests. All service clients that use the session inherit the handlers. For example, the following handler logs every request and its payload made by a service client: + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess, err := session.NewSession() + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Println("Request: %s/%s, Payload: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Deprecated "New" function + +The New session function has been deprecated because it does not provide a good way to return errors that occur when loading the configuration files and values. Because of this, NewSession was created so errors can be retrieved when creating a session fails. + +Shared Config Fields + +By default the SDK will only load the shared credentials file's (~/.aws/credentials) credentials values, and all other config is provided by the environment variables, SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable option is used to create the Session the full shared config values will be loaded. This includes credentials, region, and support for assume role. In addition the Session will load its configuration from both the shared config file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both files have the same format. + +If both config files are present the configuration from both files will be read. The Session will be created from configuration values from the shared credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). + +Credentials are the values the SDK should use for authenticating requests with AWS Services. Credentials from a configuration file must include both aws_access_key_id and aws_secret_access_key, provided together in the same file, to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of the other two fields are also provided. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + +Assume Role values allow you to configure the SDK to assume an IAM role using a set of credentials provided in a config file via the source_profile field. Both "role_arn" and "source_profile" are required. The SDK does not support assuming a role with an MFA token via the Session's constructor. You can use the stscreds.AssumeRoleProvider credentials provider to specify custom configuration and support for MFA. + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = not supported!
+ role_session_name = session_name + +Region is the region the SDK should use for looking up AWS service endpoints and signing requests. + + region = us-east-1 + +Environment Variables + +When a Session is created several environment variables can be set to adjust how the SDK functions, and what configuration data it loads when creating Sessions. All environment values are optional, but some values like credentials require multiple of the values to be set or the partial values will be ignored. All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set, both Access Key ID and Secret Access Key must be provided. Session Token can optionally also be provided, but is not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If it is not provided in the environment, the region must be provided before a service client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should use when loading shared config from the configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to shared credentials. This also expands the configuration loaded so the shared credentials will have parity with the shared config file. This also enables Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative file for the shared credentials. If not set the file will be loaded from $HOME/.aws/credentials on Linux/Unix based systems, and %USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative file for the shared config. If not set the file will be loaded from $HOME/.aws/config on Linux/Unix based systems, and %USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + + +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 0000000..d2f0c84 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,188 @@ +package session + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set, both Access Key ID and Secret Access + // Key must be provided. Session Token can optionally also be provided, but is + // not required.
+ // +// # Access Key ID +// AWS_ACCESS_KEY_ID=AKID +// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. +// +// # Secret Access Key +// AWS_SECRET_ACCESS_KEY=SECRET +// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. +// +// # Session Token +// AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If it is + // not provided in the environment, the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadSharedEnvConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 6bc8f1b..2374b1f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,17 +1,14 @@ -// Package session provides a way to create service clients with shared configuration -// and handlers. -// -// Generally this package should be used instead of the `defaults` package. -// -// A session should be used to share configurations and request handlers between multiple -// service clients. When service clients need specific configuration aws.Config can be -// used to provide additional configuration directly to the service client. package session import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/endpoints" @@ -21,36 +18,199 @@ import ( // store configurations and request handlers for those services. // // Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the session concurrently. +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ClientConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers } -// New creates a new instance of the handlers merging in the provided Configs -// on top of the SDK's default configurations. 
Once the session is created it
-// can be mutated to modify Configs or Handlers. The session is safe to be read
-// concurrently, but it should not be written to concurrently.
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method could now encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		s, err := newSession(envCfg, cfgs...)
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+				"Use session.NewSession to handle errors occurring during session creation."
+
+			// Session creation failed, need to report the error and prevent
+			// any requests from succeeding.
+			s = &Session{Config: defaults.Config()}
+			s.Config.MergeIn(cfgs...)
+			s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+			s.Handlers.Validate.PushBack(func(r *request.Request) {
+				r.Error = err
+			})
+		}
+		return s
+	}
+
+	return oldNewSession(cfgs...)
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	envCfg := loadEnvConfig()
+
+	return newSession(envCfg, cfgs...)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set with this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config files will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
 //
-// Example:
-//     // Create a session with the default config and request handlers.
-//     sess := session.New()
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
 //
-//     // Create a session with a custom region
-//     sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//	// Equivalent to session.New
+//	sess, err := session.NewSessionWithOptions(session.Options{})
 //
-//     // Create a session, and add additional handlers for all service
-//     // clients created with the session to inherit. Adds logging handler.
-//     sess := session.New()
-//     sess.Handlers.Send.PushFront(func(r *request.Request) {
-//         // Log every request made and its payload
-//         logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+//	// Specify profile to load for the session's config
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//	    Profile: "profile_name",
 //	})
 //
-//	// Create a S3 client instance from a session
-//	sess := session.New()
-//	svc := s3.New(sess)
-func New(cfgs ...*aws.Config) *Session {
+//	// Specify profile for config and region for requests
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//	    Config: aws.Config{Region: aws.String("us-east-1")},
+//	    Profile: "profile_name",
+//	})
+//
+//	// Force enable Shared Config support
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//	    SharedConfigState: SharedConfigEnable,
+//	})
+func NewSessionWithOptions(opts Options) (*Session, error) {
+	envCfg := loadEnvConfig()
+
+	if len(opts.Profile) > 0 {
+		envCfg.Profile = opts.Profile
+	}
+
+	switch opts.SharedConfigState {
+	case SharedConfigDisable:
+		envCfg.EnableSharedConfig = false
+	case SharedConfigEnable:
+		envCfg.EnableSharedConfig = true
+	}
+
+	return newSession(envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+//	var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+	if err != nil {
+		panic(err)
+	}
+
+	return sess
+}
+
+func oldNewSession(cfgs ...*aws.Config) *Session {
 	cfg := defaults.Config()
 	handlers := defaults.Handlers()
@@ -72,6 +232,115 @@ func New(cfgs ...*aws.Config) *Session {
 	return s
 }
 
+func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were provided.
+	userCfg := &aws.Config{}
+	userCfg.MergeIn(cfgs...)
+
+	// Order the config files will be loaded in, with later files overwriting
+	// previous config file values.
+	cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+	if !envCfg.EnableSharedConfig {
+		// The shared config file (~/.aws/config) is only loaded if instructed
+		// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
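+		// Dropping the first entry below leaves only the shared credentials
+		// file, envCfg.SharedCredentialsFile, to be loaded.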
+		cfgFiles = cfgFiles[1:]
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+	if err != nil {
+		return nil, err
+	}
+
+	mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+
+	return s, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
+	// Merge in user provided configuration
+	cfg.MergeIn(userCfg)
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	// Configure credentials if not already set
+	if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+		if len(envCfg.Creds.AccessKeyID) > 0 {
+			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+				envCfg.Creds,
+			)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+			cfgCp := *cfg
+			cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+				sharedCfg.AssumeRoleSource.Creds,
+			)
+			cfg.Credentials = stscreds.NewCredentials(
+				&Session{
+					Config:   &cfgCp,
+					Handlers: handlers.Copy(),
+				},
+				sharedCfg.AssumeRole.RoleARN,
+				func(opt *stscreds.AssumeRoleProvider) {
+					opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+					if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+						opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+					}
+
+					// MFA not supported
+				},
+			)
+		} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+			cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+				sharedCfg.Creds,
+			)
+		} else {
+			// Fallback to default credentials provider, include mock errors
+			// for the credential chain so user can identify why credentials
+			// failed to be retrieved.
+			cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+				VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+				Providers: []credentials.Provider{
+					&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+					&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+					defaults.RemoteCredProvider(*cfg, handlers),
+				},
+			})
+		}
+	}
+}
+
+type credProviderError struct {
+	Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
+
 func initHandlers(s *Session) {
 	// Add the Validate parameter handler if it is not disabled.
 	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
@@ -80,12 +349,11 @@ func initHandlers(s *Session) {
 	}
 }
 
-// Copy creates and returns a copy of the current session, coping the config
+// Copy creates and returns a copy of the current Session, copying the config
 // and handlers. If any additional configs are provided they will be merged
-// on top of the session's copied config.
+// on top of the Session's copied config.
 //
-// Example:
-//   // Create a copy of the current session, configured for the us-west-2 region.
+//	// Create a copy of the current Session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ @@ -101,15 +369,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -// -// Example: -// sess := session.New() -// s3.New(sess) func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) endpoint, signingRegion := endpoints.NormalizeEndpoint( - aws.StringValue(s.Config.Endpoint), serviceName, - aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + aws.StringValue(s.Config.Endpoint), + serviceName, + aws.StringValue(s.Config.Region), + aws.BoolValue(s.Config.DisableSSL), + aws.BoolValue(s.Config.UseDualStack), + ) return client.Config{ Config: s.Config, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000..0147eed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,294 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. 
If the order
+// of the files is A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	if err = cfg.setFromIniFiles(profile, files); err != nil {
+		return sharedConfig{}, err
+	}
+
+	if len(cfg.AssumeRole.SourceProfile) > 0 {
+		if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+			return sharedConfig{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		if _, err := os.Stat(filename); os.IsNotExist(err) {
+			// Trim files from the list that don't exist.
+			continue
+		}
+
+		f, err := ini.Load(filename)
+		if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: f,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+	var assumeRoleSrc sharedConfig
+
+	// Multiple level assume role chains are not supported.
+	if cfg.AssumeRole.SourceProfile == origProfile {
+		assumeRoleSrc = *cfg
+		assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+	} else {
+		err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+	}
+
+	cfg.AssumeRoleSource = &assumeRoleSrc
+
+	return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+	// Set the config from each file, in order.
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles that are missing from a file.
+				continue
+			}
+			return err
+		}
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For example
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
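+//
+// For example (an illustrative profile; the values are placeholders), the
+// following section sets a complete credentials group and a region, while an
+// aws_access_key_id given without an aws_secret_access_key would be dropped:
+//
+//	[default]
+//	aws_access_key_id = AKID
+//	aws_secret_access_key = SECRET
+//	region = us-west-2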
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+	section, err := file.IniData.GetSection(profile)
+	if err != nil {
+		// Fall back to the alternate profile name ("profile <name>").
+		section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if err != nil {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+		}
+	}
+
+	// Shared Credentials
+	akid := section.Key(accessKeyIDKey).String()
+	secret := section.Key(secretAccessKey).String()
+	if len(akid) > 0 && len(secret) > 0 {
+		cfg.Creds = credentials.Value{
+			AccessKeyID:     akid,
+			SecretAccessKey: secret,
+			SessionToken:    section.Key(sessionTokenKey).String(),
+			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+		}
+	}
+
+	// Assume Role
+	roleArn := section.Key(roleArnKey).String()
+	srcProfile := section.Key(sourceProfileKey).String()
+	if len(roleArn) > 0 && len(srcProfile) > 0 {
+		cfg.AssumeRole = assumeRoleConfig{
+			RoleARN:         roleArn,
+			SourceProfile:   srcProfile,
+			ExternalID:      section.Key(externalIDKey).String(),
+			MFASerial:       section.Key(mfaSerialKey).String(),
+			RoleSessionName: section.Key(roleSessionNameKey).String(),
+		}
+	}
+
+	// Region
+	if v := section.Key(regionKey).String(); len(v) > 0 {
+		cfg.Region = v
+	}
+
+	return nil
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+		e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
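+// It returns nil, as SharedConfigAssumeRoleError wraps no underlying error.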
+func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index f040f9c..7d99f54 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -545,7 +545,7 @@ func (ctx *signingCtx) buildBodyDigest() { } else { hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.ServiceName == "s3" { + if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 97a3f57..82f76c4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.2.5" +const SDKVersion = "1.4.4" diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go index 2b279e6..b4ad740 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -14,9 +14,9 @@ import ( // normalized endpoint and signing region. If the endpoint is not an empty string // the service name and region will be used to look up the service's API endpoint. // If the endpoint is provided the scheme will be added if it is not present. -func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) { if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL) + return EndpointForRegion(serviceName, region, disableSSL, useDualStack) } return AddScheme(endpoint, disableSSL), "" @@ -24,12 +24,17 @@ func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (n // EndpointForRegion returns an endpoint and its signing region for a service and region. // if the service and region pair are not found endpoint and signingRegion will be empty. 
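+//
+// For example (illustrative): with useDualStack set, a lookup for service "s3"
+// in region "us-west-2" tries the keys "us-west-2/s3/dualstack",
+// "us-west-2/*/dualstack", "*/s3/dualstack", and "*/*/dualstack" in order; the
+// "*/s3/dualstack" entry then yields the s3.dualstack.{region}.amazonaws.com
+// endpoint defined in endpoints.json.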
-func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { +func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) { + dualStackField := "" + if useDualStack { + dualStackField = "/dualstack" + } + derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", + region + "/" + svcName + dualStackField, + region + "/*" + dualStackField, + "*/" + svcName + dualStackField, + "*/*" + dualStackField, } for _, key := range derivedKeys { diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json index 5f4991c..c5bf3c7 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -65,6 +65,9 @@ "*/s3": { "endpoint": "s3-{region}.amazonaws.com" }, + "*/s3/dualstack": { + "endpoint": "s3.dualstack.{region}.amazonaws.com" + }, "us-east-1/s3": { "endpoint": "s3.amazonaws.com" }, diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go index e995315..a81d158 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -48,6 +48,9 @@ var endpointsMap = endpointStruct{ "*/s3": { Endpoint: "s3-{region}.amazonaws.com", }, + "*/s3/dualstack": { + Endpoint: "s3.dualstack.{region}.amazonaws.com", + }, "*/sts": { Endpoint: "sts.amazonaws.com", SigningRegion: "us-east-1", diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 56d69db..c705481 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,4 +1,4 @@ -// Package query provides serialisation of AWS query requests, and responses. +// Package query provides serialization of AWS query requests, and responses. package query //go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index c74088b..c74b97e 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -1,4 +1,4 @@ -// Package restxml provides RESTful XML serialisation of AWS +// Package restxml provides RESTful XML serialization of AWS // requests and responses. package restxml diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index ceb4132..221029b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -1,4 +1,4 @@ -// Package xmlutil provides XML serialisation of AWS requests and responses. +// Package xmlutil provides XML serialization of AWS requests and responses. 
package xmlutil import ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 5132954..553b0e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1242,7 +1242,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req return } -// Deprecated, see the GetBucketReplicationConfiguration operation. +// Returns the replication configuration of a bucket. func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) err := req.Send() diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index 5172929..ccbf5cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -37,6 +37,14 @@ var accelerateOpBlacklist = operationBlacklist{ func updateEndpointForS3Config(r *request.Request) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + useDualStack := aws.BoolValue(r.Config.UseDualStack) + + if useDualStack && accelerate { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"), + nil) + return + } if accelerate && accelerateOpBlacklist.Continue(r) { if forceHostStyle { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 59e4950..ed91c58 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -23,6 +23,8 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { r.Error = awserr.NewRequestFailure( awserr.New("BucketRegionError", @@ -35,25 +37,29 @@ func unmarshalError(r *request.Request) { return } - if r.HTTPResponse.ContentLength == 0 { - // No body, use status code to generate an awserr.Error - r.Error = awserr.NewRequestFailure( - awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } + var errCode, errMsg string + // Attempt to parse error from body if it is known resp := &xmlErrorResponse{} err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + errCode = "SerializationError" + errMsg = "failed to decode S3 XML error response" } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) + errCode = resp.Code + errMsg = resp.Message } + + // Fallback to status code converted to message if still no error code + if len(errCode) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errCode = strings.Replace(statusText, " ", "", -1) + errMsg = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errCode, errMsg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 0000000..f11e867 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,1653 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package sts provides a client for AWS Security Token Service. +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. 
For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to +// make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. 
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy.
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+//	"Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRoleWithSAML method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//	// Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + req = c.newRequest(op, input, output) + output = &AssumeRoleWithSAMLOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. +// +// The temporary security credentials created by AssumeRoleWithSAML can be +// used to make API calls to any AWS service with the following exception: you +// cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you +// choose not to pass a policy, the temporary security credentials that are +// returned by the operation have the permissions that are defined in the access +// policy of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure +// your SAML identity provider (IdP) to issue the claims required by AWS. 
Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + err := req.Send() + return out, err +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+//	req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You
+// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
+// a maximum of 3600 seconds (1 hour). The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity
+// can be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you
+// choose not to pass a policy, the temporary security credentials that are
+// returned by the operation have the permissions that are defined in the access
+// policy of the role that is being assumed.
If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to +// further restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security credentials, +// and then using those credentials to make a request to AWS. +// +// AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps +// that show how to invoke the identity providers, and then how to use the information +// from these providers to get and use temporary security credentials. +// +// Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// This article discusses web identity federation and shows an example of how +// to use web identity federation to get access to content in Amazon S3. +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + req = c.newRequest(op, input, output) + output = &DecodeAuthorizationMessageOutput{} + req.Data = output + return +} + +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status +// can constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// Whether the request was denied due to an explicit deny or due to the absence +// of an explicit allow. For more information, see Determining Whether a Request +// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// The principal who made the request. +// +// The requested action. +// +// The requested resource. +// +// The values of condition keys in the context of the user's request. +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + err := req.Send() + return out, err +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &GetCallerIdentityOutput{} + req.Data = output + return +} + +// Returns details about the IAM identity whose credentials are used to call +// the API. +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + err := req.Send() + return out, err +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetFederationTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a federated user. +// A typical use is in a proxy application that gets temporary security credentials +// on behalf of distributed applications inside a corporate network. Because +// you must call the GetFederationToken action using the long-term security +// credentials of an IAM user, this call is appropriate in contexts where those +// credentials can be safely stored, usually in a server-based application. 
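+//
+// A minimal sketch of such a proxy call (the session, name, and duration here
+// are illustrative assumptions, not part of this operation's documentation):
+//
+//    svc := sts.New(mySession)
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("Bob"),
+//        DurationSeconds: aws.Int64(3600),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }
+//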
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS
+// security credentials of an IAM user. You can also call GetFederationToken
+// using the security credentials of an AWS root account, but we do not recommend
+// it. Instead, we recommend that you create an IAM user for the purpose of
+// the proxy application and then attach a policy to the IAM user that limits
+// federated users to only the actions and resources that they need access to.
+// For more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be
+// used to make API calls to any AWS service with the following exceptions:
+//
+// You cannot use these credentials to call any IAM APIs.
+//
+// You cannot call any STS APIs.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+// The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken.
+//
+// The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need.
Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + err := req.Send() + return out, err +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSessionTokenOutput{} + req.Data = output + return +} + +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. If you do not supply a correct MFA +// code, then the API returns an access denied error. 
For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
+//
+// You cannot call any STS API except AssumeRole.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	//
+	// This is separate from the duration of a console session that you might
+	// request using the returned credentials. The request to the federation endpoint
+	// for a console sign-in token takes a SessionDuration parameter that specifies
+	// the maximum length of the console session, separately from the DurationSeconds
+	// parameter on this API. For more information, see Creating a URL that Enables
+	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
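+	//
+	// For example (an illustrative value; aws.Int64 is the SDK's pointer helper):
+	//
+	//    input := &sts.AssumeRoleInput{DurationSeconds: aws.Int64(1800)} // 30 minutes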
+ DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that is used by third parties when assuming roles in + // their customers' accounts. For each role that the third party can assume, + // they should instruct their customers to ensure the role's trust policy checks + // for the external ID that the third party generated. Each time the third party + // assumes the role, they should pass the customer's external ID. The external + // ID is useful in order to help third parties bind a role to the customer who + // created it. For more information about the external ID, see How to Use an + // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@:\/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format. + // + // This parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both (the intersection of) the access policy of the role that + // is being assumed, and the policy that you pass. This gives you a way to further + // restrict the permissions for the resulting temporary security credentials. + // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. 
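+	//
+	// For example, a session name such as "app-user-1234" (an illustrative value,
+	// not from the AWS documentation) lets you trace requests made with the
+	// resulting credentials back to a specific end user in CloudTrail.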
+ // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. 
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// Note: The size of the security token that STS APIs return is not fixed.
+	// We strongly recommend that you make no assumptions about the maximum size.
+	// As of this writing, the typical size is less than 4096 bytes, but that can
+	// vary. Also, future updates to AWS might require larger sizes.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+	return s.String()
+}
+
+type AssumeRoleWithSAMLInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds. An expiration can also be specified in the SAML authentication
+	// response's SessionNotOnOrAfter value. The actual expiration time is whichever
+	// value is shorter.
+	//
+	// This is separate from the duration of a console session that you might
+	// request using the returned credentials. The request to the federation endpoint
+	// for a console sign-in token takes a SessionDuration parameter that specifies
+	// the maximum length of the console session, separately from the DurationSeconds
+	// parameter on this API. For more information, see Enabling SAML 2.0 Federated
+	// Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict
+	// the permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+ // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base-64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the Using IAM guide. + SAMLAssertion *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict + // the permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. 
The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. 
The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// AWS credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. 
+ UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). 
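+	//
+	// An illustrative scope-down policy (the bucket name is hypothetical):
+	//
+	//    {"Version": "2012-10-17", "Statement": [{"Effect": "Allow",
+	//    "Action": "s3:GetObject", "Resource": "arn:aws:s3:::example-bucket/*"}]}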
+ Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). 
+ // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters consisting of upper- and lower-case alphanumeric characters + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, and the user does not provide a code when requesting a set of + // temporary security credentials, the user will receive an "access denied" + // response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSessionToken request, including +// temporary AWS credentials that can be used to make AWS requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + // As of this writing, the typical size is less than 4096 bytes, but that can + // vary. Also, future updates to AWS might require larger sizes. 
+ Credentials *Credentials `type:"structure"` +} + +// String returns the string representation +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000..4010cc7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go new file mode 100644 index 0000000..c938e6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -0,0 +1,130 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package sts + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). 
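+//
+// As a sketch of typical client use (mySession here stands for an assumed,
+// pre-configured session and is not part of the generated documentation):
+//
+//    svc := sts.New(mySession)
+//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(*out.Arn)
+//    }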
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sts"
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index 2a7cfd2..bb67332 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -1,3 +1,5 @@
+ISC License
+
 Copyright (c) 2012-2013 Dave Collins
 
 Permission to use, copy, modify, and distribute this software for any
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644
index 0000000..ac034e5
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+	go test -v -cover -race
+
+bench:
+	go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+	go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
index 1272038..a939d75 100644
--- a/vendor/github.com/go-ini/ini/README.md
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -1,4 +1,4 @@
-ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
 ===
 
 ![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
 
@@ -22,13 +22,29 @@ Package ini provides INI file read and write functionality in Go.
 
 ## Installation
 
+To use a tagged revision:
+
 	go get gopkg.in/ini.v1
 
+To use the latest changes:
+
+	go get github.com/go-ini/ini
+
+Add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to run the tests on your machine, apply the `-t` flag:
+
+	go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update in the future.
+
 ## Getting Started
 
 ### Loading from data sources
 
-A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error.
+A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error.
 
 ```go
 cfg, err := ini.Load([]byte("raw data"), "filename")
@@ -40,12 +56,56 @@ Or start with an empty object:
 cfg := ini.Empty()
 ```
 
-When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later.
+When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
 
 ```go
 err := cfg.Append("other file", []byte("other raw data"))
 ```
 
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a file becomes available to load while you're calling the `Reload` method, it will be loaded as usual.
+
+#### Ignore case of key names
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration file allows a key without a value, as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can handle them with advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of those keys is always `true`, and when you save to a file, they will be kept in the same format as you read them.
+
 ### Working with sections
 
 To get a section, you would need to:
@@ -95,6 +155,12 @@ Same rule applies to key operations:
 key := cfg.Section("").Key("key name")
 ```
 
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
 To create a new key:
 
 ```go
@@ -111,7 +177,7 @@ names := cfg.Section("").KeyStrings()
 To get a clone hash of keys and corresponding values:
 
 ```go
-hash := cfg.GetSection("").KeysHash()
+hash := cfg.Section("").KeysHash()
 ```
 
 ### Working with values
@@ -133,12 +199,24 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string {
 })
 ```
 
+If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
 To get value with types:
 
 ```go
 // For boolean values:
-// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
-// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
 v, err = cfg.Section("").Key("BOOL").Bool()
 v, err = cfg.Section("").Key("FLOAT64").Float64()
 v, err = cfg.Section("").Key("INT").Int()
@@ -212,6 +290,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines
 cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
 ```
 
+Well, I hate continuation lines, how do I disable that?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+	IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap!
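+
+Both options above are just fields on the same `ini.LoadOptions` struct, so they can be combined freely. A minimal sketch (reusing the `my.cnf` content from the boolean-key section; the file, section, and key names are only illustrative):
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+	AllowBooleanKeys:   true,
+	IgnoreContinuation: true,
+}, "my.cnf")
+if err != nil {
+	// handle error
+}
+// A boolean key has no value in the file but always reads back as true.
+on, _ := cfg.Section("mysqld").Key("skip-host-cache").Bool() // on == true
+```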
+
 Note that single quotes around values will be stripped:
 
 ```ini
@@ -250,9 +338,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min
 vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
 ```
 
-To auto-split value into slice:
+##### Auto-split values into a slice
+
+To use the zero value of the type for invalid inputs:
 
 ```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
 vals = cfg.Section("").Key("STRINGS").Strings(",")
 vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
 vals = cfg.Section("").Key("INTS").Ints(",")
@@ -262,6 +354,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",")
 vals = cfg.Section("").Key("TIMES").Times(",")
 ```
 
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
 ### Save your configuration
 
 Finally, it's time to save your configuration to somewhere.
@@ -323,6 +441,12 @@ CLONE_URL = https://%(IMPORT_PATH)s
 cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
 ```
 
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
 ### Auto-increment Key Names
 
 If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and every section has an independent counter.
@@ -407,8 +531,8 @@ Why not?
 
 ```go
 type Embeded struct {
 	Dates  []time.Time `delim:"|"`
-	Places []string
-	None   []int
+	Places []string    `ini:"places,omitempty"`
+	None   []int       `ini:",omitempty"`
 }
 
 type Author struct {
@@ -443,8 +567,7 @@ GPA = 2.8
 
 [Embeded]
 Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-Places = HangZhou,Boston
-None =
+places = HangZhou,Boston
 ```
 
 #### Name Mapper
@@ -464,7 +587,7 @@ type Info struct {
 }
 
 func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
 	// ...
 
 	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
@@ -478,6 +601,26 @@ func main() {
 
 Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
 
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
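+
+Put together, a self-contained version of the sketch above might look like this (setting `MY_VAR` inline is purely for illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/ini.v1"
+)
+
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	os.Setenv("MY_VAR", "expanded")
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	if err != nil {
+		panic(err)
+	}
+	// ValueMapper runs on every value read, so ${MY_VAR} expands on access.
+	cfg.ValueMapper = os.ExpandEnv
+
+	env := &Env{}
+	if err = cfg.Section("env").MapTo(env); err != nil {
+		panic(err)
+	}
+	fmt.Println(env.Foo) // expanded
+}
+```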
+
 #### Other Notes On Map/Reflect
 
 Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
index 45e19ed..2178e47 100644
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -15,8 +15,24 @@
 
 ## 下载安装
 
+使用一个特定版本:
+
 	go get gopkg.in/ini.v1
 
+使用最新版:
+
+	go get github.com/go-ini/ini
+
+如需更新请添加 `-u` 选项。
+
+### 测试安装
+
+如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
+
+	go get -t gopkg.in/ini.v1
+
+如需更新请添加 `-u` 选项。
+
 ## 开始使用
 
 ### 从数据源加载
@@ -39,6 +55,50 @@ cfg := ini.Empty()
 err := cfg.Append("other file", []byte("other raw data"))
 ```
 
+当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
+
+#### 忽略键名的大小写
+
+有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 和 sec2 指向同一个分区对象
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 和 key2 指向同一个键对象
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### 类似 MySQL 配置中的布尔值键
+
+MySQL 的配置文件中会出现没有具体值的布尔类型的键:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
+
 ### 操作分区(Section)
 
 获取指定分区:
@@ -88,6 +148,12 @@ key, err := cfg.Section("").GetKey("key name")
 key := cfg.Section("").Key("key name")
 ```
 
+判断某个键是否存在:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
 创建一个新的键:
 
 ```go
@@ -104,7 +170,7 @@ names := cfg.Section("").KeyStrings()
 获取分区下的所有键值对的克隆:
 
 ```go
-hash := cfg.GetSection("").KeysHash()
+hash := cfg.Section("").KeysHash()
 ```
 
 ### 操作键值(Value)
@@ -126,12 +192,24 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string {
 })
 ```
 
+如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+判断某个原值是否存在:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
 获取其它类型的值:
 
 ```go
 // 布尔值的规则:
-// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
-// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
 v, err = cfg.Section("").Key("BOOL").Bool()
 v, err = cfg.Section("").Key("FLOAT64").Float64()
 v, err = cfg.Section("").Key("INT").Int()
@@ -205,6 +283,16 @@ cfg.Section("advance").Key("two_lines").String() // how about continuation lines
 cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
 ```
 
+可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+	IgnoreContinuation: true,
+}, "filename")
+```
+
+哇靠给力啊!
+
 需要注意的是,值两侧的单引号会被自动剔除:
 
 ```ini
@@ -243,9 +331,13 @@ vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), min
 vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
 ```
 
-自动分割键值为切片(slice):
+##### 自动分割键值到切片(slice)
+
+当存在无效输入时,使用零值代替:
 
 ```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
 vals = cfg.Section("").Key("STRINGS").Strings(",")
 vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
 vals = cfg.Section("").Key("INTS").Ints(",")
@@ -255,6 +347,32 @@ vals = cfg.Section("").Key("UINT64S").Uint64s(",")
 vals = cfg.Section("").Key("TIMES").Times(",")
 ```
 
+从结果切片中剔除无效输入:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+当存在无效输入时,直接返回错误:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
 ### 保存配置
 
 终于到了这个时刻,是时候保存一下配置了。
@@ -316,6 +434,12 @@ CLONE_URL = https://%(IMPORT_PATH)s
 cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
 ```
 
+#### 获取上级父分区下的所有键名
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
 #### 读取自增键名
 
 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
@@ -398,8 +522,8 @@ p := &Person{
 
 ```go
 type Embeded struct {
 	Dates  []time.Time `delim:"|"`
-	Places []string
-	None   []int
+	Places []string    `ini:"places,omitempty"`
+	None   []int       `ini:",omitempty"`
 }
 
 type Author struct {
@@ -434,8 +558,7 @@ GPA = 2.8
 
 [Embeded]
 Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-Places = HangZhou,Boston
-None =
+places = HangZhou,Boston
 ```
 
 #### 名称映射器(Name Mapper)
@@ -455,7 +578,7 @@ type Info struct{
 }
 
 func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
 	// ...
 
 	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
@@ -469,6 +592,26 @@ func main() {
 
 使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
 
+#### 值映射器(Value Mapper)
+
+值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
+
 #### 映射/反射的其它说明
 
 任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 0000000..80afe74
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+)
+
+type ErrDelimiterNotFound struct {
+	Line string
+}
+
+func IsErrDelimiterNotFound(err error) bool {
+	_, ok := err.(ErrDelimiterNotFound)
+	return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index 1fee789..763f998 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -16,7 +16,6 @@
 package ini
 
 import (
-	"bufio"
 	"bytes"
 	"errors"
 	"fmt"
@@ -31,25 +30,35 @@ import (
 )
 
 const (
+	// Name for default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
 	DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
 	_DEPTH_VALUES = 99
-
-	_VERSION = "1.6.0"
+	_VERSION      = "1.21.0"
 )
 
+// Version returns current package version literal.
func Version() string {
	return _VERSION
}
 
 var (
+	// Delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows
+	// at package init time.
 	LineBreak = "\n"
 
 	// Variable regexp pattern: %(variable)s
 	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
 
-	// Write spaces around "=" to look better.
+	// Indicate whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
 	PrettyFormat = true
+
+	// Explicitly write DEFAULT section header
+	DefaultHeader = false
 )
 
 func init() {
@@ -67,11 +76,12 @@ func inSlice(str string, s []string) bool {
 	return false
 }
 
-// dataSource is a interface that returns file content.
+// dataSource is an interface that returns an object which can be read and closed.
 type dataSource interface {
 	ReadCloser() (io.ReadCloser, error)
 }
 
+// sourceFile represents an object that contains content on the local file system.
 type sourceFile struct {
 	name string
 }
@@ -92,6 +102,7 @@ func (rc *bytesReadCloser) Close() error {
 	return nil
 }
 
+// sourceData represents an object that contains content in memory.
 type sourceData struct {
 	data []byte
 }
@@ -100,589 +111,6 @@ func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
 	return &bytesReadCloser{bytes.NewReader(s.data)}, nil
 }
 
-// ____ __.
-// | |/ _|____ ___.__.
-// | <_/ __ < | |
-// | | \ ___/\___ |
-// |____|__ \___ > ____|
-// \/ \/\/
-
-// Key represents a key under a section.
-type Key struct {
-	s *Section
-	Comment string
-	name string
-	value string
-	isAutoIncr bool
-}
-
-// Name returns name of key.
-func (k *Key) Name() string {
-	return k.name
-}
-
-// Value returns raw value of key for performance purpose.
-func (k *Key) Value() string {
-	return k.value
-}
-
-// String returns string representation of value.
-func (k *Key) String() string {
-	val := k.value
-	if strings.Index(val, "%") == -1 {
-		return val
-	}
-
-	for i := 0; i < _DEPTH_VALUES; i++ {
-		vr := varPattern.FindString(val)
-		if len(vr) == 0 {
-			break
-		}
-
-		// Take off leading '%(' and trailing ')s'.
- noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. 
-func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string devide by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 devide by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int devide by given delimiter. -func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 devide by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint devide by given delimiter. 
-func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 devide by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - k.value = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. - key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// KeyStrings returns list of key names of section. 
-func (s *Section) KeyStrings() []string {
-	list := make([]string, len(s.keyList))
-	copy(list, s.keyList)
-	return list
-}
-
-// KeysHash returns keys hash consisting of names and values.
-func (s *Section) KeysHash() map[string]string {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	hash := map[string]string{}
-	for key, value := range s.keysHash {
-		hash[key] = value
-	}
-	return hash
-}
-
-// DeleteKey deletes a key from section.
-func (s *Section) DeleteKey(name string) {
-	if s.f.BlockMode {
-		s.f.lock.Lock()
-		defer s.f.lock.Unlock()
-	}
-
-	for i, k := range s.keyList {
-		if k == name {
-			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
-			delete(s.keys, name)
-			return
-		}
-	}
-}
-
-// ___________.__.__
-// \_ _____/|__| | ____
-// | __) | | | _/ __ \
-// | \ | | |_\ ___/
-// \___ / |__|____/\___ >
-// \/ \/
-
 // File represents a combination of one or more INI file(s) in memory.
 type File struct {
 	// Should make things safe, but sometimes doesn't matter.
@@ -698,16 +126,20 @@ type File struct {
 	// To keep data in order.
 	sectionList []string
 
+	options LoadOptions
+
 	NameMapper
+	ValueMapper
 }
 
 // newFile initializes File object with given data sources.
-func newFile(dataSources []dataSource) *File {
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
 	return &File{
 		BlockMode:   true,
 		dataSources: dataSources,
 		sections:    make(map[string]*Section),
 		sectionList: make([]string, 0, 10),
+		options:     opts,
 	}
 }
 
@@ -722,9 +154,19 @@ func parseDataSource(source interface{}) (dataSource, error) {
 	}
 }
 
-// Load loads and parses from INI data sources.
-// Arguments can be mixed of file name with string type, or raw data in []byte.
-func Load(source interface{}, others ...interface{}) (_ *File, err error) {
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return an error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat them as missing values.
+	// This type of key is mostly used in my.cnf.
+	AllowBooleanKeys bool
+}
+
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
 	sources := make([]dataSource, len(others)+1)
 	sources[0], err = parseDataSource(source)
 	if err != nil {
@@ -736,8 +178,30 @@ func Load(source interface{}, others ...interface{}) (_ *File, err error) {
 			return nil, err
 		}
 	}
-	f := newFile(sources)
-	return f, f.Reload()
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names with string type, or raw data in []byte.
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) } // Empty returns an empty file object. @@ -751,6 +215,8 @@ func Empty() *File { func (f *File) NewSection(name string) (*Section, error) { if len(name) == 0 { return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) } if f.BlockMode { @@ -781,6 +247,8 @@ func (f *File) NewSections(names ...string) (err error) { func (f *File) GetSection(name string) (*Section, error) { if len(name) == 0 { name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) } if f.BlockMode { @@ -790,7 +258,7 @@ func (f *File) GetSection(name string) (*Section, error) { sec := f.sections[name] if sec == nil { - return nil, fmt.Errorf("error when getting section: section '%s' not exists", name) + return nil, fmt.Errorf("section '%s' does not exist", name) } return sec, nil } @@ -843,240 +311,6 @@ func (f *File) DeleteSection(name string) { } } -func cutComment(str string) string { - i := strings.Index(str, "#") - if i == -1 { - return str - } - return str[:i] -} - -func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) { - isEnd := false - for { - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", err - } - isEnd = true - } - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - break - } - val += next - if isEnd { - return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) { - isEnd := false - for { - valLen := len(val) - if valLen == 0 || val[valLen-1] != '\\' { - break - } - val = val[:valLen-1] - - next, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return "", isEnd, err - } - isEnd = true - } - - next = strings.TrimSpace(next) - if len(next) == 0 { - break - } - val += next - } - return val, isEnd, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) error { - buf := bufio.NewReader(reader) - - // Handle BOM-UTF8. - // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding - mask, err := buf.Peek(3) - if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - buf.Read(mask) - } - - count := 1 - comments := "" - isEnd := false - - section, err := f.NewSection(DEFAULT_SECTION) - if err != nil { - return err - } - - for { - line, err := buf.ReadString('\n') - line = strings.TrimSpace(line) - length := len(line) - - // Check error and ignore io.EOF just for a moment. - if err != nil { - if err != io.EOF { - return fmt.Errorf("error reading next line: %v", err) - } - // The last line of file could be an empty line. - if length == 0 { - break - } - isEnd = true - } - - // Skip empty lines. - if length == 0 { - continue - } - - switch { - case line[0] == '#' || line[0] == ';': // Comments. - if len(comments) == 0 { - comments = line - } else { - comments += LineBreak + line - } - continue - case line[0] == '[' && line[length-1] == ']': // New sction. - section, err = f.NewSection(strings.TrimSpace(line[1 : length-1])) - if err != nil { - return err - } - - if len(comments) > 0 { - section.Comment = comments - comments = "" - } - // Reset counter. 
- count = 1 - continue - } - - // Other possibilities. - var ( - i int - keyQuote string - kname string - valQuote string - val string - ) - - // Key name surrounded by quotes. - if line[0] == '"' { - if length > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - if len(keyQuote) > 0 { - qLen := len(keyQuote) - pos := strings.Index(line[qLen:], keyQuote) - if pos == -1 { - return fmt.Errorf("error parsing line: missing closing key quote: %s", line) - } - pos = pos + qLen - i = strings.IndexAny(line[pos:], "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == pos { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - i = i + pos - kname = line[qLen:pos] // Just keep spaces inside quotes. - } else { - i = strings.IndexAny(line, "=:") - if i < 0 { - return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line) - } else if i == 0 { - return fmt.Errorf("error parsing line: key is empty: %s", line) - } - kname = strings.TrimSpace(line[0:i]) - } - - isAutoIncr := false - // Auto increment. - if kname == "-" { - isAutoIncr = true - kname = "#" + fmt.Sprint(count) - count++ - } - - lineRight := strings.TrimSpace(line[i+1:]) - lineRightLength := len(lineRight) - firstChar := "" - if lineRightLength >= 2 { - firstChar = lineRight[0:1] - } - if firstChar == "`" { - valQuote = "`" - } else if firstChar == `"` { - if lineRightLength >= 3 && lineRight[0:3] == `"""` { - valQuote = `"""` - } else { - valQuote = `"` - } - } else if firstChar == `'` { - valQuote = `'` - } - - if len(valQuote) > 0 { - qLen := len(valQuote) - pos := strings.LastIndex(lineRight[qLen:], valQuote) - // For multiple-line value check. - if pos == -1 { - if valQuote == `"` || valQuote == `'` { - return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line) - } - - val = lineRight[qLen:] + "\n" - val, err = checkMultipleLines(buf, line, val, valQuote) - if err != nil { - return err - } - } else { - val = lineRight[qLen : pos+qLen] - } - } else { - val = strings.TrimSpace(cutComment(lineRight)) - val, isEnd, err = checkContinuationLines(buf, val) - if err != nil { - return err - } - } - - k, err := section.NewKey(kname, val) - if err != nil { - return err - } - k.isAutoIncr = isAutoIncr - if len(comments) > 0 { - k.Comment = comments - comments = "" - } - - if isEnd { - break - } - } - return nil -} - func (f *File) reload(s dataSource) error { r, err := s.ReadCloser() if err != nil { @@ -1091,6 +325,11 @@ func (f *File) reload(s dataSource) error { func (f *File) Reload() (err error) { for _, s := range f.dataSources { if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } return err } } @@ -1114,7 +353,9 @@ func (f *File) Append(source interface{}, others ...interface{}) error { return f.Reload() } -// WriteToIndent writes file content into io.Writer with given value indention. +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. 
 func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
 	equalSign := "="
 	if PrettyFormat {
@@ -1134,17 +375,38 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
 			}
 		}
 
-		if i > 0 {
+		if i > 0 || DefaultHeader {
 			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
 				return 0, err
 			}
 		} else {
-			// Write nothing if default section is empty.
+			// Write nothing if default section is empty
 			if len(sec.keyList) == 0 {
 				continue
 			}
 		}
 
+		// Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+		// we need to take that into account in our calculation.
+		alignLength := 0
+		if PrettyFormat {
+			for _, kname := range sec.keyList {
+				keyLength := len(kname)
+				// First case will surround key by ` and second by """
+				if strings.ContainsAny(kname, "\"=:") {
+					keyLength += 2
+				} else if strings.Contains(kname, "`") {
+					keyLength += 6
+				}
+
+				if keyLength > alignLength {
+					alignLength = keyLength
+				}
+			}
+		}
+		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
 		for _, kname := range sec.keyList {
 			key := sec.Key(kname)
 			if len(key.Comment) > 0 {
@@ -1164,26 +426,39 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
 			}
 
 			switch {
-			case key.isAutoIncr:
+			case key.isAutoIncrement:
 				kname = "-"
-			case strings.Contains(kname, "`") || strings.Contains(kname, `"`):
-				kname = `"""` + kname + `"""`
-			case strings.Contains(kname, `=`) || strings.Contains(kname, `:`):
+			case strings.ContainsAny(kname, "\"=:"):
 				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+			if _, err = buf.WriteString(kname); err != nil {
+				return 0, err
+			}
+
+			if key.isBooleanType {
+				continue
+			}
+
+			// Write out alignment spaces before "=" sign
+			if PrettyFormat {
+				buf.Write(alignSpaces[:alignLength-len(kname)])
 			}
 
 			val := key.value
-			// In case key value contains "\n", "`" or "\"".
-			if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) ||
-				strings.Contains(val, "#") {
+			// In case key value contains "\n", "`", "\"", "#" or ";"
+			if strings.ContainsAny(val, "\n`") {
 				val = `"""` + val + `"""`
+			} else if strings.ContainsAny(val, "#;") {
+				val = "`" + val + "`"
 			}
-			if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+			if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
 				return 0, err
 			}
 		}
 
-		// Put a line between sections.
+		// Put a line between sections
 		if _, err = buf.WriteString(LineBreak); err != nil {
 			return 0, err
 		}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000..9738c55
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,633 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	Comment string
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	val := k.value
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+	if strings.Index(val, "%") == -1 {
+		return val
+	}
+
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		k.value = defaultVal
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. 
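+//
+// For example (an illustrative note, not part of the upstream file),
+// a key holding "1, 2, 3" yields three elements, since surrounding
+// spaces are trimmed from each element:
+//
+//	cfg.Section("").Key("ids").Strings(",") // []string{"1", "2", "3"}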
+func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. 
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. 
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000..dc6df87 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,325 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool {
+	return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+		strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
+	line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+	if len(line) == 0 {
+		return "", nil
+	}
+
+	var valQuote string
+	if len(line) > 3 && string(line[0:3]) == `"""` {
+		valQuote = `"""`
+	} else if line[0] == '`' {
+		valQuote = "`"
+	}
+
+	if len(valQuote) > 0 {
+		startIdx := len(valQuote)
+		pos := strings.LastIndex(line[startIdx:], valQuote)
+		// Check for multi-line value
+		if pos == -1 {
+			return p.readMultilines(line, line[startIdx:], valQuote)
+		}
+
+		return line[startIdx : pos+startIdx], nil
+	}
+
+	// Won't be able to reach here if value only contains whitespace.
+	line = strings.TrimSpace(line)
+
+	// Check continuation lines when desired.
+	if !ignoreContinuation && line[len(line)-1] == '\\' {
+		return p.readContinuationLines(line[:len(line)-1])
+	}
+
+	i := strings.IndexAny(line, "#;")
+	if i > -1 {
+		p.comment.WriteString(line[i:])
+		line = strings.TrimSpace(line[:i])
+	}
+
+	// Trim single quotes
+	if hasSurroundedQuote(line, '\'') ||
+		hasSurroundedQuote(line, '"') {
+		line = line[1 : len(line)-1]
+	}
+	return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+	p := newParser(reader)
+	if err = p.BOM(); err != nil {
+		return fmt.Errorf("BOM: %v", err)
+	}
+
+	// Ignore error because default section name is never empty string.
+	section, _ := f.NewSection(DEFAULT_SECTION)
+
+	var line []byte
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we do not care about the ending line break,
+			// it is needed for adding second line,
+			// so just clean it once at the end when set to value.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+			closeIdx := bytes.LastIndex(line, []byte("]"))
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			name := string(line[1:closeIdx])
+			section, err = f.NewSection(name)
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			// Treat as boolean key when desired, and whole line is key name.
+			if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+				key, err := section.NewKey(string(line), "true")
+				if err != nil {
+					return err
+				}
+				key.isBooleanType = true
+				key.Comment = strings.TrimSpace(p.comment.String())
+				p.comment.Reset()
+				continue
+			}
+			return err
+		}
+
+		// Auto increment.
+ isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 0000000..bbb73ca --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,206 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{ + s: s, + name: name, + value: val, + } + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. 
+func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go index c118437..eddf7a0 100644 --- a/vendor/github.com/go-ini/ini/struct.go +++ b/vendor/github.com/go-ini/ini/struct.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "reflect" + "strings" "time" "unicode" ) @@ -76,6 +77,59 @@ func parseDelim(actual string) string { var reflectTime = reflect.TypeOf(time.Now()).Kind() +// setSliceWithProperType sets proper values to slice based on its type. 
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+	strs := key.Strings(delim)
+	numVals := len(strs)
+	if numVals == 0 {
+		return nil
+	}
+
+	var vals interface{}
+
+	sliceOf := field.Type().Elem().Kind()
+	switch sliceOf {
+	case reflect.String:
+		vals = strs
+	case reflect.Int:
+		vals = key.Ints(delim)
+	case reflect.Int64:
+		vals = key.Int64s(delim)
+	case reflect.Uint:
+		vals = key.Uints(delim)
+	case reflect.Uint64:
+		vals = key.Uint64s(delim)
+	case reflect.Float64:
+		vals = key.Float64s(delim)
+	case reflectTime:
+		vals = key.Times(delim)
+	default:
+		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+	}
+
+	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+	for i := 0; i < numVals; i++ {
+		switch sliceOf {
+		case reflect.String:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+		case reflect.Int:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+		case reflect.Int64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+		case reflect.Uint:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+		case reflect.Uint64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+		case reflect.Float64:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+		case reflectTime:
+			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+		}
+	}
+	field.Set(slice)
+	return nil
+}
+
 // setWithProperType sets proper value to field based on its type,
 // but it does not return error for failing parsing,
 // because we want to use default value that is already assigned to struct.
@@ -94,20 +148,22 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
 		field.SetBool(boolVal)
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		durationVal, err := key.Duration()
-		if err == nil {
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
 			field.Set(reflect.ValueOf(durationVal))
 			return nil
 		}
 
 		intVal, err := key.Int64()
-		if err != nil {
+		if err != nil || intVal == 0 {
 			return nil
 		}
 		field.SetInt(intVal)
 	// byte is an alias for uint8, so supporting uint8 breaks support for byte
 	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
 		durationVal, err := key.Duration()
-		if err == nil {
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
 			field.Set(reflect.ValueOf(durationVal))
 			return nil
 		}
@@ -131,29 +187,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
 		}
 		field.Set(reflect.ValueOf(timeVal))
 	case reflect.Slice:
-		vals := key.Strings(delim)
-		numVals := len(vals)
-		if numVals == 0 {
-			return nil
-		}
-
-		sliceOf := field.Type().Elem().Kind()
-
-		var times []time.Time
-		if sliceOf == reflectTime {
-			times = key.Times(delim)
-		}
-
-		slice := reflect.MakeSlice(field.Type(), numVals, numVals)
-		for i := 0; i < numVals; i++ {
-			switch sliceOf {
-			case reflectTime:
-				slice.Index(i).Set(reflect.ValueOf(times[i]))
-			default:
-				slice.Index(i).Set(reflect.ValueOf(vals[i]))
-			}
-		}
-		field.Set(slice)
+		return setSliceWithProperType(key, field, delim)
 	default:
 		return fmt.Errorf("unsupported type '%s'", t)
 	}
@@ -238,40 +272,81 @@ func MapTo(v, source interface{}, others ...interface{}) error {
 	return MapToWithMapper(v, nil, source, others...)
 }
 
-// reflectWithProperType does the opposite thing with setWithProperType.
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { switch t.Kind() { case reflect.String: key.SetValue(field.String()) - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float64, - reflectTime: - key.SetValue(fmt.Sprint(field)) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) case reflect.Slice: - vals := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - isTime := fmt.Sprint(field.Type()) == "[]time.Time" - for i := 0; i < field.Len(); i++ { - if isTime { - buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339)) - } else { - buf.WriteString(fmt.Sprint(vals.Index(i))) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) + return reflectSliceWithProperType(key, field, delim) default: return fmt.Errorf("unsupported type '%s'", t) } return nil } +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
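+//
+// isEmptyValue backs the new `omitempty` tag option handled in
+// reflectFrom below. A hypothetical struct using it (an illustrative
+// sketch, not part of the upstream file; the `ini` tag name is assumed
+// from the surrounding package):
+//
+//	type Config struct {
+//		Host string `ini:"host,omitempty"` // skipped when empty
+//	}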
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + func (s *Section) reflectFrom(val reflect.Value) error { if val.Kind() == reflect.Ptr { val = val.Elem() @@ -287,13 +362,18 @@ func (s *Section) reflectFrom(val reflect.Value) error { continue } - fieldName := s.parseFieldName(tpField.Name, tag) + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) if len(fieldName) == 0 || !field.CanSet() { continue } if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct) { + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { // Note: The only error here is section doesn't exist. sec, err := s.f.GetSection(fieldName) if err != nil { @@ -301,7 +381,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { sec, _ = s.f.NewSection(fieldName) } if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } continue } @@ -312,7 +392,7 @@ func (s *Section) reflectFrom(val reflect.Value) error { key, _ = s.NewKey(fieldName, "") } if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field(%s): %v", fieldName, err) + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) } } diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 8d91665..6025399 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -729,16 +729,19 @@ func (rows *textRows) readRow(dest []driver.Value) error { func (mc *mysqlConn) readUntilEOF() error { for { data, err := mc.readPacket() - - // No Err and no EOF Packet - if err == nil && data[0] != iEOF { - continue - } - if err == nil && data[0] == iEOF && len(data) == 5 { - mc.status = readStatus(data[3:]) + if err != nil { + return err } - return err // Err or EOF + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } } } diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 482f3e9..6b9b363 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -500,6 +500,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { registeredExtensions := RegisteredExtensions(pb) emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } mu.Lock() defer mu.Unlock() extensions := make([]*ExtensionDesc, 0, len(emap)) diff --git a/vendor/github.com/golang/protobuf/proto/lib.go 
b/vendor/github.com/golang/protobuf/proto/lib.go index 170b8e8..ac4ddbc 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -308,7 +308,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 0b8c59f..7e6f145 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -44,6 +44,9 @@ import ( "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -508,8 +511,16 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { if err != nil { return p.errorf("failed to marshal message of type %q: %v", messageName, err) } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } sv.FieldByName("TypeUrl").SetString(extName) sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true continue } diff --git a/vendor/github.com/google/go-github/github/activity_notifications.go b/vendor/github.com/google/go-github/github/activity_notifications.go index 8890388..b538a7b 100644 --- a/vendor/github.com/google/go-github/github/activity_notifications.go +++ b/vendor/github.com/google/go-github/github/activity_notifications.go @@ -96,20 +96,17 @@ func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *N } type markReadOptions struct { - LastReadAt time.Time `url:"last_read_at,omitempty"` + LastReadAt time.Time `json:"last_read_at,omitempty"` } // MarkNotificationsRead marks all notifications up to lastRead as read. 
// // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) { - u := fmt.Sprintf("notifications") - u, err := addOptions(u, markReadOptions{lastRead}) - if err != nil { - return nil, err + opts := &markReadOptions{ + LastReadAt: lastRead, } - - req, err := s.client.NewRequest("PUT", u, nil) + req, err := s.client.NewRequest("PUT", "notifications", opts) if err != nil { return nil, err } @@ -122,13 +119,11 @@ func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, // // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) { - u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) - u, err := addOptions(u, markReadOptions{lastRead}) - if err != nil { - return nil, err + opts := &markReadOptions{ + LastReadAt: lastRead, } - - req, err := s.client.NewRequest("PUT", u, nil) + u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo) + req, err := s.client.NewRequest("PUT", u, opts) if err != nil { return nil, err } diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go index 58fcc4e..35ce9a9 100644 --- a/vendor/github.com/google/go-github/github/authorizations.go +++ b/vendor/github.com/google/go-github/github/authorizations.go @@ -398,7 +398,7 @@ func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) { return s.client.Do(req, nil) } -// Create an impersonation OAuth token. +// CreateImpersonation creates an impersonation OAuth token. // // This requires admin permissions. With the returned Authorization.Token // you can e.g. create or delete a user's public SSH key. NOTE: creating a @@ -420,7 +420,7 @@ func (s *AuthorizationsService) CreateImpersonation(username string, authReq *Au return a, resp, err } -// Delete an impersonation OAuth token. +// DeleteImpersonation deletes an impersonation OAuth token. // // NOTE: there can be only one at a time. // diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go index ba7b089..2b61068 100644 --- a/vendor/github.com/google/go-github/github/doc.go +++ b/vendor/github.com/google/go-github/github/doc.go @@ -22,8 +22,8 @@ Some API methods have optional parameters that can be passed. For example: client := github.NewClient(nil) - // list recently updated repositories for org "github" - opt := &github.RepositoryListByOrgOptions{Sort: "updated"} + // list public repositories for org "github" + opt := &github.RepositoryListByOrgOptions{Type: "public"} repos, _, err := client.Repositories.ListByOrg("github", opt) The services of a client divide the API into logical chunks and correspond to diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go index f3e163d..20f509a 100644 --- a/vendor/github.com/google/go-github/github/event_types.go +++ b/vendor/github.com/google/go-github/github/event_types.go @@ -123,8 +123,9 @@ type GollumEvent struct { Sender *User `json:"sender,omitempty"` } -// DEPRECATED: IssueActivityEvent represents the payload delivered by Issue webhook -// Use IssuesEvent instead. +// IssueActivityEvent represents the payload delivered by Issue webhook. +// +// Deprecated: Use IssuesEvent instead. 
type IssueActivityEvent struct { Action *string `json:"action,omitempty"` Issue *Issue `json:"issue,omitempty"` diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go index 4faf09c..8448982 100644 --- a/vendor/github.com/google/go-github/github/github.go +++ b/vendor/github.com/google/go-github/github/github.go @@ -81,6 +81,12 @@ const ( // https://developer.github.com/changes/2016-04-21-oauth-authorizations-grants-api-preview/ mediaTypeOAuthGrantAuthorizationsPreview = "application/vnd.github.damage-preview+json" + + // https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/ + mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json" + + // https://developer.github.com/v3/repos/traffic/ + mediaTypeTrafficPreview = "application/vnd.github.spiderman-preview+json" ) // A Client manages communication with the GitHub API. @@ -220,9 +226,12 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ return nil, err } - req.Header.Add("Accept", mediaTypeV3) + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + req.Header.Set("Accept", mediaTypeV3) if c.UserAgent != "" { - req.Header.Add("User-Agent", c.UserAgent) + req.Header.Set("User-Agent", c.UserAgent) } return req, nil } @@ -243,12 +252,12 @@ func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, m } req.ContentLength = size - if len(mediaType) == 0 { + if mediaType == "" { mediaType = defaultMediaType } - req.Header.Add("Content-Type", mediaType) - req.Header.Add("Accept", mediaTypeV3) - req.Header.Add("User-Agent", c.UserAgent) + req.Header.Set("Content-Type", mediaType) + req.Header.Set("Accept", mediaTypeV3) + req.Header.Set("User-Agent", c.UserAgent) return req, nil } @@ -408,7 +417,7 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { // checkRateLimitBeforeDo does not make any network calls, but uses existing knowledge from // current client state in order to quickly check if *RateLimitError can be immediately returned -// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unneccessarily. +// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily. // Otherwise it returns nil, and Client.Do should proceed normally. func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) error { c.rateMu.Lock() @@ -632,6 +641,8 @@ func category(path string) rateLimitCategory { } } +// RateLimit returns the core rate limit for the current client. +// // Deprecated: RateLimit is deprecated, use RateLimits instead. 
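+//
+// A sketch of the preferred replacement (illustrative only, not part of
+// the upstream file):
+//
+//	limits, _, err := client.RateLimits()
+//	if err == nil {
+//		fmt.Println(limits.Core.Remaining)
+//	}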
func (c *Client) RateLimit() (*Rate, *Response, error) { limits, resp, err := c.RateLimits() @@ -756,7 +767,7 @@ func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error req = cloneRequest(req) // per RoundTrip contract req.SetBasicAuth(t.Username, t.Password) if t.OTP != "" { - req.Header.Add(headerOTP, t.OTP) + req.Header.Set(headerOTP, t.OTP) } return t.transport().RoundTrip(req) } diff --git a/vendor/github.com/google/go-github/github/migrations_source_import.go b/vendor/github.com/google/go-github/github/migrations_source_import.go index 6ed4acf..44505fa 100644 --- a/vendor/github.com/google/go-github/github/migrations_source_import.go +++ b/vendor/github.com/google/go-github/github/migrations_source_import.go @@ -163,7 +163,7 @@ func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import, return out, resp, err } -// QueryImport queries for the status and progress of an ongoing repository import. +// ImportProgress queries for the status and progress of an ongoing repository import. // // GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-import-progress func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Response, error) { diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go index e71055c..d137e3e 100644 --- a/vendor/github.com/google/go-github/github/orgs.go +++ b/vendor/github.com/google/go-github/github/orgs.go @@ -27,6 +27,7 @@ type Organization struct { Blog *string `json:"blog,omitempty"` Location *string `json:"location,omitempty"` Email *string `json:"email,omitempty"` + Description *string `json:"description,omitempty"` PublicRepos *int `json:"public_repos,omitempty"` PublicGists *int `json:"public_gists,omitempty"` Followers *int `json:"followers,omitempty"` @@ -45,6 +46,8 @@ type Organization struct { // API URLs URL *string `json:"url,omitempty"` EventsURL *string `json:"events_url,omitempty"` + HooksURL *string `json:"hooks_url,omitempty"` + IssuesURL *string `json:"issues_url,omitempty"` MembersURL *string `json:"members_url,omitempty"` PublicMembersURL *string `json:"public_members_url,omitempty"` ReposURL *string `json:"repos_url,omitempty"` diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go index 0900766..0611507 100644 --- a/vendor/github.com/google/go-github/github/pulls.go +++ b/vendor/github.com/google/go-github/github/pulls.go @@ -42,7 +42,8 @@ type PullRequest struct { StatusesURL *string `json:"statuses_url,omitempty"` DiffURL *string `json:"diff_url,omitempty"` PatchURL *string `json:"patch_url,omitempty"` - Assignee *User `json:"assignee,omitempty"` // probably only in webhooks + Assignee *User `json:"assignee,omitempty"` + Assignees []*User `json:"assignees,omitempty"` Head *PullRequestBranch `json:"head,omitempty"` Base *PullRequestBranch `json:"base,omitempty"` diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go index fb402ee..2bcaacc 100644 --- a/vendor/github.com/google/go-github/github/repos.go +++ b/vendor/github.com/google/go-github/github/repos.go @@ -54,6 +54,7 @@ type Repository struct { Private *bool `json:"private"` HasIssues *bool `json:"has_issues"` HasWiki *bool `json:"has_wiki"` + HasPages *bool `json:"has_pages"` HasDownloads *bool `json:"has_downloads"` // Creating an organization repository. Required for non-owners. 
 	TeamID *int `json:"team_id"`
@@ -70,6 +71,7 @@ type Repository struct {
 	CompareURL *string `json:"compare_url,omitempty"`
 	ContentsURL *string `json:"contents_url,omitempty"`
 	ContributorsURL *string `json:"contributors_url,omitempty"`
+	DeploymentsURL *string `json:"deployments_url,omitempty"`
 	DownloadsURL *string `json:"downloads_url,omitempty"`
 	EventsURL *string `json:"events_url,omitempty"`
 	ForksURL *string `json:"forks_url,omitempty"`
diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go
index 8594edc..ccd24f3 100644
--- a/vendor/github.com/google/go-github/github/repos_pages.go
+++ b/vendor/github.com/google/go-github/github/repos_pages.go
@@ -13,6 +13,7 @@ type Pages struct {
 	Status *string `json:"status,omitempty"`
 	CNAME *string `json:"cname,omitempty"`
 	Custom404 *bool `json:"custom_404,omitempty"`
+	HTMLURL *string `json:"html_url,omitempty"`
 }
 
 // PagesError represents a build error for a GitHub Pages site.
@@ -42,6 +43,9 @@ func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *
 		return nil, nil, err
 	}
 
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypePagesPreview)
+
 	site := new(Pages)
 	resp, err := s.client.Do(req, site)
 	if err != nil {
@@ -88,3 +92,25 @@ func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*P
 
 	return build, resp, err
 }
+
+// RequestPageBuild requests a build of a GitHub Pages site without needing to push a new commit.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
+func (s *RepositoriesService) RequestPageBuild(owner string, repo string) (*PagesBuild, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
+	req, err := s.client.NewRequest("POST", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypePagesPreview)
+
+	build := new(PagesBuild)
+	resp, err := s.client.Do(req, build)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return build, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/repos_traffic.go b/vendor/github.com/google/go-github/github/repos_traffic.go
new file mode 100644
index 0000000..b6c8d83
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_traffic.go
@@ -0,0 +1,173 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// TrafficReferrer represents information about traffic from a referrer.
+type TrafficReferrer struct {
+	Referrer *string `json:"referrer,omitempty"`
+	Count *int `json:"count,omitempty"`
+	Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficPath represents information about the traffic on a path of the repo.
+type TrafficPath struct {
+	Path *string `json:"path,omitempty"`
+	Title *string `json:"title,omitempty"`
+	Count *int `json:"count,omitempty"`
+	Uniques *int `json:"uniques,omitempty"`
+}
+
+// TimestampMS represents a timestamp as used in a datapoint.
+//
+// It's only used to parse the result given by the API, which is a unix timestamp in milliseconds.
+type TimestampMS struct {
+	time.Time
+}
+
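+// As a worked example (illustrative, not part of the upstream file): the
+// API returns e.g. 1467331200000, which UnmarshalJSON below converts via
+// time.Unix(1467331200000/1000, 0) into 2016-07-01 00:00:00 UTC.
+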
+// UnmarshalJSON parses a unix timestamp.
+func (t *TimestampMS) UnmarshalJSON(b []byte) error {
+	s := string(b)
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return err
+	}
+	// We can drop the remainder as returned values are days and it will always be 0
+	*t = TimestampMS{time.Unix(i/1000, 0)}
+	return nil
+}
+
+// TrafficData represents information about a specific timestamp in views or clones list.
+type TrafficData struct {
+	Timestamp *TimestampMS `json:"timestamp,omitempty"`
+	Count *int `json:"count,omitempty"`
+	Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficViews represents information about the number of views in the last 14 days.
+type TrafficViews struct {
+	Views []*TrafficData `json:"views,omitempty"`
+	Count *int `json:"count,omitempty"`
+	Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficClones represents information about the number of clones in the last 14 days.
+type TrafficClones struct {
+	Clones []*TrafficData `json:"clones,omitempty"`
+	Count *int `json:"count,omitempty"`
+	Uniques *int `json:"uniques,omitempty"`
+}
+
+// TrafficBreakdownOptions specifies the parameters to methods that support breakdown per day or week.
+// Can be one of: day, week. Default: day.
+type TrafficBreakdownOptions struct {
+	Per string `url:"per,omitempty"`
+}
+
+// ListTrafficReferrers lists the top 10 referrers over the last 14 days.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-referrers
+func (s *RepositoriesService) ListTrafficReferrers(owner, repo string) ([]*TrafficReferrer, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeTrafficPreview)
+
+	trafficReferrers := new([]*TrafficReferrer)
+	resp, err := s.client.Do(req, &trafficReferrers)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return *trafficReferrers, resp, err
+}
+
+// ListTrafficPaths lists the top 10 popular content over the last 14 days.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-paths
+func (s *RepositoriesService) ListTrafficPaths(owner, repo string) ([]*TrafficPath, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeTrafficPreview)
+
+	var paths = new([]*TrafficPath)
+	resp, err := s.client.Do(req, &paths)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return *paths, resp, err
+}
+
+// ListTrafficViews gets total number of views for the last 14 days and breaks it down either per day or week.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views
+func (s *RepositoriesService) ListTrafficViews(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficViews, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo)
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeTrafficPreview)
+
+	trafficViews := new(TrafficViews)
+	resp, err := s.client.Do(req, &trafficViews)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return trafficViews, resp, err
+}
+
+// ListTrafficClones gets total number of clones for the last 14 days and breaks it down either per day or week.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views
+func (s *RepositoriesService) ListTrafficClones(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficClones, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo)
+	u, err := addOptions(u, opt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: remove custom Accept header when this API fully launches.
+	req.Header.Set("Accept", mediaTypeTrafficPreview)
+
+	trafficClones := new(TrafficClones)
+	resp, err := s.client.Do(req, &trafficClones)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return trafficClones, resp, err
+}
diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go
index 0c7ffcb..916a2dc 100644
--- a/vendor/github.com/google/go-github/github/search.go
+++ b/vendor/github.com/google/go-github/github/search.go
@@ -40,8 +40,9 @@ type SearchOptions struct {
 
 // RepositoriesSearchResult represents the result of a repositories search.
 type RepositoriesSearchResult struct {
-	Total *int `json:"total_count,omitempty"`
-	Repositories []Repository `json:"items,omitempty"`
+	Total *int `json:"total_count,omitempty"`
+	IncompleteResults *bool `json:"incomplete_results,omitempty"`
+	Repositories []Repository `json:"items,omitempty"`
 }
 
 // Repositories searches repositories via various criteria.
@@ -55,8 +56,9 @@ func (s *SearchService) Repositories(query string, opt *SearchOptions) (*Reposit
 
 // IssuesSearchResult represents the result of an issues search.
 type IssuesSearchResult struct {
-	Total *int `json:"total_count,omitempty"`
-	Issues []Issue `json:"items,omitempty"`
+	Total *int `json:"total_count,omitempty"`
+	IncompleteResults *bool `json:"incomplete_results,omitempty"`
+	Issues []Issue `json:"items,omitempty"`
 }
 
 // Issues searches issues via various criteria.
@@ -70,8 +72,9 @@ func (s *SearchService) Issues(query string, opt *SearchOptions) (*IssuesSearchR
 
 // UsersSearchResult represents the result of a users search.
 type UsersSearchResult struct {
-	Total *int `json:"total_count,omitempty"`
-	Users []User `json:"items,omitempty"`
+	Total *int `json:"total_count,omitempty"`
+	IncompleteResults *bool `json:"incomplete_results,omitempty"`
+	Users []User `json:"items,omitempty"`
 }
 
 // Users searches users via various criteria.
@@ -104,8 +107,9 @@ func (tm TextMatch) String() string {
 
 // CodeSearchResult represents the result of a code search.
 type CodeSearchResult struct {
-	Total *int `json:"total_count,omitempty"`
-	CodeResults []CodeResult `json:"items,omitempty"`
+	Total *int `json:"total_count,omitempty"`
+	IncompleteResults *bool `json:"incomplete_results,omitempty"`
+	CodeResults []CodeResult `json:"items,omitempty"`
 }
 
 // CodeResult represents a single search result.
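As a quick illustration of the new traffic endpoints above (a sketch under
assumptions, not part of the patch itself: it presumes an authenticated
*github.Client named client and a repository you administer):

	opt := &github.TrafficBreakdownOptions{Per: "week"}
	views, _, err := client.Repositories.ListTrafficViews("owner", "repo", opt)
	if err == nil {
		fmt.Printf("total views: %d (unique: %d)\n", *views.Count, *views.Uniques)
	}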
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
index c60a31b..08f8669 100644
--- a/vendor/github.com/gorilla/context/README.md
+++ b/vendor/github.com/gorilla/context/README.md
@@ -4,4 +4,7 @@ context
 
 gorilla/context is a general purpose registry for global request variables.
 
+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
+
 Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go
index 73c7400..448d1bf 100644
--- a/vendor/github.com/gorilla/context/doc.go
+++ b/vendor/github.com/gorilla/context/doc.go
@@ -5,6 +5,12 @@
 /*
 Package context stores values shared during a request lifetime.
 
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
 For example, a router can set variables extracted from the URL and later
 application handlers can access those values, or it can be used to store
 sessions values to be saved at the end of a request. There are several
diff --git a/vendor/github.com/gorilla/csrf/context.go b/vendor/github.com/gorilla/csrf/context.go
index fe47270..d8bb42f 100644
--- a/vendor/github.com/gorilla/csrf/context.go
+++ b/vendor/github.com/gorilla/csrf/context.go
@@ -23,3 +23,7 @@ func contextSave(r *http.Request, key string, val interface{}) *http.Request {
 	ctx = context.WithValue(ctx, key, val)
 	return r.WithContext(ctx)
 }
+
+func contextClear(r *http.Request) {
+	// no-op for go1.7+
+}
diff --git a/vendor/github.com/gorilla/csrf/context_legacy.go b/vendor/github.com/gorilla/csrf/context_legacy.go
index dabf0a6..f88c9eb 100644
--- a/vendor/github.com/gorilla/csrf/context_legacy.go
+++ b/vendor/github.com/gorilla/csrf/context_legacy.go
@@ -22,3 +22,7 @@ func contextSave(r *http.Request, key string, val interface{}) *http.Request {
 	context.Set(r, key, val)
 	return r
 }
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/vendor/github.com/gorilla/csrf/csrf.go b/vendor/github.com/gorilla/csrf/csrf.go
index dc4755e..b4b0439 100644
--- a/vendor/github.com/gorilla/csrf/csrf.go
+++ b/vendor/github.com/gorilla/csrf/csrf.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/pkg/errors"
 
-	"github.com/gorilla/context"
 	"github.com/gorilla/securecookie"
 )
 
@@ -195,7 +194,7 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		// as it will no longer match the request token.
 		realToken, err = generateRandomBytes(tokenLength)
 		if err != nil {
-			envError(r, err)
+			r = envError(r, err)
 			cs.opts.ErrorHandler.ServeHTTP(w, r)
 			return
 		}
 
@@ -203,7 +202,7 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		// Save the new (real) token in the session store.
err = cs.st.Save(realToken, w) if err != nil { - envError(r, err) + r = envError(r, err) cs.opts.ErrorHandler.ServeHTTP(w, r) return } @@ -225,13 +224,13 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) { // otherwise fails to parse. referer, err := url.Parse(r.Referer()) if err != nil || referer.String() == "" { - envError(r, ErrNoReferer) + r = envError(r, ErrNoReferer) cs.opts.ErrorHandler.ServeHTTP(w, r) return } if sameOrigin(r.URL, referer) == false { - envError(r, ErrBadReferer) + r = envError(r, ErrBadReferer) cs.opts.ErrorHandler.ServeHTTP(w, r) return } @@ -240,7 +239,7 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) { // If the token returned from the session store is nil for non-idempotent // ("unsafe") methods, call the error handler. if realToken == nil { - envError(r, ErrNoToken) + r = envError(r, ErrNoToken) cs.opts.ErrorHandler.ServeHTTP(w, r) return } @@ -250,7 +249,7 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Compare the request token against the real token if !compareTokens(requestToken, realToken) { - envError(r, ErrBadToken) + r = envError(r, ErrBadToken) cs.opts.ErrorHandler.ServeHTTP(w, r) return } @@ -263,7 +262,7 @@ func (cs *csrf) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Call the wrapped handler/router on success. cs.h.ServeHTTP(w, r) // Clear the request context after the handler has completed. - context.Clear(r) + contextClear(r) } // unauthorizedhandler sets a HTTP 403 Forbidden status and writes the diff --git a/vendor/github.com/gorilla/csrf/helpers.go b/vendor/github.com/gorilla/csrf/helpers.go index 7adb5ff..3dacfd2 100644 --- a/vendor/github.com/gorilla/csrf/helpers.go +++ b/vendor/github.com/gorilla/csrf/helpers.go @@ -8,8 +8,6 @@ import ( "html/template" "net/http" "net/url" - - "github.com/gorilla/context" ) // Token returns a masked CSRF token ready for passing into HTML template or @@ -200,6 +198,6 @@ func contains(vals []string, s string) bool { } // envError stores a CSRF error in the request context. -func envError(r *http.Request, err error) { - context.Set(r, errorKey, err) +func envError(r *http.Request, err error) *http.Request { + return contextSave(r, errorKey, err) } diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go index 268de9c..0be750f 100644 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -8,9 +8,11 @@ import ( var ( // De-facto standard header keys. - xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") - xRealIP = http.CanonicalHeaderKey("X-Real-IP") - xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Scheme") + xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") + xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") + xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") + xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") + xRealIP = http.CanonicalHeaderKey("X-Real-IP") ) var ( @@ -28,9 +30,9 @@ var ( // ProxyHeaders inspects common reverse proxy headers and sets the corresponding // fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP -// for the remote (client) IP address, X-Forwarded-Proto for the scheme -// (http|https) and the RFC7239 Forwarded header, which may include both client -// IPs and schemes. 
+// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme +// for the scheme (http|https) and the RFC7239 Forwarded header, which may +// include both client IPs and schemes. // // NOTE: This middleware should only be used when behind a reverse // proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are @@ -49,7 +51,10 @@ func ProxyHeaders(h http.Handler) http.Handler { if scheme := getScheme(r); scheme != "" { r.URL.Scheme = scheme } - + // Set the host with the value passed by the proxy + if r.Header.Get(xForwardedHost) != "" { + r.Host = r.Header.Get(xForwardedHost) + } // Call the next handler in the chain. h.ServeHTTP(w, r) } @@ -99,7 +104,9 @@ func getScheme(r *http.Request) string { // Retrieve the scheme from X-Forwarded-Proto. if proto := r.Header.Get(xForwardedProto); proto != "" { scheme = strings.ToLower(proto) - } else if proto := r.Header.Get(forwarded); proto != "" { + } else if proto = r.Header.Get(xForwardedScheme); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(forwarded); proto != "" { // match should contain at least two elements if the protocol was // specified in the Forwarded header. The first element will always be // the 'proto=' capture, which we ignore. In the case of multiple proto diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 960ef7c..fa79a6b 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,19 +1,43 @@ -mux +gorilla/mux === [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) [![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) + http://www.gorillatoolkit.org/pkg/mux -Package `gorilla/mux` implements a request router and dispatcher. +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. * Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. * URL hosts and paths can have variables with an optional regular expression. * Registered URLs can be built, or "reversed", which helps maintaining references to resources. * Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. 
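With `X-Forwarded-Host` now honoured alongside the scheme and client-IP headers, wiring the middleware in front of an application looks roughly like this. A minimal sketch; the bind address is illustrative, and it assumes a reverse proxy you control is overwriting these headers:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/handlers"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Populated by ProxyHeaders from X-Forwarded-For / X-Real-IP,
		// X-Forwarded-Proto or X-Forwarded-Scheme, and X-Forwarded-Host.
		fmt.Fprintf(w, "client=%s scheme=%s host=%s\n",
			r.RemoteAddr, r.URL.Scheme, r.Host)
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:8000", handlers.ProxyHeaders(app)))
}
```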
+ +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples Let's start registering a couple of URL paths and handlers: @@ -47,6 +71,8 @@ category := vars["category"] And this is all you need to know about the basic usage. More advanced options are explained below. +### Matching Routes + Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: ```go @@ -118,7 +144,7 @@ Then register routes in the subrouter: ```go s.HandleFunc("/products/", ProductsHandler) s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) ``` The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. @@ -138,6 +164,37 @@ s.HandleFunc("/{key}/", ProductHandler) s.HandleFunc("/{key}/details", ProductDetailsHandler) ``` +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index 835f534..291ef5e 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -136,6 +136,31 @@ the inner routes use it as base for their paths: // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. 
Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index f8c10f3..3263a00 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -10,6 +10,7 @@ import ( "net/http" "path" "regexp" + "strings" ) // NewRouter returns a new router instance. @@ -76,8 +77,9 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { if !r.skipClean { + path := getPath(req) // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { + if p := cleanPath(path); p != path { // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: @@ -285,6 +287,9 @@ func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { if err == SkipRouter { continue } + if err != nil { + return err + } for _, sr := range t.matchers { if h, ok := sr.(*Router); ok { err := h.walk(walkFn, ancestors) @@ -355,6 +360,28 @@ func setCurrentRoute(r *http.Request, val interface{}) *http.Request { // Helpers // ---------------------------------------------------------------------------- +// getPath returns the escaped path if possible; doing what URL.EscapedPath() +// which was added in go1.5 does +func getPath(req *http.Request) string { + if req.RequestURI != "" { + // Extract the path from RequestURI (which is escaped unlike URL.Path) + // as detailed here as detailed in https://golang.org/pkg/net/url/#URL + // for < 1.5 server side workaround + // http://localhost/path/here?v=1 -> /path/here + path := req.RequestURI + path = strings.TrimPrefix(path, req.URL.Scheme+`://`) + path = strings.TrimPrefix(path, req.URL.Host) + if i := strings.LastIndex(path, "?"); i > -1 { + path = path[:i] + } + if i := strings.LastIndex(path, "#"); i > -1 { + path = path[:i] + } + return path + } + return req.URL.Path +} + // cleanPath returns the canonical path for p, eliminating . and .. elements. // Borrowed from the net/http package. func cleanPath(p string) string { diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 08710bc..99d41a8 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -149,8 +149,8 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if r.matchQuery { return r.matchQueryString(req) } - - return r.regexp.MatchString(req.URL.Path) + path := getPath(req) + return r.regexp.MatchString(path) } return r.regexp.MatchString(getHost(req)) @@ -253,14 +253,15 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) extractVars(host, matches, v.host.varsN, m.Vars) } } + path := getPath(req) // Store path variables. 
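The practical effect of matching on the escaped path is easiest to see with a percent-encoded slash. A minimal sketch using Go 1.7's `httptest.NewRequest` (the route and URL are illustrative): with `URL.Path` the `%2F` would have been unescaped and split the segment in two, whereas the `RequestURI`-based path keeps it inside a single variable.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/files/{name}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, mux.Vars(req)["name"])
	})

	// RequestURI preserves the escaping, so {name} captures "a%2Fb".
	req := httptest.NewRequest("GET", "http://example.com/files/a%2Fb", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	fmt.Print(rec.Body.String())
}
```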
if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(req.URL.Path) + matches := v.path.regexp.FindStringSubmatchIndex(path) if len(matches) > 0 { - extractVars(req.URL.Path, matches, v.path.varsN, m.Vars) + extractVars(path, matches, v.path.varsN, m.Vars) // Check if we should redirect. if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") + p1 := strings.HasSuffix(path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md index 2779904..12f6118 100644 --- a/vendor/github.com/gorilla/sessions/README.md +++ b/vendor/github.com/gorilla/sessions/README.md @@ -64,7 +64,7 @@ Other implementations of the `sessions.Store` interface: * [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase * [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - Dynamodb on AWS * [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache -* [github.com/hnakamur/gaesessions](https://github.com/hnakamur/gaesessions) - Memcache on GAE +* [github.com/dsoprea/goappenginesessioncascade](https://github.com/dsoprea/goappenginesessioncascade) - Memcache/Datastore/Context in AppEngine * [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB * [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL * [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile new file mode 100644 index 0000000..b97cd6e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/Makefile @@ -0,0 +1,31 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code. +test: generate + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} + +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "/vendor/" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. +updatedeps: + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" + +# generate runs `go generate` to build the dynamically generated source files. +generate: + @echo "==> Generating..." + @find . -type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "/vendor/" \ + | xargs -n1 go generate + +.PHONY: default test testrace updatedeps generate diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md index e81be50..ead5830 100644 --- a/vendor/github.com/hashicorp/go-multierror/README.md +++ b/vendor/github.com/hashicorp/go-multierror/README.md @@ -1,5 +1,11 @@ # go-multierror +[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: https://travis-ci.org/hashicorp/go-multierror +[godocs]: https://godoc.org/github.com/hashicorp/go-multierror + `go-multierror` is a package for Go that provides a mechanism for representing a list of `error` values as a single `error`. 
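The canonical use of the package, per its README, is `Append` plus `ErrorOrNil`; a minimal sketch (the validation rules are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func validate(name string, age int) error {
	var result *multierror.Error
	if name == "" {
		result = multierror.Append(result, errors.New("name must not be empty"))
	}
	if age < 0 {
		result = multierror.Append(result, errors.New("age must not be negative"))
	}
	// ErrorOrNil returns nil when nothing was appended, so callers can
	// keep the ordinary `if err != nil` idiom.
	return result.ErrorOrNil()
}

func main() {
	if err := validate("", -1); err != nil {
		fmt.Println(err)
	}
}
```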
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go index cfddbf3..a6938fe 100644 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -489,7 +489,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) // the yacc parser would always ensure top-level elements were arrays. The new // parser does not make the same guarantees, thus we need to convert any // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok { + if _, ok := node.(*ast.LiteralType); ok && item != nil { node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} } diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/kr/fs/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/kr/fs/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme new file mode 100644 index 0000000..c95e13f --- /dev/null +++ b/vendor/github.com/kr/fs/Readme @@ -0,0 +1,3 @@ +Filesystem Package + +http://godoc.org/github.com/kr/fs diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go new file mode 100644 index 0000000..f1c4805 --- /dev/null +++ b/vendor/github.com/kr/fs/filesystem.go @@ -0,0 +1,36 @@ +package fs + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// FileSystem defines the methods of an abstract filesystem. +type FileSystem interface { + + // ReadDir reads the directory named by dirname and returns a + // list of directory entries. + ReadDir(dirname string) ([]os.FileInfo, error) + + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. + Lstat(name string) (os.FileInfo, error) + + // Join joins any number of path elements into a single path, adding a + // separator if necessary. The result is Cleaned; in particular, all + // empty strings are ignored. 
+ // + // The separator is FileSystem specific. + Join(elem ...string) string +} + +// fs represents a FileSystem provided by the os package. +type fs struct{} + +func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } + +func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go new file mode 100644 index 0000000..6ffa1e0 --- /dev/null +++ b/vendor/github.com/kr/fs/walk.go @@ -0,0 +1,95 @@ +// Package fs provides filesystem-related functions. +package fs + +import ( + "os" +) + +// Walker provides a convenient interface for iterating over the +// descendants of a filesystem path. +// Successive calls to the Step method will step through each +// file or directory in the tree, including the root. The files +// are walked in lexical order, which makes the output deterministic +// but means that for very large directories Walker can be inefficient. +// Walker does not follow symbolic links. +type Walker struct { + fs FileSystem + cur item + stack []item + descend bool +} + +type item struct { + path string + info os.FileInfo + err error +} + +// Walk returns a new Walker rooted at root. +func Walk(root string) *Walker { + return WalkFS(root, new(fs)) +} + +// WalkFS returns a new Walker rooted at root on the FileSystem fs. +func WalkFS(root string, fs FileSystem) *Walker { + info, err := fs.Lstat(root) + return &Walker{ + fs: fs, + stack: []item{{root, info, err}}, + } +} + +// Step advances the Walker to the next file or directory, +// which will then be available through the Path, Stat, +// and Err methods. +// It returns false when the walk stops at the end of the tree. +func (w *Walker) Step() bool { + if w.descend && w.cur.err == nil && w.cur.info.IsDir() { + list, err := w.fs.ReadDir(w.cur.path) + if err != nil { + w.cur.err = err + w.stack = append(w.stack, w.cur) + } else { + for i := len(list) - 1; i >= 0; i-- { + path := w.fs.Join(w.cur.path, list[i].Name()) + w.stack = append(w.stack, item{path, list[i], nil}) + } + } + } + + if len(w.stack) == 0 { + return false + } + i := len(w.stack) - 1 + w.cur = w.stack[i] + w.stack = w.stack[:i] + w.descend = true + return true +} + +// Path returns the path to the most recent file or directory +// visited by a call to Step. It contains the argument to Walk +// as a prefix; that is, if Walk is called with "dir", which is +// a directory containing the file "a", Path will return "dir/a". +func (w *Walker) Path() string { + return w.cur.path +} + +// Stat returns info for the most recent file or directory +// visited by a call to Step. +func (w *Walker) Stat() os.FileInfo { + return w.cur.info +} + +// Err returns the error, if any, for the most recent attempt +// by Step to visit a file or directory. If a directory has +// an error, w will not descend into that directory. +func (w *Walker) Err() error { + return w.cur.err +} + +// SkipDir causes the currently visited directory to be skipped. +// If w is not on a directory, SkipDir has no effect. +func (w *Walker) SkipDir() { + w.descend = false +} diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go index e2bf3c6..190b695 100644 --- a/vendor/github.com/mattn/go-sqlite3/callback.go +++ b/vendor/github.com/mattn/go-sqlite3/callback.go @@ -11,7 +11,11 @@ package sqlite3 // code for SQLite custom functions is in here. 
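The `Walker` vendored above replaces `filepath.Walk`'s callback style with step-wise iteration, which is what makes it usable over remote filesystems. A minimal usage sketch (the root directory is illustrative):

```go
package main

import (
	"fmt"

	"github.com/kr/fs"
)

func main() {
	walker := fs.Walk("/usr/share/doc")
	for walker.Step() {
		if err := walker.Err(); err != nil {
			// A directory that failed to read is reported here; the walk
			// continues without descending into it.
			fmt.Println("error:", err)
			continue
		}
		fmt.Println(walker.Path())
	}
}
```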
/* +#ifndef USE_LIBSQLITE3 #include +#else +#include +#endif #include void _sqlite3_result_text(sqlite3_context* ctx, const char* s); diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c index 7b82c55..1f085b0 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c @@ -1,6 +1,6 @@ /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.12.2. By combining all the individual C code files into this +** version 3.14.0. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -37,8 +37,43 @@ ** Internal interface definitions for SQLite. ** */ -#ifndef _SQLITEINT_H_ -#define _SQLITEINT_H_ +#ifndef SQLITEINT_H +#define SQLITEINT_H + +/* Special Comments: +** +** Some comments have special meaning to the tools that measure test +** coverage: +** +** NO_TEST - The branches on this line are not +** measured by branch coverage. This is +** used on lines of code that actually +** implement parts of coverage testing. +** +** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false +** and the correct answer is still obtained, +** though perhaps more slowly. +** +** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true +** and the correct answer is still obtained, +** though perhaps more slowly. +** +** PREVENTS-HARMLESS-OVERREAD - This branch prevents a buffer overread +** that would be harmless and undetectable +** if it did occur. +** +** In all cases, the special comment must be enclosed in the usual +** slash-asterisk...asterisk-slash comment marks, with no spaces between the +** asterisks and the comment text. +*/ + +/* +** Make sure the Tcl calling convention macro is defined. This macro is +** only used by test code and Tcl integration code. +*/ +#ifndef SQLITE_TCLAPI +# define SQLITE_TCLAPI +#endif /* ** Make sure that rand_s() is available on Windows systems with MSVC 2005 @@ -70,8 +105,8 @@ ** ** This file contains code that is specific to MSVC. */ -#ifndef _MSVC_H_ -#define _MSVC_H_ +#ifndef SQLITE_MSVC_H +#define SQLITE_MSVC_H #if defined(_MSC_VER) #pragma warning(disable : 4054) @@ -91,7 +126,7 @@ #pragma warning(disable : 4706) #endif /* defined(_MSC_VER) */ -#endif /* _MSVC_H_ */ +#endif /* SQLITE_MSVC_H */ /************** End of msvc.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -255,8 +290,8 @@ ** the version number) and changes its name to "sqlite3.h" as ** part of the build process. */ -#ifndef _SQLITE3_H_ -#define _SQLITE3_H_ +#ifndef SQLITE3_H +#define SQLITE3_H #include /* Needed for the definition of va_list */ /* @@ -279,8 +314,17 @@ extern "C" { #ifndef SQLITE_CDECL # define SQLITE_CDECL #endif +#ifndef SQLITE_APICALL +# define SQLITE_APICALL +#endif #ifndef SQLITE_STDCALL -# define SQLITE_STDCALL +# define SQLITE_STDCALL SQLITE_APICALL +#endif +#ifndef SQLITE_CALLBACK +# define SQLITE_CALLBACK +#endif +#ifndef SQLITE_SYSAPI +# define SQLITE_SYSAPI #endif /* @@ -336,9 +380,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
*/ -#define SQLITE_VERSION "3.12.2" -#define SQLITE_VERSION_NUMBER 3012002 -#define SQLITE_SOURCE_ID "2016-04-18 17:30:31 92dc59fd5ad66f646666042eb04195e3a61a9e8e" +#define SQLITE_VERSION "3.14.0" +#define SQLITE_VERSION_NUMBER 3014000 +#define SQLITE_SOURCE_ID "2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -731,6 +775,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec( #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) +#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) /* ** CAPI3REF: Flags For File Open Operations @@ -1260,6 +1305,16 @@ struct sqlite3_io_methods { */ typedef struct sqlite3_mutex sqlite3_mutex; +/* +** CAPI3REF: Loadable Extension Thunk +** +** A pointer to the opaque sqlite3_api_routines structure is passed as +** the third parameter to entry points of [loadable extensions]. This +** structure must be typedefed in order to work around compiler warnings +** on some platforms. +*/ +typedef struct sqlite3_api_routines sqlite3_api_routines; + /* ** CAPI3REF: OS Interface Object ** @@ -2157,12 +2212,30 @@ struct sqlite3_mem_methods { ** following this call. The second parameter may be a NULL pointer, in ** which case the new setting is not reported back. ** +**
<dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
+** <dd>
^This option is used to enable or disable the [sqlite3_load_extension()] +** interface independently of the [load_extension()] SQL function. +** The [sqlite3_enable_load_extension()] API enables or disables both the +** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. +** There should be two additional arguments. +** When the first argument to this interface is 1, then only the C-API is +** enabled and the SQL function remains disabled. If the first argument to +** this interface is 0, then both the C-API and the SQL function are disabled. +** If the first argument is -1, then no changes are made to state of either the +** C-API or the SQL function. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface +** is disabled or enabled following this call. The second parameter may +** be a NULL pointer, in which case the new setting is not reported back. +**
+** ** */ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ #define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ /* @@ -2439,7 +2512,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql); ** A busy handler must not close the database connection ** or [prepared statement] that invoked the busy handler. */ -SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); +SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); /* ** CAPI3REF: Set A Busy Timeout @@ -2961,6 +3034,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( ** CAPI3REF: Tracing And Profiling Functions ** METHOD: sqlite3 ** +** These routines are deprecated. Use the [sqlite3_trace_v2()] interface +** instead of the routines described here. +** ** These routines register callback functions that can be used for ** tracing and profiling the execution of SQL statements. ** @@ -2986,10 +3062,104 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( ** sqlite3_profile() function is considered experimental and is ** subject to change in future versions of SQLite. */ -SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*, +SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_trace(sqlite3*, + void(*xTrace)(void*,const char*), void*); +SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*, void(*xProfile)(void*,const char*,sqlite3_uint64), void*); +/* +** CAPI3REF: SQL Trace Event Codes +** KEYWORDS: SQLITE_TRACE +** +** These constants identify classes of events that can be monitored +** using the [sqlite3_trace_v2()] tracing logic. The third argument +** to [sqlite3_trace_v2()] is an OR-ed combination of one or more of +** the following constants. ^The first argument to the trace callback +** is one of the following constants. +** +** New tracing constants may be added in future releases. +** +** ^A trace callback has four arguments: xCallback(T,C,P,X). +** ^The T argument is one of the integer type codes above. +** ^The C argument is a copy of the context pointer passed in as the +** fourth argument to [sqlite3_trace_v2()]. +** The P and X arguments are pointers whose meanings depend on T. +** +**
+** [[SQLITE_TRACE_STMT]] <dt>SQLITE_TRACE_STMT</dt>
+** <dd>
^An SQLITE_TRACE_STMT callback is invoked when a prepared statement +** first begins running and possibly at other times during the +** execution of the prepared statement, such as at the start of each +** trigger subprogram. ^The P argument is a pointer to the +** [prepared statement]. ^The X argument is a pointer to a string which +** is the unexpanded SQL text of the prepared statement or an SQL comment +** that indicates the invocation of a trigger. ^The callback can compute +** the same text that would have been returned by the legacy [sqlite3_trace()] +** interface by using the X argument when X begins with "--" and invoking +** [sqlite3_expanded_sql(P)] otherwise. +** +** [[SQLITE_TRACE_PROFILE]]
<dt>SQLITE_TRACE_PROFILE</dt>
+** <dd>
^An SQLITE_TRACE_PROFILE callback provides approximately the same +** information as is provided by the [sqlite3_profile()] callback. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument points to a 64-bit integer which is the estimated of +** the number of nanosecond that the prepared statement took to run. +** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. +** +** [[SQLITE_TRACE_ROW]]
<dt>SQLITE_TRACE_ROW</dt>
+** <dd>
^An SQLITE_TRACE_ROW callback is invoked whenever a prepared +** statement generates a single row of result. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument is unused. +** +** [[SQLITE_TRACE_CLOSE]]
<dt>SQLITE_TRACE_CLOSE</dt>
+** <dd> ^An SQLITE_TRACE_CLOSE callback is invoked when a database
+** connection closes.
+** ^The P argument is a pointer to the [database connection] object
+** and the X argument is unused.
+** </dl>
+*/ +#define SQLITE_TRACE_STMT 0x01 +#define SQLITE_TRACE_PROFILE 0x02 +#define SQLITE_TRACE_ROW 0x04 +#define SQLITE_TRACE_CLOSE 0x08 + +/* +** CAPI3REF: SQL Trace Hook +** METHOD: sqlite3 +** +** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback +** function X against [database connection] D, using property mask M +** and context pointer P. ^If the X callback is +** NULL or if the M mask is zero, then tracing is disabled. The +** M argument should be the bitwise OR-ed combination of +** zero or more [SQLITE_TRACE] constants. +** +** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides +** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** +** ^The X callback is invoked whenever any of the events identified by +** mask M occur. ^The integer return value from the callback is currently +** ignored, though this may change in future releases. Callback +** implementations should return zero to ensure future compatibility. +** +** ^A trace callback is invoked with four arguments: callback(T,C,P,X). +** ^The T argument is one of the [SQLITE_TRACE] +** constants to indicate why the callback was invoked. +** ^The C argument is a copy of the context pointer. +** The P and X arguments are pointers whose meanings depend on T. +** +** The sqlite3_trace_v2() interface is intended to replace the legacy +** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which +** are deprecated. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2( + sqlite3*, + unsigned uMask, + int(*xCallback)(unsigned,void*,void*,void*), + void *pCtx +); + /* ** CAPI3REF: Query Progress Callbacks ** METHOD: sqlite3 @@ -3608,11 +3778,35 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2( ** CAPI3REF: Retrieving Statement SQL ** METHOD: sqlite3_stmt ** -** ^This interface can be used to retrieve a saved copy of the original -** SQL text used to create a [prepared statement] if that statement was -** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. +** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8 +** SQL text used to create [prepared statement] P if P was +** created by either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. +** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8 +** string containing the SQL text of prepared statement P with +** [bound parameters] expanded. +** +** ^(For example, if a prepared statement is created using the SQL +** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345 +** and parameter :xyz is unbound, then sqlite3_sql() will return +** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql() +** will return "SELECT 2345,NULL".)^ +** +** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory +** is available to hold the result, or if the result would exceed the +** the maximum string length determined by the [SQLITE_LIMIT_LENGTH]. +** +** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of +** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time +** option causes sqlite3_expanded_sql() to always return NULL. +** +** ^The string returned by sqlite3_sql(P) is managed by SQLite and is +** automatically freed when the prepared statement is finalized. +** ^The string returned by sqlite3_expanded_sql(P), on the other hand, +** is obtained from [sqlite3_malloc()] and must be free by the application +** by passing it to [sqlite3_free()]. 
*/ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt); +SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt); /* ** CAPI3REF: Determine If An SQL Statement Writes The Database @@ -4770,12 +4964,13 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*); ** SQLite will invoke the destructor function X with parameter P exactly ** once, when the metadata is discarded. ** SQLite is free to discard the metadata at any time, including:
-** <li> when the corresponding function parameter changes, or
-** <li> when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
-** SQL statement, or
-** <li> when sqlite3_set_auxdata() is invoked again on the same parameter, or
-** <li> during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs. </ul>)^
+** <li> ^(when the corresponding function parameter changes)^, or
+** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
+** SQL statement)^, or
+** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
+** parameter)^, or
+** <li>
  • ^(during the original sqlite3_set_auxdata() call when a memory +** allocation error occurs.)^ ** ** Note the last bullet in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the @@ -5412,7 +5607,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), ** ^The sqlite3_update_hook() interface registers a callback function ** with the [database connection] identified by the first argument ** to be invoked whenever a row is updated, inserted or deleted in -** a rowid table. +** a [rowid table]. ** ^Any callback set by a previous call to this function ** for the same database connection is overridden. ** @@ -5451,8 +5646,8 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), ** on the same [database connection] D, or NULL for ** the first call on D. ** -** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] -** interfaces. +** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()], +** and [sqlite3_preupdate_hook()] interfaces. */ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook( sqlite3*, @@ -5602,7 +5797,7 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N); ** column exists. ^The sqlite3_table_column_metadata() interface returns ** SQLITE_ERROR and if the specified column does not exist. ** ^If the column-name parameter to sqlite3_table_column_metadata() is a -** NULL pointer, then this routine simply checks for the existance of the +** NULL pointer, then this routine simply checks for the existence of the ** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it ** does not. ** @@ -5699,9 +5894,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata( ** should free this memory by calling [sqlite3_free()]. ** ** ^Extension loading must be enabled using -** [sqlite3_enable_load_extension()] prior to calling this API, +** [sqlite3_enable_load_extension()] or +** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL) +** prior to calling this API, ** otherwise an error will be returned. ** +** Security warning: It is recommended that the +** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this +** interface. The use of the [sqlite3_enable_load_extension()] interface +** should be avoided. This will keep the SQL function [load_extension()] +** disabled and prevent SQL injections from giving attackers +** access to extension loading capabilities. +** ** See also the [load_extension() SQL function]. */ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( @@ -5724,6 +5928,17 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( ** ^Call the sqlite3_enable_load_extension() routine with onoff==1 ** to turn extension loading on and call it with onoff==0 to turn ** it back off again. +** +** ^This interface enables or disables both the C-API +** [sqlite3_load_extension()] and the SQL function [load_extension()]. +** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..) +** to enable or disable only the C-API.)^ +** +** Security warning: It is recommended that extension loading +** be disabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method +** rather than this interface, so the [load_extension()] SQL function +** remains disabled. This will prevent SQL injections from giving attackers +** access to extension loading capabilities. 
*/ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff); @@ -5737,7 +5952,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono ** ** ^(Even though the function prototype shows that xEntryPoint() takes ** no arguments and returns void, SQLite invokes xEntryPoint() with three -** arguments and expects and integer result as if the signature of the +** arguments and expects an integer result as if the signature of the ** entry point where as follows: ** **
    @@ -5763,7 +5978,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
     ** See also: [sqlite3_reset_auto_extension()]
     ** and [sqlite3_cancel_auto_extension()]
     */
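For Go callers of this vendored amalgamation, mattn/go-sqlite3 wraps these C entry points; a sketch of the recommended shape, loading an extension from a connect hook so the SQL-level `load_extension()` never has to be enabled for query text. The driver name, extension library, and entry point are illustrative, and `SQLiteDriver.ConnectHook` plus `(*SQLiteConn).LoadExtension` are assumptions about the go-sqlite3 version in use:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/mattn/go-sqlite3"
)

func main() {
	// Only connections opened through this driver name run the hook,
	// so extension loading stays out of reach of arbitrary SQL.
	sql.Register("sqlite3_ext", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// "sqlite3_extension_init" is the conventional default entry point.
			return conn.LoadExtension("mod_spatialite", "sqlite3_extension_init")
		},
	})

	db, err := sql.Open("sqlite3_ext", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Ping forces a real connection, which runs the hook.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```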
    -SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
    +SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
     
     /*
     ** CAPI3REF: Cancel Automatic Extension Loading
    @@ -5775,7 +5990,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
     ** unregistered and it returns 0 if X was not on the list of initialization
     ** routines.
     */
    -SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
    +SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
     
     /*
     ** CAPI3REF: Reset Automatic Extension Loading
    @@ -6951,6 +7166,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
     ** memory used by all pager caches associated with the database connection.)^
     ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
     **
    +** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]] 
+** ^(<dt>SQLITE_DBSTATUS_CACHE_USED_SHARED</dt>
+** <dd>
    This parameter is similar to DBSTATUS_CACHE_USED, except that if a +** pager cache is shared between two or more connections the bytes of heap +** memory used by that pager cache is divided evenly between the attached +** connections.)^ In other words, if none of the pager caches associated +** with the database connection are shared, this request returns the same +** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are +** shared, the value returned by this call will be smaller than that returned +** by DBSTATUS_CACHE_USED. ^The highwater mark associated with +** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. +** ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
<dt>SQLITE_DBSTATUS_SCHEMA_USED</dt>
** <dd>
    This parameter returns the approximate number of bytes of heap ** memory used to store the schema for all databases associated @@ -7008,7 +7235,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int #define SQLITE_DBSTATUS_CACHE_MISS 8 #define SQLITE_DBSTATUS_CACHE_WRITE 9 #define SQLITE_DBSTATUS_DEFERRED_FKS 10 -#define SQLITE_DBSTATUS_MAX 10 /* Largest defined DBSTATUS */ +#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 +#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */ /* @@ -7362,7 +7590,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** must be different or else sqlite3_backup_init(D,N,S,M) will fail with ** an error. ** -** ^A call to sqlite3_backup_init() will fail, returning SQLITE_ERROR, if +** ^A call to sqlite3_backup_init() will fail, returning NULL, if ** there is already a read or read-write transaction open on the ** destination database. ** @@ -8140,11 +8368,107 @@ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); */ SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*); +/* +** CAPI3REF: The pre-update hook. +** +** ^These interfaces are only available if SQLite is compiled using the +** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option. +** +** ^The [sqlite3_preupdate_hook()] interface registers a callback function +** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation +** on a [rowid table]. +** ^At most one preupdate hook may be registered at a time on a single +** [database connection]; each call to [sqlite3_preupdate_hook()] overrides +** the previous setting. +** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()] +** with a NULL pointer as the second parameter. +** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as +** the first parameter to callbacks. +** +** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate +** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID] +** tables. +** +** ^The second parameter to the preupdate callback is a pointer to +** the [database connection] that registered the preupdate hook. +** ^The third parameter to the preupdate callback is one of the constants +** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the +** kind of update operation that is about to occur. +** ^(The fourth parameter to the preupdate callback is the name of the +** database within the database connection that is being modified. This +** will be "main" for the main database or "temp" for TEMP tables or +** the name given after the AS keyword in the [ATTACH] statement for attached +** databases.)^ +** ^The fifth parameter to the preupdate callback is the name of the +** table that is being modified. +** ^The sixth parameter to the preupdate callback is the initial [rowid] of the +** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is +** undefined for SQLITE_INSERT changes. +** ^The seventh parameter to the preupdate callback is the final [rowid] of +** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is +** undefined for SQLITE_DELETE changes. +** +** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], +** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces +** provide additional information about a preupdate event. These routines +** may only be called from within a preupdate callback. 
Invoking any of +** these routines from outside of a preupdate callback or with a +** [database connection] pointer that is different from the one supplied +** to the preupdate callback results in undefined and probably undesirable +** behavior. +** +** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns +** in the row that is being inserted, updated, or deleted. +** +** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row before it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE +** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row after it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE +** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate +** callback was invoked as a result of a direct insert, update, or delete +** operation; or 1 for inserts, updates, or deletes invoked by top-level +** triggers; or 2 for changes resulting from triggers called by top-level +** triggers; and so forth. +** +** See also: [sqlite3_update_hook()] +*/ +SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook( + sqlite3 *db, + void(*xPreUpdate)( + void *pCtx, /* Copy of third arg to preupdate_hook() */ + sqlite3 *db, /* Database handle */ + int op, /* SQLITE_UPDATE, DELETE or INSERT */ + char const *zDb, /* Database name */ + char const *zName, /* Table name */ + sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */ + sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */ + ), + void* +); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); + /* ** CAPI3REF: Low-level system error code ** ** ^Attempt to return the underlying operating system error code or error -** number that caused the most reason I/O error or failure to open a file. +** number that caused the most recent I/O error or failure to open a file. ** The return value is OS-dependent. 
For example, on unix systems, after ** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be ** called to get back the underlying "errno" that caused the problem, such @@ -8210,20 +8534,29 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get( ** CAPI3REF: Start a read transaction on an historical snapshot ** EXPERIMENTAL ** -** ^The [sqlite3_snapshot_open(D,S,P)] interface attempts to move the -** read transaction that is currently open on schema S of -** [database connection] D so that it refers to historical [snapshot] P. +** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a +** read transaction for schema S of +** [database connection] D such that the read transaction +** refers to historical [snapshot] P, rather than the most +** recent change to the database. ** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success ** or an appropriate [error code] if it fails. ** ** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be -** the first operation, apart from other sqlite3_snapshot_open() calls, -** following the [BEGIN] that starts a new read transaction. -** ^A [snapshot] will fail to open if it has been overwritten by a +** the first operation following the [BEGIN] that takes the schema S +** out of [autocommit mode]. +** ^In other words, schema S must not currently be in +** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the +** database connection D must be out of [autocommit mode]. +** ^A [snapshot] will fail to open if it has been overwritten by a ** [checkpoint]. -** ^A [snapshot] will fail to open if the database connection D has not -** previously completed at least one read operation against the database -** file. (Hint: Run "[PRAGMA application_id]" against a newly opened +** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the +** database connection D does not know that the database file for +** schema S is in [WAL mode]. A database connection might not know +** that the database file is in [WAL mode] if there has been no prior +** I/O on that database connection, or if the database entered [WAL mode] +** after the most recent I/O on the database connection.)^ +** (Hint: Run "[PRAGMA application_id]" against a newly opened ** database connection in order to make it ready to use snapshots.) ** ** The [sqlite3_snapshot_open()] interface is only available when the @@ -8248,6 +8581,33 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open( */ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*); +/* +** CAPI3REF: Compare the ages of two snapshot handles. +** EXPERIMENTAL +** +** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages +** of two valid snapshot handles. +** +** If the two snapshot handles are not associated with the same database +** file, the result of the comparison is undefined. +** +** Additionally, the result of the comparison is only valid if both of the +** snapshot handles were obtained by calling sqlite3_snapshot_get() since the +** last time the wal file was deleted. The wal file is deleted when the +** database is changed back to rollback mode or when the number of database +** clients drops to zero. If either snapshot handle was obtained before the +** wal file was last deleted, the value returned by this function +** is undefined. 
+** +** Otherwise, this API returns a negative value if P1 refers to an older +** snapshot than P2, zero if the two handles refer to the same database +** snapshot, and a positive value if P1 is a newer snapshot than P2. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_cmp( + sqlite3_snapshot *p1, + sqlite3_snapshot *p2 +); + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -8259,8 +8619,9 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3 #if 0 } /* End of the 'extern "C"' block */ #endif -#endif /* _SQLITE3_H_ */ +#endif /* SQLITE3_H */ +/******** Begin file sqlite3rtree.h *********/ /* ** 2010 August 30 ** @@ -8378,6 +8739,1287 @@ struct sqlite3_rtree_query_info { #endif /* ifndef _SQLITE3RTREE_H_ */ +/******** End of sqlite3rtree.h *********/ +/******** Begin file sqlite3session.h *********/ + +#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) +#define __SQLITESESSION_H_ 1 + +/* +** Make sure we can call this stuff from C++. +*/ +#if 0 +extern "C" { +#endif + + +/* +** CAPI3REF: Session Object Handle +*/ +typedef struct sqlite3_session sqlite3_session; + +/* +** CAPI3REF: Changeset Iterator Handle +*/ +typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; + +/* +** CAPI3REF: Create A New Session Object +** +** Create a new session object attached to database handle db. If successful, +** a pointer to the new object is written to *ppSession and SQLITE_OK is +** returned. If an error occurs, *ppSession is set to NULL and an SQLite +** error code (e.g. SQLITE_NOMEM) is returned. +** +** It is possible to create multiple session objects attached to a single +** database handle. +** +** Session objects created using this function should be deleted using the +** [sqlite3session_delete()] function before the database handle that they +** are attached to is itself closed. If the database handle is closed before +** the session object is deleted, then the results of calling any session +** module function, including [sqlite3session_delete()] on the session object +** are undefined. +** +** Because the session module uses the [sqlite3_preupdate_hook()] API, it +** is not possible for an application to register a pre-update hook on a +** database handle that has one or more session objects attached. Nor is +** it possible to create a session object attached to a database handle for +** which a pre-update hook is already defined. The results of attempting +** either of these things are undefined. +** +** The session object will be used to create changesets for tables in +** database zDb, where zDb is either "main", or "temp", or the name of an +** attached database. It is not an error if database zDb is not attached +** to the database when the session object is created. +*/ +int sqlite3session_create( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of db (e.g. "main") */ + sqlite3_session **ppSession /* OUT: New session object */ +); + +/* +** CAPI3REF: Delete A Session Object +** +** Delete a session object previously allocated using +** [sqlite3session_create()]. Once a session object has been deleted, the +** results of attempting to use pSession with any other session module +** function are undefined. +** +** Session objects must be deleted before the database handle to which they +** are attached is closed. Refer to the documentation for +** [sqlite3session_create()] for details. 
+*/ +void sqlite3session_delete(sqlite3_session *pSession); + + +/* +** CAPI3REF: Enable Or Disable A Session Object +** +** Enable or disable the recording of changes by a session object. When +** enabled, a session object records changes made to the database. When +** disabled - it does not. A newly created session object is enabled. +** Refer to the documentation for [sqlite3session_changeset()] for further +** details regarding how enabling and disabling a session object affects +** the eventual changesets. +** +** Passing zero to this function disables the session. Passing a value +** greater than zero enables it. Passing a value less than zero is a +** no-op, and may be used to query the current state of the session. +** +** The return value indicates the final state of the session object: 0 if +** the session is disabled, or 1 if it is enabled. +*/ +int sqlite3session_enable(sqlite3_session *pSession, int bEnable); + +/* +** CAPI3REF: Set Or Clear the Indirect Change Flag +** +** Each change recorded by a session object is marked as either direct or +** indirect. A change is marked as indirect if either: +** +**
+**   • The session object "indirect" flag is set when the change is
+**     made, or
+**   • The change is made by an SQL trigger or foreign key action
+**     instead of directly as a result of a user's SQL statement.
+**
+** If a single row is affected by more than one operation within a session,
+** then the change is considered indirect if all operations meet the criteria
+** for an indirect change above, or direct otherwise.
+**
+** This function is used to set, clear or query the session object indirect
+** flag. If the second argument passed to this function is zero, then the
+** indirect flag is cleared. If it is greater than zero, the indirect flag
+** is set. Passing a value less than zero does not modify the current value
+** of the indirect flag, and may be used to query the current state of the
+** indirect flag for the specified session object.
+**
+** The return value indicates the final state of the indirect flag: 0 if
+** it is clear, or 1 if it is set.
+*/
+int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+
+/*
+** CAPI3REF: Attach A Table To A Session Object
+**
+** If argument zTab is not NULL, then it is the name of a table to attach
+** to the session object passed as the first argument. All subsequent changes
+** made to the table while the session object is enabled will be recorded. See
+** documentation for [sqlite3session_changeset()] for further details.
+**
+** Or, if argument zTab is NULL, then changes are recorded for all tables
+** in the database. If additional tables are added to the database (by
+** executing "CREATE TABLE" statements) after this call is made, changes for
+** the new tables are also recorded.
+**
+** Changes can only be recorded for tables that have a PRIMARY KEY explicitly
+** defined as part of their CREATE TABLE statement. It does not matter if the
+** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY
+** KEY may consist of a single column, or may be a composite key.
+**
+** It is not an error if the named table does not exist in the database. Nor
+** is it an error if the named table does not have a PRIMARY KEY. However,
+** no changes will be recorded in either of these scenarios.
+**
+** Changes are not recorded for individual rows that have NULL values stored
+** in one or more of their PRIMARY KEY columns.
+**
+** SQLITE_OK is returned if the call completes without error. Or, if an error
+** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+*/
+int sqlite3session_attach(
+  sqlite3_session *pSession,      /* Session object */
+  const char *zTab                /* Table name */
+);
+
+/*
+** CAPI3REF: Set a table filter on a Session Object.
+**
+** The second argument (xFilter) is the "filter callback". For changes to rows
+** in tables that are not attached to the session object, the filter is called
+** to determine whether changes to the table's rows should be tracked or not.
+** If xFilter returns 0, changes are not tracked. Note that once a table is
+** attached, xFilter will not be called again.
+*/
+void sqlite3session_table_filter(
+  sqlite3_session *pSession,      /* Session object */
+  int(*xFilter)(
+    void *pCtx,                   /* Copy of third arg to _table_filter() */
+    const char *zTab              /* Table name */
+  ),
+  void *pCtx                      /* First argument passed to xFilter */
+);
+
+/*
+** CAPI3REF: Generate A Changeset From A Session Object
+**
+** Obtain a changeset containing changes to the tables attached to the
+** session object passed as the first argument. If successful,
+** set *ppChangeset to point to a buffer containing the changeset
+** and *pnChangeset to the size of the changeset in bytes before returning
+** SQLITE_OK.
+** If an error occurs, set both *ppChangeset and *pnChangeset to
+** zero and return an SQLite error code.
+**
+** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
+** each representing a change to a single row of an attached table. An INSERT
+** change contains the values of each field of a new database row. A DELETE
+** contains the original values of each field of a deleted database row. An
+** UPDATE change contains the original values of each field of an updated
+** database row along with the updated values for each updated non-primary-key
+** column. It is not possible for an UPDATE change to represent a change that
+** modifies the values of primary key columns. If such a change is made, it
+** is represented in a changeset as a DELETE followed by an INSERT.
+**
+** Changes are not recorded for rows that have NULL values stored in one or
+** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
+** no corresponding change is present in the changesets returned by this
+** function. If an existing row with one or more NULL values stored in
+** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
+** only an INSERT appears in the changeset. Similarly, if an existing row
+** with non-NULL PRIMARY KEY values is updated so that one or more of its
+** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
+** DELETE change only.
+**
+** The contents of a changeset may be traversed using an iterator created
+** using the [sqlite3changeset_start()] API. A changeset may be applied to
+** a database with a compatible schema using the [sqlite3changeset_apply()]
+** API.
+**
+** Within a changeset generated by this function, all changes related to a
+** single table are grouped together. In other words, when iterating through
+** a changeset or when applying a changeset to a database, all changes related
+** to a single table are processed before moving on to the next table. Tables
+** are sorted in the same order in which they were attached (or auto-attached)
+** to the sqlite3_session object. The order in which the changes related to
+** a single table are stored is undefined.
+**
+** Following a successful call to this function, it is the responsibility of
+** the caller to eventually free the buffer that *ppChangeset points to using
+** [sqlite3_free()].
+**
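+** A minimal sketch of the typical round trip (assuming an open [database
+** connection] db and an attached table "t1" with an explicit PRIMARY KEY;
+** both names are placeholders):
+**
+**   sqlite3_session *pSession = 0;
+**   int nChangeset = 0;
+**   void *pChangeset = 0;
+**   int rc = sqlite3session_create(db, "main", &pSession);
+**   if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSession, "t1");
+**   /* ... application writes to table "t1" here ... */
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
+**   }
+**   /* ... use (nChangeset, pChangeset) ... */
+**   sqlite3_free(pChangeset);        /* a no-op if pChangeset is zero */
+**   sqlite3session_delete(pSession);
+**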

+** Changeset Generation
+**
+** Once a table has been attached to a session object, the session object
+** records the primary key values of all new rows inserted into the table.
+** It also records the original primary key and other column values of any
+** deleted or updated rows. For each unique primary key value, data is only
+** recorded once - the first time a row with said primary key is inserted,
+** updated or deleted in the lifetime of the session.
+**
+** There is one exception to the previous paragraph: when a row is inserted,
+** updated or deleted, if one or more of its primary key columns contain a
+** NULL value, no record of the change is made.
+**
+** The session object therefore accumulates two types of records - those
+** that consist of primary key values only (created when the user inserts
+** a new record) and those that consist of the primary key values and the
+** original values of other table columns (created when the user deletes
+** or updates a record).
+**
+** When this function is called, the requested changeset is created using
+** both the accumulated records and the current contents of the database
+** file. Specifically:
+**
+**   • For each record generated by an insert, the database is queried
+**     for a row with a matching primary key. If one is found, an INSERT
+**     change is added to the changeset. If no such row is found, no change
+**     is added to the changeset.
+**
+**   • For each record generated by an update or delete, the database is
+**     queried for a row with a matching primary key. If such a row is
+**     found and one or more of the non-primary key fields have been
+**     modified from their original values, an UPDATE change is added to
+**     the changeset. Or, if no such row is found in the table, a DELETE
+**     change is added to the changeset. If there is a row with a matching
+**     primary key in the database, but all fields contain their original
+**     values, no change is added to the changeset.
    +** +** This means, amongst other things, that if a row is inserted and then later +** deleted while a session object is active, neither the insert nor the delete +** will be present in the changeset. Or if a row is deleted and then later a +** row with the same primary key values inserted while a session object is +** active, the resulting changeset will contain an UPDATE change instead of +** a DELETE and an INSERT. +** +** When a session object is disabled (see the [sqlite3session_enable()] API), +** it does not accumulate records when rows are inserted, updated or deleted. +** This may appear to have some counter-intuitive effects if a single row +** is written to more than once during a session. For example, if a row +** is inserted while a session object is enabled, then later deleted while +** the same session object is disabled, no INSERT record will appear in the +** changeset, even though the delete took place while the session was disabled. +** Or, if one field of a row is updated while a session is disabled, and +** another field of the same row is updated while the session is enabled, the +** resulting changeset will contain an UPDATE change that updates both fields. +*/ +int sqlite3session_changeset( + sqlite3_session *pSession, /* Session object */ + int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ + void **ppChangeset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Load The Difference Between Tables Into A Session +** +** If it is not already attached to the session object passed as the first +** argument, this function attaches table zTbl in the same manner as the +** [sqlite3session_attach()] function. If zTbl does not exist, or if it +** does not have a primary key, this function is a no-op (but does not return +** an error). +** +** Argument zFromDb must be the name of a database ("main", "temp" etc.) +** attached to the same database handle as the session object that contains +** a table compatible with the table attached to the session by this function. +** A table is considered compatible if it: +** +**
+**   • Has the same name,
+**   • Has the same set of columns declared in the same order, and
+**   • Has the same PRIMARY KEY definition.
    +** +** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables +** are compatible but do not have any PRIMARY KEY columns, it is not an error +** but no changes are added to the session object. As with other session +** APIs, tables without PRIMARY KEYs are simply ignored. +** +** This function adds a set of changes to the session object that could be +** used to update the table in database zFrom (call this the "from-table") +** so that its content is the same as the table attached to the session +** object (call this the "to-table"). Specifically: +** +**
+**   • For each row (primary key) that exists in the to-table but not in
+**     the from-table, an INSERT record is added to the session object.
+**
+**   • For each row (primary key) that exists in the from-table but not in
+**     the to-table, a DELETE record is added to the session object.
+**
+**   • For each row (primary key) that exists in both tables, but features
+**     different non-PK values in each, an UPDATE record is added to the
+**     session.
+**
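+** A sketch of a call to this function, assuming the placeholder schema name
+** "aux" refers to an attached database containing a compatible table "t1",
+** and pSession was obtained from [sqlite3session_create()]:
+**
+**   char *zErrMsg = 0;
+**   int rc = sqlite3session_diff(pSession, "aux", "t1", &zErrMsg);
+**   if( rc!=SQLITE_OK ){
+**     /* zErrMsg may point to an English language error message */
+**     sqlite3_free(zErrMsg);
+**   }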
+**
+** To clarify, if this function is called and then a changeset constructed
+** using [sqlite3session_changeset()], then after applying that changeset to
+** database zFrom the contents of the two compatible tables would be
+** identical.
+**
+** It is an error if database zFrom does not exist or does not contain the
+** required compatible table.
+**
+** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite
+** error code is returned. In this case, if argument pzErrMsg is not NULL,
+** *pzErrMsg may be set to point to a buffer containing an English language
+** error message. It is the responsibility of the caller to free this buffer
+** using sqlite3_free().
+*/
+int sqlite3session_diff(
+  sqlite3_session *pSession,
+  const char *zFromDb,
+  const char *zTbl,
+  char **pzErrMsg
+);
+
+
+/*
+** CAPI3REF: Generate A Patchset From A Session Object
+**
+** The differences between a patchset and a changeset are that:
+**
+**   • DELETE records consist of the primary key fields only. The
+**     original values of other fields are omitted.
+**   • The original values of any modified fields are omitted from
+**     UPDATE records.
+**
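+** Generating a patchset is otherwise identical to generating a changeset;
+** a sketch, reusing the pSession handle from the examples above:
+**
+**   int nPatchset = 0;
+**   void *pPatchset = 0;
+**   int rc = sqlite3session_patchset(pSession, &nPatchset, &pPatchset);
+**   /* ... use (nPatchset, pPatchset), then sqlite3_free(pPatchset) ... */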
+**
+** A patchset blob may be used with up-to-date versions of all
+** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(),
+** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly,
+** attempting to use a patchset blob with old versions of the
+** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error.
+**
+** Because the non-primary key "old.*" fields are omitted, no
+** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset
+** is passed to the sqlite3changeset_apply() API. Other conflict types work
+** in the same way as for changesets.
+**
+** Changes within a patchset are ordered in the same way as for changesets
+** generated by the sqlite3session_changeset() function (i.e. all changes for
+** a single table are grouped together, tables appear in the order in which
+** they were attached to the session object).
+*/
+int sqlite3session_patchset(
+  sqlite3_session *pSession,      /* Session object */
+  int *pnPatchset,                /* OUT: Size of buffer at *ppPatchset */
+  void **ppPatchset               /* OUT: Buffer containing patchset */
+);
+
+/*
+** CAPI3REF: Test if a changeset has recorded any changes.
+**
+** Return non-zero if no changes to attached tables have been recorded by
+** the session object passed as the first argument. Otherwise, if one or
+** more changes have been recorded, return zero.
+**
+** Even if this function returns zero, it is possible that calling
+** [sqlite3session_changeset()] on the session handle may still return a
+** changeset that contains no changes. This can happen when a row in
+** an attached table is modified and then later on the original values
+** are restored. However, if this function returns non-zero, then it is
+** guaranteed that a call to sqlite3session_changeset() will return a
+** changeset containing zero changes.
+*/
+int sqlite3session_isempty(sqlite3_session *pSession);
+
+/*
+** CAPI3REF: Create An Iterator To Traverse A Changeset
+**
+** Create an iterator used to iterate through the contents of a changeset.
+** If successful, *pp is set to point to the iterator handle and SQLITE_OK
+** is returned. Otherwise, if an error occurs, *pp is set to zero and an
+** SQLite error code is returned.
+**
+** The following functions can be used to advance and query a changeset
+** iterator created by this function:
+**
+**   • [sqlite3changeset_next()]
+**   • [sqlite3changeset_op()]
+**   • [sqlite3changeset_new()]
+**   • [sqlite3changeset_old()]
+**
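+** A sketch of a typical iteration loop, assuming (nChangeset, pChangeset)
+** hold a changeset obtained from [sqlite3session_changeset()]:
+**
+**   sqlite3_changeset_iter *pIter = 0;
+**   int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+**   if( rc==SQLITE_OK ){
+**     while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
+**       const char *zTab; int nCol, op, bIndirect;
+**       sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect);
+**       /* op is now SQLITE_INSERT, SQLITE_DELETE or SQLITE_UPDATE */
+**     }
+**     rc = sqlite3changeset_finalize(pIter);
+**   }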
+**
+** It is the responsibility of the caller to eventually destroy the iterator
+** by passing it to [sqlite3changeset_finalize()]. The buffer containing the
+** changeset (pChangeset) must remain valid until after the iterator is
+** destroyed.
+**
+** Assuming the changeset blob was created by one of the
+** [sqlite3session_changeset()], [sqlite3changeset_concat()] or
+** [sqlite3changeset_invert()] functions, all changes within the changeset
+** that apply to a single table are grouped together. This means that when
+** an application iterates through a changeset using an iterator created by
+** this function, all changes that relate to a single table are visited
+** consecutively. There is no chance that the iterator will visit a change
+** that applies to table X, then one for table Y, and then later on visit
+** another change for table X.
+*/
+int sqlite3changeset_start(
+  sqlite3_changeset_iter **pp,    /* OUT: New changeset iterator handle */
+  int nChangeset,                 /* Size of changeset blob in bytes */
+  void *pChangeset                /* Pointer to blob containing changeset */
+);
+
+
+/*
+** CAPI3REF: Advance A Changeset Iterator
+**
+** This function may only be used with iterators created by function
+** [sqlite3changeset_start()]. If it is called on an iterator passed to
+** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE
+** is returned and the call has no effect.
+**
+** Immediately after an iterator is created by sqlite3changeset_start(), it
+** does not point to any change in the changeset. Assuming the changeset
+** is not empty, the first call to this function advances the iterator to
+** point to the first change in the changeset. Each subsequent call advances
+** the iterator to point to the next change in the changeset (if any). If
+** no error occurs and the iterator points to a valid change after a call
+** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned.
+** Otherwise, if all changes in the changeset have already been visited,
+** SQLITE_DONE is returned.
+**
+** If an error occurs, an SQLite error code is returned. Possible error
+** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
+** SQLITE_NOMEM.
+*/
+int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
+** is not the case, this function returns [SQLITE_MISUSE].
+**
+** If argument pzTab is not NULL, then *pzTab is set to point to a
+** nul-terminated utf-8 encoded string containing the name of the table
+** affected by the current change. The buffer remains valid until either
+** sqlite3changeset_next() is called on the iterator or until the
+** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
+** set to the number of columns in the table affected by the change. If
+** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
+** is an indirect change, or false (0) otherwise. See the documentation for
+** [sqlite3session_indirect()] for a description of direct and indirect
+** changes.
Finally, if pOp is not NULL, then *pOp is set to one of +** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the +** type of change that the iterator currently points to. +** +** If no error occurs, SQLITE_OK is returned. If an error does occur, an +** SQLite error code is returned. The values of the output variables may not +** be trusted in this case. +*/ +int sqlite3changeset_op( + sqlite3_changeset_iter *pIter, /* Iterator object */ + const char **pzTab, /* OUT: Pointer to table name */ + int *pnCol, /* OUT: Number of columns in table */ + int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */ + int *pbIndirect /* OUT: True for an 'indirect' change */ +); + +/* +** CAPI3REF: Obtain The Primary Key Definition Of A Table +** +** For each modified table, a changeset includes the following: +** +**
+**   • The number of columns in the table, and
+**   • Which of those columns make up the table's PRIMARY KEY.
+**
+** This function is used to find which columns comprise the PRIMARY KEY of
+** the table modified by the change that iterator pIter currently points to.
+** If successful, *pabPK is set to point to an array of nCol entries, where
+** nCol is the number of columns in the table. Elements of *pabPK are set to
+** 0x01 if the corresponding column is part of the table's primary key, or
+** 0x00 if it is not.
+**
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
+** in the table.
+**
+** If this function is called when the iterator does not point to a valid
+** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise,
+** SQLITE_OK is returned and the output variables populated as described
+** above.
+*/
+int sqlite3changeset_pk(
+  sqlite3_changeset_iter *pIter,  /* Iterator object */
+  unsigned char **pabPK,          /* OUT: Array of boolean - true for PK cols */
+  int *pnCol                      /* OUT: Number of entries in output array */
+);
+
+/*
+** CAPI3REF: Obtain old.* Values From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** original row values stored as part of the UPDATE or DELETE change and
+** returns SQLITE_OK. The name of the function comes from the fact that this
+** is similar to the "old.*" columns available to update or delete triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+int sqlite3changeset_old(
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator */
+  int iVal,                       /* Column number */
+  sqlite3_value **ppValue         /* OUT: Old value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain new.* Values From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** new row values stored as part of the UPDATE or INSERT change and
+** returns SQLITE_OK.
If the change is an UPDATE and does not include +** a new value for the requested column, *ppValue is set to NULL and +** SQLITE_OK returned. The name of the function comes from the fact that +** this is similar to the "new.*" columns available to update or delete +** triggers. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +int sqlite3changeset_new( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ +); + +/* +** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator +** +** This function should only be used with iterator objects passed to a +** conflict-handler callback by [sqlite3changeset_apply()] with either +** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function +** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue +** is set to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. +** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the +** "conflicting row" associated with the current conflict-handler callback +** and returns SQLITE_OK. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +int sqlite3changeset_conflict( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: Value from conflicting row */ +); + +/* +** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations +** +** This function may only be called with an iterator passed to an +** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case +** it sets the output variable to the total number of known foreign key +** violations in the destination database and returns SQLITE_OK. +** +** In all other cases this function returns SQLITE_MISUSE. +*/ +int sqlite3changeset_fk_conflicts( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int *pnOut /* OUT: Number of FK violations */ +); + + +/* +** CAPI3REF: Finalize A Changeset Iterator +** +** This function is used to finalize an iterator allocated with +** [sqlite3changeset_start()]. +** +** This function should only be called on iterators created using the +** [sqlite3changeset_start()] function. If an application calls this +** function with an iterator passed to a conflict-handler by +** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the +** call has no effect. +** +** If an error was encountered within a call to an sqlite3changeset_xxx() +** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an +** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding +** to that error is returned by this function. Otherwise, SQLITE_OK is +** returned. This is to allow the following pattern (pseudo-code): +** +** sqlite3changeset_start(); +** while( SQLITE_ROW==sqlite3changeset_next() ){ +** // Do something with change. 
+** } +** rc = sqlite3changeset_finalize(); +** if( rc!=SQLITE_OK ){ +** // An error has occurred +** } +*/ +int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Invert A Changeset +** +** This function is used to "invert" a changeset object. Applying an inverted +** changeset to a database reverses the effects of applying the uninverted +** changeset. Specifically: +** +**
+**   • Each DELETE change is changed to an INSERT, and
+**   • Each INSERT change is changed to a DELETE, and
+**   • For each UPDATE change, the old.* and new.* values are exchanged.
+**
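+** One possible use is a simple undo: invert a previously recorded changeset
+** and apply the result. A sketch, assuming (nChangeset, pChangeset) hold the
+** recorded changeset and xConflict is an application-defined conflict
+** handler (see [sqlite3changeset_apply()] below):
+**
+**   int nInv = 0;
+**   void *pInv = 0;
+**   int rc = sqlite3changeset_invert(nChangeset, pChangeset, &nInv, &pInv);
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3changeset_apply(db, nInv, pInv, 0, xConflict, 0);
+**   }
+**   sqlite3_free(pInv);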
    +** +** This function does not change the order in which changes appear within +** the changeset. It merely reverses the sense of each individual change. +** +** If successful, a pointer to a buffer containing the inverted changeset +** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and +** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are +** zeroed and an SQLite error code returned. +** +** It is the responsibility of the caller to eventually call sqlite3_free() +** on the *ppOut pointer to free the buffer allocation following a successful +** call to this function. +** +** WARNING/TODO: This function currently assumes that the input is a valid +** changeset. If it is not, the results are undefined. +*/ +int sqlite3changeset_invert( + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + +/* +** CAPI3REF: Concatenate Two Changeset Objects +** +** This function is used to concatenate two changesets, A and B, into a +** single changeset. The result is a changeset equivalent to applying +** changeset A followed by changeset B. +** +** This function combines the two input changesets using an +** sqlite3_changegroup object. Calling it produces similar results as the +** following code fragment: +** +** sqlite3_changegroup *pGrp; +** rc = sqlite3_changegroup_new(&pGrp); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB); +** if( rc==SQLITE_OK ){ +** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut); +** }else{ +** *ppOut = 0; +** *pnOut = 0; +** } +** +** Refer to the sqlite3_changegroup documentation below for details. +*/ +int sqlite3changeset_concat( + int nA, /* Number of bytes in buffer pA */ + void *pA, /* Pointer to buffer containing changeset A */ + int nB, /* Number of bytes in buffer pB */ + void *pB, /* Pointer to buffer containing changeset B */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: Buffer containing output changeset */ +); + + +/* +** Changegroup handle. +*/ +typedef struct sqlite3_changegroup sqlite3_changegroup; + +/* +** CAPI3REF: Combine two or more changesets into a single changeset. +** +** An sqlite3_changegroup object is used to combine two or more changesets +** (or patchsets) into a single changeset (or patchset). A single changegroup +** object may combine changesets or patchsets, but not both. The output is +** always in the same format as the input. +** +** If successful, this function returns SQLITE_OK and populates (*pp) with +** a pointer to a new sqlite3_changegroup object before returning. The caller +** should eventually free the returned object using a call to +** sqlite3changegroup_delete(). If an error occurs, an SQLite error code +** (i.e. SQLITE_NOMEM) is returned and *pp is set to NULL. +** +** The usual usage pattern for an sqlite3_changegroup object is as follows: +** +**
+**   • It is created using a call to sqlite3changegroup_new().
+**
+**   • Zero or more changesets (or patchsets) are added to the object
+**     by calling sqlite3changegroup_add().
+**
+**   • The result of combining all input changesets together is obtained
+**     by the application via a call to sqlite3changegroup_output().
+**
+**   • The object is deleted using a call to sqlite3changegroup_delete().
+**
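+** A sketch of this pattern, assuming (nA, pA) and (nB, pB) are two
+** changeset buffers recorded one after the other:
+**
+**   sqlite3_changegroup *pGrp = 0;
+**   int nOut = 0;
+**   void *pOut = 0;
+**   int rc = sqlite3changegroup_new(&pGrp);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, &nOut, &pOut);
+**   sqlite3changegroup_delete(pGrp);
+**   /* ... use (nOut, pOut), then sqlite3_free(pOut) ... */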
+**
+** Any number of calls to add() and output() may be made between the calls to
+** new() and delete(), and in any order.
+**
+** As well as the regular sqlite3changegroup_add() and
+** sqlite3changegroup_output() functions, also available are the streaming
+** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
+*/
+int sqlite3changegroup_new(sqlite3_changegroup **pp);
+
+/*
+** Add all changes within the changeset (or patchset) in buffer pData (size
+** nData bytes) to the changegroup.
+**
+** If the buffer contains a patchset, then all prior calls to this function
+** on the same changegroup object must also have specified patchsets. Or, if
+** the buffer contains a changeset, so must have the earlier calls to this
+** function. Otherwise, SQLITE_ERROR is returned and no changes are added
+** to the changegroup.
+**
+** Rows within the changeset and changegroup are identified by the values in
+** their PRIMARY KEY columns. A change in the changeset is considered to
+** apply to the same row as a change already present in the changegroup if
+** the two rows have the same primary key.
+**
+** Changes to rows that do not already appear in the changegroup are
+** simply copied into it. Or, if both the new changeset and the changegroup
+** contain changes that apply to a single row, the final contents of the
+** changegroup depends on the type of each change, as follows:
+**
+**   Existing Change + New Change -> Output Change
+**
+**   INSERT + INSERT:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**   INSERT + UPDATE:
+**     The INSERT change remains in the changegroup. The values in the
+**     INSERT change are modified as if the row was inserted by the
+**     existing change and then updated according to the new change.
+**   INSERT + DELETE:
+**     The existing INSERT is removed from the changegroup. The DELETE is
+**     not added.
+**   UPDATE + INSERT:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**   UPDATE + UPDATE:
+**     The existing UPDATE remains within the changegroup. It is amended
+**     so that the accompanying values are as if the row was updated once
+**     by the existing change and then again by the new change.
+**   UPDATE + DELETE:
+**     The existing UPDATE is replaced by the new DELETE within the
+**     changegroup.
+**   DELETE + INSERT:
+**     If one or more of the column values in the row inserted by the
+**     new change differ from those in the row deleted by the existing
+**     change, the existing DELETE is replaced by an UPDATE within the
+**     changegroup. Otherwise, if the inserted row is exactly the same
+**     as the deleted row, the existing DELETE is simply discarded.
+**   DELETE + UPDATE:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
+**   DELETE + DELETE:
+**     The new change is ignored. This case does not occur if the new
+**     changeset was recorded immediately after the changesets already
+**     added to the changegroup.
    +** +** If the new changeset contains changes to a table that is already present +** in the changegroup, then the number of columns and the position of the +** primary key columns for the table must be consistent. If this is not the +** case, this function fails with SQLITE_SCHEMA. If the input changeset +** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is +** returned. Or, if an out-of-memory condition occurs during processing, this +** function returns SQLITE_NOMEM. In all cases, if an error occurs the +** final contents of the changegroup is undefined. +** +** If no error occurs, SQLITE_OK is returned. +*/ +int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); + +/* +** Obtain a buffer containing a changeset (or patchset) representing the +** current contents of the changegroup. If the inputs to the changegroup +** were themselves changesets, the output is a changeset. Or, if the +** inputs were patchsets, the output is also a patchset. +** +** As with the output of the sqlite3session_changeset() and +** sqlite3session_patchset() functions, all changes related to a single +** table are grouped together in the output of this function. Tables appear +** in the same order as for the very first changeset added to the changegroup. +** If the second or subsequent changesets added to the changegroup contain +** changes for tables that do not appear in the first changeset, they are +** appended onto the end of the output changeset, again in the order in +** which they are first encountered. +** +** If an error occurs, an SQLite error code is returned and the output +** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK +** is returned and the output variables are set to the size of and a +** pointer to the output buffer, respectively. In this case it is the +** responsibility of the caller to eventually free the buffer using a +** call to sqlite3_free(). +*/ +int sqlite3changegroup_output( + sqlite3_changegroup*, + int *pnData, /* OUT: Size of output buffer in bytes */ + void **ppData /* OUT: Pointer to output buffer */ +); + +/* +** Delete a changegroup object. +*/ +void sqlite3changegroup_delete(sqlite3_changegroup*); + +/* +** CAPI3REF: Apply A Changeset To A Database +** +** Apply a changeset to a database. This function attempts to update the +** "main" database attached to handle db with the changes found in the +** changeset passed via the second and third arguments. +** +** The fourth argument (xFilter) passed to this function is the "filter +** callback". If it is not NULL, then for each table affected by at least one +** change in the changeset, the filter callback is invoked with +** the table name as the second argument, and a copy of the context pointer +** passed as the sixth argument to this function as the first. If the "filter +** callback" returns zero, then no attempt is made to apply any changes to +** the table. Otherwise, if the return value is non-zero or the xFilter +** argument to this function is NULL, all changes related to the table are +** attempted. +** +** For each table that is not excluded by the filter callback, this function +** tests that the target database contains a compatible table. A table is +** considered compatible if all of the following are true: +** +**
+**   • The table has the same name as the name recorded in the
+**     changeset, and
+**   • The table has the same number of columns as recorded in the
+**     changeset, and
+**   • The table has primary key columns in the same position as
+**     recorded in the changeset.
    +** +** If there is no compatible table, it is not an error, but none of the +** changes associated with the table are applied. A warning message is issued +** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most +** one such warning is issued for each table in the changeset. +** +** For each change for which there is a compatible table, an attempt is made +** to modify the table contents according to the UPDATE, INSERT or DELETE +** change. If a change cannot be applied cleanly, the conflict handler +** function passed as the fifth argument to sqlite3changeset_apply() may be +** invoked. A description of exactly when the conflict handler is invoked for +** each type of change is below. +** +** Unlike the xFilter argument, xConflict may not be passed NULL. The results +** of passing anything other than a valid function pointer as the xConflict +** argument are undefined. +** +** Each time the conflict handler function is invoked, it must return one +** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or +** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned +** if the second argument passed to the conflict handler is either +** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler +** returns an illegal value, any changes already made are rolled back and +** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different +** actions are taken by sqlite3changeset_apply() depending on the value +** returned by each invocation of the conflict-handler function. Refer to +** the documentation for the three +** [SQLITE_CHANGESET_OMIT|available return values] for details. +** +**
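+** A sketch of the callbacks wired into a call to this function, for an
+** application that chooses to apply every table and skip any conflicting
+** change (one of several possible policies):
+**
+**   static int xFilter(void *pCtx, const char *zTab){
+**     return 1;                       /* attempt changes for every table */
+**   }
+**   static int xConflict(void *pCtx, int eConflict,
+**                        sqlite3_changeset_iter *p){
+**     return SQLITE_CHANGESET_OMIT;   /* skip conflicting changes */
+**   }
+**   ...
+**   rc = sqlite3changeset_apply(db, nChangeset, pChangeset,
+**                               xFilter, xConflict, 0);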
    +**
+** DELETE Changes
    +** For each DELETE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset the row is deleted from the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from the original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT +** (which can only happen if a foreign key constraint is violated), the +** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT] +** passed as the second argument. This includes the case where the DELETE +** operation is attempted because an earlier call to the conflict handler +** function returned [SQLITE_CHANGESET_REPLACE]. +** +**
+** INSERT Changes
    +** For each INSERT change, an attempt is made to insert the new row into +** the database. +** +** If the attempt to insert the row fails because the database already +** contains a row with the same primary key values, the conflict handler +** function is invoked with the second argument set to +** [SQLITE_CHANGESET_CONFLICT]. +** +** If the attempt to insert the row fails because of some other constraint +** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is +** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT]. +** This includes the case where the INSERT operation is re-attempted because +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +** +**
+** UPDATE Changes
    +** For each UPDATE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset the row is updated within the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from an original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** UPDATE changes only contain values for non-primary key fields that are +** to be modified, only those fields need to match the original values to +** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the UPDATE operation is attempted, but SQLite returns +** SQLITE_CONSTRAINT, the conflict-handler function is invoked with +** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument. +** This includes the case where the UPDATE operation is attempted after +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +**
+**
+** It is safe to execute SQL statements, including those that write to the
+** table that the callback relates to, from within the xConflict callback.
+** This can be used to further customize the application's conflict
+** resolution strategy.
+**
+** All changes made by this function are enclosed in a savepoint transaction.
+** If any other error (aside from a constraint failure when attempting to
+** write to the target database) occurs, then the savepoint transaction is
+** rolled back, restoring the target database to its original state, and an
+** SQLite error code returned.
+*/
+int sqlite3changeset_apply(
+  sqlite3 *db,                    /* Apply change to "main" db of this handle */
+  int nChangeset,                 /* Size of changeset in bytes */
+  void *pChangeset,               /* Changeset blob */
+  int(*xFilter)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+    const char *zTab              /* Table name */
+  ),
+  int(*xConflict)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+    int eConflict,                /* DATA, MISSING, CONFLICT, CONSTRAINT */
+    sqlite3_changeset_iter *p     /* Handle describing change and conflict */
+  ),
+  void *pCtx                      /* First argument passed to xConflict */
+);
+
+/*
+** CAPI3REF: Constants Passed To The Conflict Handler
+**
+** Values that may be passed as the second argument to a conflict-handler.
+**
    +**
+** SQLITE_CHANGESET_DATA
    +** The conflict handler is invoked with CHANGESET_DATA as the second argument +** when processing a DELETE or UPDATE change if a row with the required +** PRIMARY KEY fields is present in the database, but one or more other +** (non primary-key) fields modified by the update do not contain the +** expected "before" values. +** +** The conflicting row, in this case, is the database row with the matching +** primary key. +** +**
+** SQLITE_CHANGESET_NOTFOUND
    +** The conflict handler is invoked with CHANGESET_NOTFOUND as the second +** argument when processing a DELETE or UPDATE change if a row with the +** required PRIMARY KEY fields is not present in the database. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
+** SQLITE_CHANGESET_CONFLICT
    +** CHANGESET_CONFLICT is passed as the second argument to the conflict +** handler while processing an INSERT change if the operation would result +** in duplicate primary key values. +** +** The conflicting row in this case is the database row with the matching +** primary key. +** +**
+** SQLITE_CHANGESET_FOREIGN_KEY
    +** If foreign key handling is enabled, and applying a changeset leaves the +** database in a state containing foreign key violations, the conflict +** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument +** exactly once before the changeset is committed. If the conflict handler +** returns CHANGESET_OMIT, the changes, including those that caused the +** foreign key constraint violation, are committed. Or, if it returns +** CHANGESET_ABORT, the changeset is rolled back. +** +** No current or conflicting row information is provided. The only function +** it is possible to call on the supplied sqlite3_changeset_iter handle +** is sqlite3changeset_fk_conflicts(). +** +**
+** SQLITE_CHANGESET_CONSTRAINT
    +** If any other constraint violation occurs while applying a change (i.e. +** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is +** invoked with CHANGESET_CONSTRAINT as the second argument. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
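+** As a sketch, a handler might inspect the conflicting row before deciding;
+** the guard below is required because SQLITE_CHANGESET_REPLACE is only legal
+** for the DATA and CONFLICT cases:
+**
+**   static int xConflict(void *pCtx, int eConflict,
+**                        sqlite3_changeset_iter *p){
+**     if( eConflict==SQLITE_CHANGESET_DATA
+**      || eConflict==SQLITE_CHANGESET_CONFLICT ){
+**       sqlite3_value *pVal = 0;
+**       sqlite3changeset_conflict(p, 0, &pVal);  /* column 0 of the row */
+**       /* ... examine pVal ... */
+**       return SQLITE_CHANGESET_REPLACE;
+**     }
+**     return SQLITE_CHANGESET_OMIT;
+**   }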
    +*/ +#define SQLITE_CHANGESET_DATA 1 +#define SQLITE_CHANGESET_NOTFOUND 2 +#define SQLITE_CHANGESET_CONFLICT 3 +#define SQLITE_CHANGESET_CONSTRAINT 4 +#define SQLITE_CHANGESET_FOREIGN_KEY 5 + +/* +** CAPI3REF: Constants Returned By The Conflict Handler +** +** A conflict handler callback must return one of the following three values. +** +**
    +**
+** SQLITE_CHANGESET_OMIT
    +** If a conflict handler returns this value no special action is taken. The +** change that caused the conflict is not applied. The session module +** continues to the next change in the changeset. +** +**
+** SQLITE_CHANGESET_REPLACE
    +** This value may only be returned if the second argument to the conflict +** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this +** is not the case, any changes applied so far are rolled back and the +** call to sqlite3changeset_apply() returns SQLITE_MISUSE. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict +** handler, then the conflicting row is either updated or deleted, depending +** on the type of change. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict +** handler, then the conflicting row is removed from the database and a +** second attempt to apply the change is made. If this second attempt fails, +** the original row is restored to the database before continuing. +** +**
+** SQLITE_CHANGESET_ABORT
    +** If this value is returned, any changes applied so far are rolled back +** and the call to sqlite3changeset_apply() returns SQLITE_ABORT. +**
    +*/ +#define SQLITE_CHANGESET_OMIT 0 +#define SQLITE_CHANGESET_REPLACE 1 +#define SQLITE_CHANGESET_ABORT 2 + +/* +** CAPI3REF: Streaming Versions of API functions. +** +** The six streaming API xxx_strm() functions serve similar purposes to the +** corresponding non-streaming API functions: +** +** +** +**
+**   Streaming function              Non-streaming equivalent
+**   sqlite3changeset_apply_strm     [sqlite3changeset_apply]
+**   sqlite3changeset_concat_strm    [sqlite3changeset_concat]
+**   sqlite3changeset_invert_strm    [sqlite3changeset_invert]
+**   sqlite3changeset_start_strm     [sqlite3changeset_start]
+**   sqlite3session_changeset_strm   [sqlite3session_changeset]
+**   sqlite3session_patchset_strm    [sqlite3session_patchset]
+**
+** Non-streaming functions that accept changesets (or patchsets) as input
+** require that the entire changeset be stored in a single buffer in memory.
+** Similarly, those that return a changeset or patchset do so by returning
+** a pointer to a single large buffer allocated using sqlite3_malloc().
+** Normally this is convenient. However, if an application running in a
+** low-memory environment is required to handle very large changesets, the
+** large contiguous memory allocations required can become onerous.
+**
+** In order to avoid this problem, instead of a single large buffer, input
+** is passed to the streaming API functions by way of a callback function that
+** the sessions module invokes to incrementally request input data as it is
+** required. In all cases, a pair of API function parameters such as
+**
    +**        int nChangeset,
    +**        void *pChangeset,
    +**  
+**
+** is replaced by:
+**
    +**        int (*xInput)(void *pIn, void *pData, int *pnData),
    +**        void *pIn,
    +**  
    +** +** Each time the xInput callback is invoked by the sessions module, the first +** argument passed is a copy of the supplied pIn context pointer. The second +** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no +** error occurs the xInput method should copy up to (*pnData) bytes of data +** into the buffer and set (*pnData) to the actual number of bytes copied +** before returning SQLITE_OK. If the input is completely exhausted, (*pnData) +** should be set to zero to indicate this. Or, if an error occurs, an SQLite +** error code should be returned. In all cases, if an xInput callback returns +** an error, all processing is abandoned and the streaming API function +** returns a copy of the error code to the caller. +** +** In the case of sqlite3changeset_start_strm(), the xInput callback may be +** invoked by the sessions module at any point during the lifetime of the +** iterator. If such an xInput callback returns an error, the iterator enters +** an error state, whereby all subsequent calls to iterator functions +** immediately fail with the same error code as returned by xInput. +** +** Similarly, streaming API functions that return changesets (or patchsets) +** return them in chunks by way of a callback function instead of via a +** pointer to a single large buffer. In this case, a pair of parameters such +** as: +** +**
    +**        int *pnChangeset,
    +**        void **ppChangeset,
    +**  
+**
+** is replaced by:
+**
    +**        int (*xOutput)(void *pOut, const void *pData, int nData),
    +**        void *pOut
    +**  
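+** As a sketch, an xOutput implementation might simply append each chunk to
+** an open stdio stream supplied via the pOut context pointer (a FILE* sink
+** is an arbitrary choice for illustration):
+**
+**   static int xOutput(void *pOut, const void *pData, int nData){
+**     FILE *f = (FILE*)pOut;
+**     if( fwrite(pData, 1, (size_t)nData, f)!=(size_t)nData ){
+**       return SQLITE_IOERR;          /* abandons the streaming call */
+**     }
+**     return SQLITE_OK;
+**   }
+**   ...
+**   rc = sqlite3session_changeset_strm(pSession, xOutput, (void*)f);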
    +** +** The xOutput callback is invoked zero or more times to return data to +** the application. The first parameter passed to each call is a copy of the +** pOut pointer supplied by the application. The second parameter, pData, +** points to a buffer nData bytes in size containing the chunk of output +** data being returned. If the xOutput callback successfully processes the +** supplied data, it should return SQLITE_OK to indicate success. Otherwise, +** it should return some other SQLite error code. In this case processing +** is immediately abandoned and the streaming API function returns a copy +** of the xOutput error code to the application. +** +** The sessions module never invokes an xOutput callback with the third +** parameter set to a value less than or equal to zero. Other than this, +** no guarantees are made as to the size of the chunks of data returned. +*/ +int sqlite3changeset_apply_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); +int sqlite3changeset_concat_strm( + int (*xInputA)(void *pIn, void *pData, int *pnData), + void *pInA, + int (*xInputB)(void *pIn, void *pData, int *pnData), + void *pInB, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changeset_invert_strm( + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changeset_start_strm( + sqlite3_changeset_iter **pp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +int sqlite3session_changeset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3session_patchset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changegroup_add_strm(sqlite3_changegroup*, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +int sqlite3changegroup_output_strm(sqlite3_changegroup*, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); + + +/* +** Make sure we can call this stuff from C++. +*/ +#if 0 +} +#endif + +#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */ + +/******** End of sqlite3session.h *********/ +/******** Begin file fts5.h *********/ /* ** 2014 May 31 ** @@ -8522,11 +10164,13 @@ struct Fts5PhraseIter { ** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid ** ** with $p set to a phrase equivalent to the phrase iPhrase of the -** current query is executed. For each row visited, the callback function -** passed as the fourth argument is invoked. The context and API objects -** passed to the callback function may be used to access the properties of -** each matched row. Invoking Api.xUserData() returns a copy of the pointer -** passed as the third argument to pUserData. +** current query is executed. Any column filter that applies to +** phrase iPhrase of the current query is included in $p. 
For each +** row visited, the callback function passed as the fourth argument +** is invoked. The context and API objects passed to the callback +** function may be used to access the properties of each matched row. +** Invoking Api.xUserData() returns a copy of the pointer passed as +** the third argument to pUserData. ** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. @@ -8695,7 +10339,7 @@ struct Fts5ExtensionApi { ** behaviour. The structure methods are expected to function as follows: ** ** xCreate: -** This function is used to allocate and inititalize a tokenizer instance. +** This function is used to allocate and initialize a tokenizer instance. ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) @@ -8955,7 +10599,7 @@ struct fts5_api { #endif /* _FTS5_H */ - +/******** End of fts5.h *********/ /************** End of sqlite3.h *********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -9252,7 +10896,7 @@ struct fts5_api { ** the SQLITE_DISABLE_INTRINSIC define. */ #if !defined(SQLITE_DISABLE_INTRINSIC) -# if defined(_MSC_VER) && _MSC_VER>=1300 +# if defined(_MSC_VER) && _MSC_VER>=1400 # if !defined(_WIN32_WCE) # include # pragma intrinsic(_byteswap_ushort) @@ -9437,7 +11081,7 @@ SQLITE_PRIVATE void sqlite3Coverage(int); ** be true and false so that the unreachable code they specify will ** not be counted as untested code. */ -#if defined(SQLITE_COVERAGE_TEST) +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST) # define ALWAYS(X) (1) # define NEVER(X) (0) #elif !defined(NDEBUG) @@ -9529,8 +11173,8 @@ SQLITE_PRIVATE void sqlite3Coverage(int); ** This is the header file for the generic hash-table implementation ** used in SQLite. */ -#ifndef _SQLITE_HASH_H_ -#define _SQLITE_HASH_H_ +#ifndef SQLITE_HASH_H +#define SQLITE_HASH_H /* Forward declarations of structures. 
*/ typedef struct Hash Hash; @@ -9610,7 +11254,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); */ /* #define sqliteHashCount(H) ((H)->count) // NOT USED */ -#endif /* _SQLITE_HASH_H_ */ +#endif /* SQLITE_HASH_H */ /************** End of hash.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -9642,76 +11286,76 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*); #define TK_AS 24 #define TK_WITHOUT 25 #define TK_COMMA 26 -#define TK_ID 27 -#define TK_INDEXED 28 -#define TK_ABORT 29 -#define TK_ACTION 30 -#define TK_AFTER 31 -#define TK_ANALYZE 32 -#define TK_ASC 33 -#define TK_ATTACH 34 -#define TK_BEFORE 35 -#define TK_BY 36 -#define TK_CASCADE 37 -#define TK_CAST 38 -#define TK_COLUMNKW 39 -#define TK_CONFLICT 40 -#define TK_DATABASE 41 -#define TK_DESC 42 -#define TK_DETACH 43 -#define TK_EACH 44 -#define TK_FAIL 45 -#define TK_FOR 46 -#define TK_IGNORE 47 -#define TK_INITIALLY 48 -#define TK_INSTEAD 49 -#define TK_LIKE_KW 50 -#define TK_MATCH 51 -#define TK_NO 52 -#define TK_KEY 53 -#define TK_OF 54 -#define TK_OFFSET 55 -#define TK_PRAGMA 56 -#define TK_RAISE 57 -#define TK_RECURSIVE 58 -#define TK_REPLACE 59 -#define TK_RESTRICT 60 -#define TK_ROW 61 -#define TK_TRIGGER 62 -#define TK_VACUUM 63 -#define TK_VIEW 64 -#define TK_VIRTUAL 65 -#define TK_WITH 66 -#define TK_REINDEX 67 -#define TK_RENAME 68 -#define TK_CTIME_KW 69 -#define TK_ANY 70 -#define TK_OR 71 -#define TK_AND 72 -#define TK_IS 73 -#define TK_BETWEEN 74 -#define TK_IN 75 -#define TK_ISNULL 76 -#define TK_NOTNULL 77 -#define TK_NE 78 -#define TK_EQ 79 -#define TK_GT 80 -#define TK_LE 81 -#define TK_LT 82 -#define TK_GE 83 -#define TK_ESCAPE 84 -#define TK_BITAND 85 -#define TK_BITOR 86 -#define TK_LSHIFT 87 -#define TK_RSHIFT 88 -#define TK_PLUS 89 -#define TK_MINUS 90 -#define TK_STAR 91 -#define TK_SLASH 92 -#define TK_REM 93 -#define TK_CONCAT 94 -#define TK_COLLATE 95 -#define TK_BITNOT 96 +#define TK_OR 27 +#define TK_AND 28 +#define TK_IS 29 +#define TK_MATCH 30 +#define TK_LIKE_KW 31 +#define TK_BETWEEN 32 +#define TK_IN 33 +#define TK_ISNULL 34 +#define TK_NOTNULL 35 +#define TK_NE 36 +#define TK_EQ 37 +#define TK_GT 38 +#define TK_LE 39 +#define TK_LT 40 +#define TK_GE 41 +#define TK_ESCAPE 42 +#define TK_BITAND 43 +#define TK_BITOR 44 +#define TK_LSHIFT 45 +#define TK_RSHIFT 46 +#define TK_PLUS 47 +#define TK_MINUS 48 +#define TK_STAR 49 +#define TK_SLASH 50 +#define TK_REM 51 +#define TK_CONCAT 52 +#define TK_COLLATE 53 +#define TK_BITNOT 54 +#define TK_ID 55 +#define TK_INDEXED 56 +#define TK_ABORT 57 +#define TK_ACTION 58 +#define TK_AFTER 59 +#define TK_ANALYZE 60 +#define TK_ASC 61 +#define TK_ATTACH 62 +#define TK_BEFORE 63 +#define TK_BY 64 +#define TK_CASCADE 65 +#define TK_CAST 66 +#define TK_COLUMNKW 67 +#define TK_CONFLICT 68 +#define TK_DATABASE 69 +#define TK_DESC 70 +#define TK_DETACH 71 +#define TK_EACH 72 +#define TK_FAIL 73 +#define TK_FOR 74 +#define TK_IGNORE 75 +#define TK_INITIALLY 76 +#define TK_INSTEAD 77 +#define TK_NO 78 +#define TK_KEY 79 +#define TK_OF 80 +#define TK_OFFSET 81 +#define TK_PRAGMA 82 +#define TK_RAISE 83 +#define TK_RECURSIVE 84 +#define TK_REPLACE 85 +#define TK_RESTRICT 86 +#define TK_ROW 87 +#define TK_TRIGGER 88 +#define TK_VACUUM 89 +#define TK_VIEW 90 +#define TK_VIRTUAL 91 +#define TK_WITH 92 +#define TK_REINDEX 93 +#define TK_RENAME 94 +#define TK_CTIME_KW 95 +#define TK_ANY 96 #define TK_STRING 97 #define TK_JOIN_KW 98 #define TK_CONSTRAINT 99 @@ -10313,6 +11957,7 @@ typedef 
struct LookasideSlot LookasideSlot; typedef struct Module Module; typedef struct NameContext NameContext; typedef struct Parse Parse; +typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; typedef struct RowSet RowSet; typedef struct Savepoint Savepoint; @@ -10357,8 +12002,8 @@ typedef struct With With; ** subsystem. See comments in the source code for a detailed description ** of what each interface routine does. */ -#ifndef _BTREE_H_ -#define _BTREE_H_ +#ifndef SQLITE_BTREE_H +#define SQLITE_BTREE_H /* TODO: This definition is just included so other modules compile. It ** needs to be revisited. @@ -10383,6 +12028,7 @@ typedef struct With With; typedef struct Btree Btree; typedef struct BtCursor BtCursor; typedef struct BtShared BtShared; +typedef struct BtreePayload BtreePayload; SQLITE_PRIVATE int sqlite3BtreeOpen( @@ -10594,19 +12240,43 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags); #define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */ #define BTREE_AUXDELETE 0x04 /* not the primary delete operation */ -SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const void *pKey, i64 nKey, - const void *pData, int nData, - int nZero, int bias, int seekResult); +/* An instance of the BtreePayload object describes the content of a single +** entry in either an index or table btree. +** +** Index btrees (used for indexes and also WITHOUT ROWID tables) contain +** an arbitrary key and no data. These btrees have pKey,nKey set to their +** key and pData,nData,nZero set to zero. +** +** Table btrees (used for rowid tables) contain an integer rowid used as +** the key and passed in the nKey field. The pKey field is zero. +** pData,nData hold the content of the new entry. nZero extra zero bytes +** are appended to the end of the content when constructing the entry. +** +** This object is used to pass information into sqlite3BtreeInsert(). The +** same information used to be passed as five separate parameters. But placing +** the information into this object helps to keep the interface more +** organized and understandable, and it also helps the resulting code to +** run a little faster by using fewer registers for parameter passing. +*/ +struct BtreePayload { + const void *pKey; /* Key content for indexes. NULL for tables */ + sqlite3_int64 nKey; /* Size of pKey for indexes. PRIMARY KEY for tabs */ + const void *pData; /* Data for tables. NULL for indexes */ + int nData; /* Size of pData. 0 if none. 
*/ + int nZero; /* Extra zero data appended after pData,nData */ +}; + +SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload, + int bias, int seekResult); SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes); SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*); SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes); -SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor*, i64 *pSize); +SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*); -SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor*, u32 *pAmt); -SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor*, u32 *pAmt); -SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor*, u32 *pSize); +SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt); +SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*); SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*); @@ -10647,11 +12317,13 @@ SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*); SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*); SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*); SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*); +SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree*); #else # define sqlite3BtreeEnter(X) # define sqlite3BtreeEnterAll(X) # define sqlite3BtreeSharable(X) 0 # define sqlite3BtreeEnterCursor(X) +# define sqlite3BtreeConnectionCount(X) 1 #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE @@ -10676,7 +12348,7 @@ SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*); #endif -#endif /* _BTREE_H_ */ +#endif /* SQLITE_BTREE_H */ /************** End of btree.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -10699,8 +12371,8 @@ SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*); ** or VDBE. The VDBE implements an abstract machine that runs a ** simple program to access and modify the underlying database. 
*/ -#ifndef _SQLITE_VDBE_H_ -#define _SQLITE_VDBE_H_ +#ifndef SQLITE_VDBE_H +#define SQLITE_VDBE_H /* #include */ /* @@ -10725,7 +12397,7 @@ typedef struct SubProgram SubProgram; struct VdbeOp { u8 opcode; /* What operation to perform */ signed char p4type; /* One of the P4_xxx constants for p4 */ - u8 opflags; /* Mask of the OPFLG_* flags in opcodes.h */ + u8 notUsed1; u8 p5; /* Fifth parameter is an unsigned character */ int p1; /* First operand */ int p2; /* Second parameter (often the jump destination) */ @@ -10744,6 +12416,7 @@ struct VdbeOp { KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */ int *ai; /* Used when p4type is P4_INTARRAY */ SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */ + Table *pTab; /* Used when p4type is P4_TABLE */ #ifdef SQLITE_ENABLE_CURSOR_HINTS Expr *pExpr; /* Used when p4type is P4_EXPR */ #endif @@ -10808,7 +12481,8 @@ typedef struct VdbeOpList VdbeOpList; #define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */ #define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */ #define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */ -#define P4_FUNCCTX (-20) /* P4 is a pointer to an sqlite3_context object */ +#define P4_TABLE (-20) /* P4 is a pointer to a Table structure */ +#define P4_FUNCCTX (-21) /* P4 is a pointer to an sqlite3_context object */ /* Error message codes for OP_Halt */ #define P5_ConstraintNotNull 1 @@ -10866,150 +12540,150 @@ typedef struct VdbeOpList VdbeOpList; #define OP_VUpdate 12 /* synopsis: data=r[P3@P2] */ #define OP_Goto 13 #define OP_Gosub 14 -#define OP_Return 15 -#define OP_InitCoroutine 16 -#define OP_EndCoroutine 17 -#define OP_Yield 18 +#define OP_InitCoroutine 15 +#define OP_Yield 16 +#define OP_MustBeInt 17 +#define OP_Jump 18 #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ -#define OP_HaltIfNull 20 /* synopsis: if r[P3]=null halt */ -#define OP_Halt 21 -#define OP_Integer 22 /* synopsis: r[P2]=P1 */ -#define OP_Int64 23 /* synopsis: r[P2]=P4 */ -#define OP_String 24 /* synopsis: r[P2]='P4' (len=P1) */ -#define OP_Null 25 /* synopsis: r[P2..P3]=NULL */ -#define OP_SoftNull 26 /* synopsis: r[P1]=NULL */ -#define OP_Blob 27 /* synopsis: r[P2]=P4 (len=P1) */ -#define OP_Variable 28 /* synopsis: r[P2]=parameter(P1,P4) */ -#define OP_Move 29 /* synopsis: r[P2@P3]=r[P1@P3] */ -#define OP_Copy 30 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ -#define OP_SCopy 31 /* synopsis: r[P2]=r[P1] */ -#define OP_IntCopy 32 /* synopsis: r[P2]=r[P1] */ -#define OP_ResultRow 33 /* synopsis: output=r[P1@P2] */ -#define OP_CollSeq 34 -#define OP_Function0 35 /* synopsis: r[P3]=func(r[P2@P5]) */ -#define OP_Function 36 /* synopsis: r[P3]=func(r[P2@P5]) */ -#define OP_AddImm 37 /* synopsis: r[P1]=r[P1]+P2 */ -#define OP_MustBeInt 38 -#define OP_RealAffinity 39 -#define OP_Cast 40 /* synopsis: affinity(r[P1]) */ -#define OP_Permutation 41 -#define OP_Compare 42 /* synopsis: r[P1@P3] <-> r[P2@P3] */ -#define OP_Jump 43 -#define OP_Once 44 -#define OP_If 45 -#define OP_IfNot 46 -#define OP_Column 47 /* synopsis: r[P3]=PX */ -#define OP_Affinity 48 /* synopsis: affinity(r[P1@P2]) */ -#define OP_MakeRecord 49 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ -#define OP_Count 50 /* synopsis: r[P2]=count() */ -#define OP_ReadCookie 51 -#define OP_SetCookie 52 -#define OP_ReopenIdx 53 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenRead 54 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenWrite 55 /* synopsis: root=P2 iDb=P3 */ -#define OP_OpenAutoindex 56 /* synopsis: nColumn=P2 */ -#define 
OP_OpenEphemeral 57 /* synopsis: nColumn=P2 */ -#define OP_SorterOpen 58 -#define OP_SequenceTest 59 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ -#define OP_OpenPseudo 60 /* synopsis: P3 columns in r[P2] */ -#define OP_Close 61 -#define OP_ColumnsUsed 62 -#define OP_SeekLT 63 /* synopsis: key=r[P3@P4] */ -#define OP_SeekLE 64 /* synopsis: key=r[P3@P4] */ -#define OP_SeekGE 65 /* synopsis: key=r[P3@P4] */ -#define OP_SeekGT 66 /* synopsis: key=r[P3@P4] */ -#define OP_NoConflict 67 /* synopsis: key=r[P3@P4] */ -#define OP_NotFound 68 /* synopsis: key=r[P3@P4] */ -#define OP_Found 69 /* synopsis: key=r[P3@P4] */ -#define OP_NotExists 70 /* synopsis: intkey=r[P3] */ -#define OP_Or 71 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ -#define OP_And 72 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ -#define OP_Sequence 73 /* synopsis: r[P2]=cursor[P1].ctr++ */ -#define OP_NewRowid 74 /* synopsis: r[P2]=rowid */ -#define OP_Insert 75 /* synopsis: intkey=r[P3] data=r[P2] */ -#define OP_IsNull 76 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ -#define OP_NotNull 77 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ -#define OP_Ne 78 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */ -#define OP_Eq 79 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */ -#define OP_Gt 80 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */ -#define OP_Le 81 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */ -#define OP_Lt 82 /* same as TK_LT, synopsis: if r[P1]=r[P3] goto P2 */ -#define OP_InsertInt 84 /* synopsis: intkey=P3 data=r[P2] */ -#define OP_BitAnd 85 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ -#define OP_BitOr 86 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ -#define OP_ShiftLeft 87 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<>r[P1] */ -#define OP_Add 89 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ -#define OP_Subtract 90 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ -#define OP_Multiply 91 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ -#define OP_Divide 92 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ -#define OP_Remainder 93 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ -#define OP_Concat 94 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ -#define OP_Delete 95 -#define OP_BitNot 96 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */ +#define OP_Once 20 +#define OP_If 21 +#define OP_IfNot 22 +#define OP_SeekLT 23 /* synopsis: key=r[P3@P4] */ +#define OP_SeekLE 24 /* synopsis: key=r[P3@P4] */ +#define OP_SeekGE 25 /* synopsis: key=r[P3@P4] */ +#define OP_SeekGT 26 /* synopsis: key=r[P3@P4] */ +#define OP_Or 27 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ +#define OP_And 28 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ +#define OP_NoConflict 29 /* synopsis: key=r[P3@P4] */ +#define OP_NotFound 30 /* synopsis: key=r[P3@P4] */ +#define OP_Found 31 /* synopsis: key=r[P3@P4] */ +#define OP_SeekRowid 32 /* synopsis: intkey=r[P3] */ +#define OP_NotExists 33 /* synopsis: intkey=r[P3] */ +#define OP_IsNull 34 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ +#define OP_NotNull 35 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ +#define OP_Ne 36 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */ +#define OP_Eq 37 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */ +#define OP_Gt 38 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */ +#define OP_Le 39 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */ +#define OP_Lt 40 /* same as TK_LT, synopsis: if r[P1]=r[P3] goto P2 */ 
+#define OP_Last 42 +#define OP_BitAnd 43 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */ +#define OP_BitOr 44 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */ +#define OP_ShiftLeft 45 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<>r[P1] */ +#define OP_Add 47 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */ +#define OP_Subtract 48 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */ +#define OP_Multiply 49 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */ +#define OP_Divide 50 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */ +#define OP_Remainder 51 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */ +#define OP_Concat 52 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */ +#define OP_SorterSort 53 +#define OP_BitNot 54 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */ +#define OP_Sort 55 +#define OP_Rewind 56 +#define OP_IdxLE 57 /* synopsis: key=r[P3@P4] */ +#define OP_IdxGT 58 /* synopsis: key=r[P3@P4] */ +#define OP_IdxLT 59 /* synopsis: key=r[P3@P4] */ +#define OP_IdxGE 60 /* synopsis: key=r[P3@P4] */ +#define OP_RowSetRead 61 /* synopsis: r[P3]=rowset(P1) */ +#define OP_RowSetTest 62 /* synopsis: if r[P3] in rowset(P1) goto P2 */ +#define OP_Program 63 +#define OP_FkIfZero 64 /* synopsis: if fkctr[P1]==0 goto P2 */ +#define OP_IfPos 65 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ +#define OP_IfNotZero 66 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */ +#define OP_DecrJumpZero 67 /* synopsis: if (--r[P1])==0 goto P2 */ +#define OP_IncrVacuum 68 +#define OP_VNext 69 +#define OP_Init 70 /* synopsis: Start at P2 */ +#define OP_Return 71 +#define OP_EndCoroutine 72 +#define OP_HaltIfNull 73 /* synopsis: if r[P3]=null halt */ +#define OP_Halt 74 +#define OP_Integer 75 /* synopsis: r[P2]=P1 */ +#define OP_Int64 76 /* synopsis: r[P2]=P4 */ +#define OP_String 77 /* synopsis: r[P2]='P4' (len=P1) */ +#define OP_Null 78 /* synopsis: r[P2..P3]=NULL */ +#define OP_SoftNull 79 /* synopsis: r[P1]=NULL */ +#define OP_Blob 80 /* synopsis: r[P2]=P4 (len=P1) */ +#define OP_Variable 81 /* synopsis: r[P2]=parameter(P1,P4) */ +#define OP_Move 82 /* synopsis: r[P2@P3]=r[P1@P3] */ +#define OP_Copy 83 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */ +#define OP_SCopy 84 /* synopsis: r[P2]=r[P1] */ +#define OP_IntCopy 85 /* synopsis: r[P2]=r[P1] */ +#define OP_ResultRow 86 /* synopsis: output=r[P1@P2] */ +#define OP_CollSeq 87 +#define OP_Function0 88 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_Function 89 /* synopsis: r[P3]=func(r[P2@P5]) */ +#define OP_AddImm 90 /* synopsis: r[P1]=r[P1]+P2 */ +#define OP_RealAffinity 91 +#define OP_Cast 92 /* synopsis: affinity(r[P1]) */ +#define OP_Permutation 93 +#define OP_Compare 94 /* synopsis: r[P1@P3] <-> r[P2@P3] */ +#define OP_Column 95 /* synopsis: r[P3]=PX */ +#define OP_Affinity 96 /* synopsis: affinity(r[P1@P2]) */ #define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */ -#define OP_ResetCount 98 -#define OP_SorterCompare 99 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ -#define OP_SorterData 100 /* synopsis: r[P2]=data */ -#define OP_RowKey 101 /* synopsis: r[P2]=key */ -#define OP_RowData 102 /* synopsis: r[P2]=data */ -#define OP_Rowid 103 /* synopsis: r[P2]=rowid */ -#define OP_NullRow 104 -#define OP_Last 105 -#define OP_SorterSort 106 -#define OP_Sort 107 -#define OP_Rewind 108 -#define OP_SorterInsert 109 -#define OP_IdxInsert 110 /* synopsis: key=r[P2] */ -#define OP_IdxDelete 111 /* synopsis: key=r[P2@P3] */ -#define OP_Seek 112 /* synopsis: Move P3 to P1.rowid */ -#define OP_IdxRowid 113 /* synopsis: r[P2]=rowid */ -#define 
OP_IdxLE 114 /* synopsis: key=r[P3@P4] */ -#define OP_IdxGT 115 /* synopsis: key=r[P3@P4] */ -#define OP_IdxLT 116 /* synopsis: key=r[P3@P4] */ -#define OP_IdxGE 117 /* synopsis: key=r[P3@P4] */ -#define OP_Destroy 118 -#define OP_Clear 119 -#define OP_ResetSorter 120 -#define OP_CreateIndex 121 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_CreateTable 122 /* synopsis: r[P2]=root iDb=P1 */ -#define OP_ParseSchema 123 -#define OP_LoadAnalysis 124 -#define OP_DropTable 125 -#define OP_DropIndex 126 -#define OP_DropTrigger 127 -#define OP_IntegrityCk 128 -#define OP_RowSetAdd 129 /* synopsis: rowset(P1)=r[P2] */ -#define OP_RowSetRead 130 /* synopsis: r[P3]=rowset(P1) */ -#define OP_RowSetTest 131 /* synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 132 +#define OP_MakeRecord 98 /* synopsis: r[P3]=mkrec(r[P1@P2]) */ +#define OP_Count 99 /* synopsis: r[P2]=count() */ +#define OP_ReadCookie 100 +#define OP_SetCookie 101 +#define OP_ReopenIdx 102 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenRead 103 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenWrite 104 /* synopsis: root=P2 iDb=P3 */ +#define OP_OpenAutoindex 105 /* synopsis: nColumn=P2 */ +#define OP_OpenEphemeral 106 /* synopsis: nColumn=P2 */ +#define OP_SorterOpen 107 +#define OP_SequenceTest 108 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */ +#define OP_OpenPseudo 109 /* synopsis: P3 columns in r[P2] */ +#define OP_Close 110 +#define OP_ColumnsUsed 111 +#define OP_Sequence 112 /* synopsis: r[P2]=cursor[P1].ctr++ */ +#define OP_NewRowid 113 /* synopsis: r[P2]=rowid */ +#define OP_Insert 114 /* synopsis: intkey=r[P3] data=r[P2] */ +#define OP_InsertInt 115 /* synopsis: intkey=P3 data=r[P2] */ +#define OP_Delete 116 +#define OP_ResetCount 117 +#define OP_SorterCompare 118 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */ +#define OP_SorterData 119 /* synopsis: r[P2]=data */ +#define OP_RowKey 120 /* synopsis: r[P2]=key */ +#define OP_RowData 121 /* synopsis: r[P2]=data */ +#define OP_Rowid 122 /* synopsis: r[P2]=rowid */ +#define OP_NullRow 123 +#define OP_SorterInsert 124 +#define OP_IdxInsert 125 /* synopsis: key=r[P2] */ +#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */ +#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */ +#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */ +#define OP_Destroy 129 +#define OP_Clear 130 +#define OP_ResetSorter 131 +#define OP_CreateIndex 132 /* synopsis: r[P2]=root iDb=P1 */ #define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */ -#define OP_Param 134 -#define OP_FkCounter 135 /* synopsis: fkctr[P1]+=P2 */ -#define OP_FkIfZero 136 /* synopsis: if fkctr[P1]==0 goto P2 */ -#define OP_MemMax 137 /* synopsis: r[P1]=max(r[P1],r[P2]) */ -#define OP_IfPos 138 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ -#define OP_OffsetLimit 139 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ -#define OP_IfNotZero 140 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */ -#define OP_DecrJumpZero 141 /* synopsis: if (--r[P1])==0 goto P2 */ -#define OP_JumpZeroIncr 142 /* synopsis: if (r[P1]++)==0 ) goto P2 */ -#define OP_AggStep0 143 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggStep 144 /* synopsis: accum=r[P3] step(r[P2@P5]) */ -#define OP_AggFinal 145 /* synopsis: accum=r[P1] N=P2 */ -#define OP_IncrVacuum 146 -#define OP_Expire 147 -#define OP_TableLock 148 /* synopsis: iDb=P1 root=P2 write=P3 */ -#define OP_VBegin 149 -#define OP_VCreate 150 -#define OP_VDestroy 151 -#define OP_VOpen 152 -#define OP_VColumn 153 /* synopsis: r[P3]=vcolumn(P2) */ 
-#define OP_VNext 154 -#define OP_VRename 155 -#define OP_Pagecount 156 -#define OP_MaxPgcnt 157 -#define OP_Init 158 /* synopsis: Start at P2 */ +#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */ +#define OP_ParseSchema 135 +#define OP_LoadAnalysis 136 +#define OP_DropTable 137 +#define OP_DropIndex 138 +#define OP_DropTrigger 139 +#define OP_IntegrityCk 140 +#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */ +#define OP_Param 142 +#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */ +#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */ +#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */ +#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */ +#define OP_AggFinal 148 /* synopsis: accum=r[P1] N=P2 */ +#define OP_Expire 149 +#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */ +#define OP_VBegin 151 +#define OP_VCreate 152 +#define OP_VDestroy 153 +#define OP_VOpen 154 +#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */ +#define OP_VRename 156 +#define OP_Pagecount 157 +#define OP_MaxPgcnt 158 #define OP_CursorHint 159 #define OP_Noop 160 #define OP_Explain 161 @@ -11026,27 +12700,35 @@ typedef struct VdbeOpList VdbeOpList; #define OPFLG_OUT3 0x20 /* out3: P3 is an output */ #define OPFLG_INITIALIZER {\ /* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01,\ -/* 8 */ 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01, 0x02,\ -/* 16 */ 0x01, 0x02, 0x03, 0x12, 0x08, 0x00, 0x10, 0x10,\ -/* 24 */ 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x10,\ -/* 32 */ 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x03, 0x02,\ -/* 40 */ 0x02, 0x00, 0x00, 0x01, 0x01, 0x03, 0x03, 0x00,\ -/* 48 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00,\ -/* 56 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,\ -/* 64 */ 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x26,\ -/* 72 */ 0x26, 0x10, 0x10, 0x00, 0x03, 0x03, 0x0b, 0x0b,\ -/* 80 */ 0x0b, 0x0b, 0x0b, 0x0b, 0x00, 0x26, 0x26, 0x26,\ -/* 88 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x00,\ -/* 96 */ 0x12, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\ -/* 104 */ 0x00, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x00,\ -/* 112 */ 0x00, 0x10, 0x01, 0x01, 0x01, 0x01, 0x10, 0x00,\ -/* 120 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 128 */ 0x00, 0x06, 0x23, 0x0b, 0x01, 0x10, 0x10, 0x00,\ -/* 136 */ 0x01, 0x04, 0x03, 0x1a, 0x03, 0x03, 0x03, 0x00,\ -/* 144 */ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 152 */ 0x00, 0x00, 0x01, 0x00, 0x10, 0x10, 0x01, 0x00,\ +/* 8 */ 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01,\ +/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x03, 0x03, 0x09,\ +/* 24 */ 0x09, 0x09, 0x09, 0x26, 0x26, 0x09, 0x09, 0x09,\ +/* 32 */ 0x09, 0x09, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 40 */ 0x0b, 0x0b, 0x01, 0x26, 0x26, 0x26, 0x26, 0x26,\ +/* 48 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x01, 0x12, 0x01,\ +/* 56 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x23, 0x0b, 0x01,\ +/* 64 */ 0x01, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x02,\ +/* 72 */ 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00,\ +/* 80 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\ +/* 88 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00,\ +/* 96 */ 0x00, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,\ +/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 112 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\ +/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\ +/* 136 */ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\ +/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ +/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\ /* 160 */ 0x00, 0x00,} +/* The sqlite3P2Values() routine is able to run faster if it knows +** the value of the largest JUMP opcode. The smaller the maximum +** JUMP opcode the better, so the mkopcodeh.tcl script that +** generated this include file strives to group all JUMP opcodes +** together near the beginning of the list. +*/ +#define SQLITE_MX_JUMP_OPCODE 70 /* Maximum JUMP opcode */ + /************** End of opcodes.h *********************************************/ /************** Continuing where we left off in vdbe.h ***********************/ @@ -11192,7 +12874,7 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const ch # define sqlite3VdbeScanStatus(a,b,c,d,e) #endif -#endif +#endif /* SQLITE_VDBE_H */ /************** End of vdbe.h ************************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -11214,8 +12896,8 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const ch ** at a time and provides a journal for rollback. */ -#ifndef _PAGER_H_ -#define _PAGER_H_ +#ifndef SQLITE_PAGER_H +#define SQLITE_PAGER_H /* ** Default maximum size for persistent journal files. A negative @@ -11268,7 +12950,11 @@ typedef struct PgHdr DbPage; #define PAGER_LOCKINGMODE_EXCLUSIVE 1 /* -** Numeric constants that encode the journalmode. +** Numeric constants that encode the journalmode. +** +** The numeric values encoded here (other than PAGER_JOURNALMODE_QUERY) +** are exposed in the API via the "PRAGMA journal_mode" command and +** therefore cannot be changed without a compatibility break. */ #define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */ #define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */ @@ -11286,6 +12972,11 @@ typedef struct PgHdr DbPage; /* ** Flags for sqlite3PagerSetFlags() +** +** Value constraints (enforced via assert()): +** PAGER_FULLFSYNC == SQLITE_FullFSync +** PAGER_CKPT_FULLFSYNC == SQLITE_CkptFullFSync +** PAGER_CACHE_SPILL == SQLITE_CacheSpill */ #define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */ #define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */ @@ -11394,7 +13085,7 @@ SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); -SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *); +SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*); SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); /* Functions used to truncate the database file. 
*/ @@ -11421,7 +13112,7 @@ SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*); # define enable_simulated_io_errors() #endif -#endif /* _PAGER_H_ */ +#endif /* SQLITE_PAGER_H */ /************** End of pager.h ***********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ @@ -11455,7 +13146,7 @@ struct PgHdr { sqlite3_pcache_page *pPage; /* Pcache object page handle */ void *pData; /* Page data */ void *pExtra; /* Extra content */ - PgHdr *pDirty; /* Transient list of dirty pages */ + PgHdr *pDirty; /* Transient list of dirty sorted by pgno */ Pager *pPager; /* The pager this page is part of */ Pgno pgno; /* Page number for this page */ #ifdef SQLITE_CHECK_PAGES @@ -11480,11 +13171,10 @@ struct PgHdr { #define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */ #define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before ** writing this page to the database */ -#define PGHDR_NEED_READ 0x010 /* Content is unread */ -#define PGHDR_DONT_WRITE 0x020 /* Do not write content to disk */ -#define PGHDR_MMAP 0x040 /* This is an mmap page object */ +#define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */ +#define PGHDR_MMAP 0x020 /* This is an mmap page object */ -#define PGHDR_WAL_APPEND 0x080 /* Appended to wal file */ +#define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */ /* Initialize and shutdown the page cache subsystem */ SQLITE_PRIVATE int sqlite3PcacheInitialize(void); @@ -11528,6 +13218,7 @@ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */ SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */ SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */ +SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache*); /* Change a page number. Used by incr-vacuum. */ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno); @@ -11566,6 +13257,11 @@ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*); SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)); #endif +#if defined(SQLITE_DEBUG) +/* Check invariants on a PgHdr object */ +SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*); +#endif + /* Set and get the suggested cache-size for the specified pager-cache. ** ** If no global maximum is configured, then the system attempts to limit @@ -11602,6 +13298,9 @@ SQLITE_PRIVATE void sqlite3PCacheSetDefault(void); SQLITE_PRIVATE int sqlite3HeaderSizePcache(void); SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void); +/* Number of dirty pages as a percentage of the configured cache size */ +SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache*); + #endif /* _PCACHE_H_ */ /************** End of pcache.h **********************************************/ @@ -11651,8 +13350,8 @@ SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void); ** This file contains pre-processor directives related to operating system ** detection and/or setup. 
*/ -#ifndef _OS_SETUP_H_ -#define _OS_SETUP_H_ +#ifndef SQLITE_OS_SETUP_H +#define SQLITE_OS_SETUP_H /* ** Figure out if we are dealing with Unix, Windows, or some other operating @@ -11692,7 +13391,7 @@ SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void); # endif #endif -#endif /* _OS_SETUP_H_ */ +#endif /* SQLITE_OS_SETUP_H */ /************** End of os_setup.h ********************************************/ /************** Continuing where we left off in os.h *************************/ @@ -11831,7 +13530,7 @@ SQLITE_PRIVATE int sqlite3OsInit(void); /* ** Functions for accessing sqlite3_file methods */ -SQLITE_PRIVATE int sqlite3OsClose(sqlite3_file*); +SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*); SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size); @@ -11876,7 +13575,7 @@ SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); ** sqlite3_malloc() to obtain space for the file-handle structure. */ SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); -SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *); +SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *); #endif /* _SQLITE_OS_H_ */ @@ -12149,6 +13848,15 @@ SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**); const char*); #endif +#ifndef SQLITE_OMIT_DEPRECATED +/* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing +** in the style of sqlite3_trace() +*/ +#define SQLITE_TRACE_LEGACY 0x80 +#else +#define SQLITE_TRACE_LEGACY 0 +#endif /* SQLITE_OMIT_DEPRECATED */ + /* ** Each database connection is an instance of the following structure. @@ -12178,6 +13886,7 @@ struct sqlite3 { u8 suppressErr; /* Do not issue error messages if true */ u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */ u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */ + u8 mTrace; /* zero or more SQLITE_TRACE flags */ int nextPagesize; /* Pagesize after VACUUM if >0 */ u32 magic; /* Magic number for detect library misuse */ int nChange; /* Value returned by sqlite3_changes() */ @@ -12198,7 +13907,7 @@ struct sqlite3 { int nVDestroy; /* Number of active OP_VDestroy operations */ int nExtension; /* Number of loaded extensions */ void **aExtension; /* Array of shared library handles */ - void (*xTrace)(void*,const char*); /* Trace function */ + int (*xTrace)(u32,void*,void*,void*); /* Trace function */ void *pTraceArg; /* Argument to the trace function */ void (*xProfile)(void*,const char*,u64); /* Profiling function */ void *pProfileArg; /* Argument to profile function */ @@ -12208,6 +13917,13 @@ struct sqlite3 { void (*xRollbackCallback)(void*); /* Invoked at every commit. */ void *pUpdateArg; void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + void *pPreUpdateArg; /* First argument to xPreUpdateCallback */ + void (*xPreUpdateCallback)( /* Registered using sqlite3_preupdate_hook() */ + void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64 + ); + PreUpdate *pPreUpdate; /* Context for active pre-update callback */ +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ #ifndef SQLITE_OMIT_WAL int (*xWalCallback)(void *, sqlite3 *, const char *, int); void *pWalArg; @@ -12278,6 +13994,11 @@ struct sqlite3 { /* ** Possible values for the sqlite3.flags. 
+** +** Value constraints (enforced via assert()): +** SQLITE_FullFSync == PAGER_FULLFSYNC +** SQLITE_CkptFullFSync == PAGER_CKPT_FULLFSYNC +** SQLITE_CacheSpill == PAGER_CACHE_SPILL */ #define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */ #define SQLITE_InternChanges 0x00000002 /* Uncommitted Hash table changes */ @@ -12305,13 +14026,14 @@ struct sqlite3 { #define SQLITE_AutoIndex 0x00100000 /* Enable automatic indexes */ #define SQLITE_PreferBuiltin 0x00200000 /* Preference to built-in funcs */ #define SQLITE_LoadExtension 0x00400000 /* Enable load_extension */ -#define SQLITE_EnableTrigger 0x00800000 /* True to enable triggers */ -#define SQLITE_DeferFKs 0x01000000 /* Defer all FK constraints */ -#define SQLITE_QueryOnly 0x02000000 /* Disable database changes */ -#define SQLITE_VdbeEQP 0x04000000 /* Debug EXPLAIN QUERY PLAN */ -#define SQLITE_Vacuum 0x08000000 /* Currently in a VACUUM */ -#define SQLITE_CellSizeCk 0x10000000 /* Check btree cell sizes on load */ -#define SQLITE_Fts3Tokenizer 0x20000000 /* Enable fts3_tokenizer(2) */ +#define SQLITE_LoadExtFunc 0x00800000 /* Enable load_extension() SQL func */ +#define SQLITE_EnableTrigger 0x01000000 /* True to enable triggers */ +#define SQLITE_DeferFKs 0x02000000 /* Defer all FK constraints */ +#define SQLITE_QueryOnly 0x04000000 /* Disable database changes */ +#define SQLITE_VdbeEQP 0x08000000 /* Debug EXPLAIN QUERY PLAN */ +#define SQLITE_Vacuum 0x10000000 /* Currently in a VACUUM */ +#define SQLITE_CellSizeCk 0x20000000 /* Check btree cell sizes on load */ +#define SQLITE_Fts3Tokenizer 0x40000000 /* Enable fts3_tokenizer(2) */ /* @@ -12412,6 +14134,13 @@ struct FuncDestructor { ** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. And ** SQLITE_FUNC_CONSTANT must be the same as SQLITE_DETERMINISTIC. There ** are assert() statements in the code to verify this. +** +** Value constraints (enforced via assert()): +** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg +** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG +** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG +** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API +** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API */ #define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ #define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */ @@ -13373,7 +15102,7 @@ struct SrcList { int regReturn; /* Register holding return address of addrFillSub */ int regResult; /* Registers holding results of a co-routine */ struct { - u8 jointype; /* Type of join between this able and the previous */ + u8 jointype; /* Type of join between this table and the previous */ unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */ unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */ unsigned isTabFunc :1; /* True if table-valued-function syntax */ @@ -13411,23 +15140,28 @@ struct SrcList { /* ** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin() ** and the WhereInfo.wctrlFlags member. 
+** +** Value constraints (enforced via assert()): +** WHERE_USE_LIMIT == SF_FixedLimit */ #define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */ #define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */ #define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */ #define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */ -#define WHERE_DUPLICATES_OK 0x0008 /* Ok to return a row more than once */ -#define WHERE_OMIT_OPEN_CLOSE 0x0010 /* Table cursors are already open */ -#define WHERE_FORCE_TABLE 0x0020 /* Do not use an index-only search */ -#define WHERE_ONETABLE_ONLY 0x0040 /* Only code the 1st table in pTabList */ -#define WHERE_NO_AUTOINDEX 0x0080 /* Disallow automatic indexes */ -#define WHERE_GROUPBY 0x0100 /* pOrderBy is really a GROUP BY */ -#define WHERE_DISTINCTBY 0x0200 /* pOrderby is really a DISTINCT clause */ -#define WHERE_WANT_DISTINCT 0x0400 /* All output needs to be distinct */ -#define WHERE_SORTBYGROUP 0x0800 /* Support sqlite3WhereIsSorted() */ -#define WHERE_REOPEN_IDX 0x1000 /* Try to use OP_ReopenIdx */ -#define WHERE_ONEPASS_MULTIROW 0x2000 /* ONEPASS is ok with multiple rows */ -#define WHERE_USE_LIMIT 0x4000 /* There is a constant LIMIT clause */ +#define WHERE_ONEPASS_MULTIROW 0x0008 /* ONEPASS is ok with multiple rows */ +#define WHERE_DUPLICATES_OK 0x0010 /* Ok to return a row more than once */ +#define WHERE_OR_SUBCLAUSE 0x0020 /* Processing a sub-WHERE as part of + ** the OR optimization */ +#define WHERE_GROUPBY 0x0040 /* pOrderBy is really a GROUP BY */ +#define WHERE_DISTINCTBY 0x0080 /* pOrderby is really a DISTINCT clause */ +#define WHERE_WANT_DISTINCT 0x0100 /* All output needs to be distinct */ +#define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */ +#define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */ +#define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */ + /* 0x1000 not currently used */ + /* 0x2000 not currently used */ +#define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */ + /* 0x8000 not currently used */ /* Allowed return values from sqlite3WhereIsDistinct() */ @@ -13471,16 +15205,18 @@ struct NameContext { /* ** Allowed values for the NameContext, ncFlags field. ** -** Note: NC_MinMaxAgg must have the same value as SF_MinMaxAgg and -** SQLITE_FUNC_MINMAX. +** Value constraints (all checked via assert()): +** NC_HasAgg == SF_HasAgg +** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX ** */ #define NC_AllowAgg 0x0001 /* Aggregate functions are allowed here */ -#define NC_HasAgg 0x0002 /* One or more aggregate functions seen */ +#define NC_PartIdx 0x0002 /* True if resolving a partial index WHERE */ #define NC_IsCheck 0x0004 /* True if resolving names in a CHECK constraint */ #define NC_InAggFunc 0x0008 /* True if analyzing arguments to an agg func */ -#define NC_PartIdx 0x0010 /* True if resolving a partial index WHERE */ +#define NC_HasAgg 0x0010 /* One or more aggregate functions seen */ #define NC_IdxExpr 0x0020 /* True if resolving columns of CREATE INDEX */ +#define NC_VarSelect 0x0040 /* A correlated subquery has been seen */ #define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. See note above */ /* @@ -13528,24 +15264,30 @@ struct Select { /* ** Allowed values for Select.selFlags. The "SF" prefix stands for ** "Select Flag". 
+** +** Value constraints (all checked via assert()) +** SF_HasAgg == NC_HasAgg +** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX +** SF_FixedLimit == WHERE_USE_LIMIT */ #define SF_Distinct 0x00001 /* Output should be DISTINCT */ #define SF_All 0x00002 /* Includes the ALL keyword */ #define SF_Resolved 0x00004 /* Identifiers have been resolved */ -#define SF_Aggregate 0x00008 /* Contains aggregate functions */ -#define SF_UsesEphemeral 0x00010 /* Uses the OpenEphemeral opcode */ -#define SF_Expanded 0x00020 /* sqlite3SelectExpand() called on this */ -#define SF_HasTypeInfo 0x00040 /* FROM subqueries have Table metadata */ -#define SF_Compound 0x00080 /* Part of a compound query */ -#define SF_Values 0x00100 /* Synthesized from VALUES clause */ -#define SF_MultiValue 0x00200 /* Single VALUES term with multiple rows */ -#define SF_NestedFrom 0x00400 /* Part of a parenthesized FROM clause */ -#define SF_MaybeConvert 0x00800 /* Need convertCompoundSelectToSubquery() */ +#define SF_Aggregate 0x00008 /* Contains agg functions or a GROUP BY */ +#define SF_HasAgg 0x00010 /* Contains aggregate functions */ +#define SF_UsesEphemeral 0x00020 /* Uses the OpenEphemeral opcode */ +#define SF_Expanded 0x00040 /* sqlite3SelectExpand() called on this */ +#define SF_HasTypeInfo 0x00080 /* FROM subqueries have Table metadata */ +#define SF_Compound 0x00100 /* Part of a compound query */ +#define SF_Values 0x00200 /* Synthesized from VALUES clause */ +#define SF_MultiValue 0x00400 /* Single VALUES term with multiple rows */ +#define SF_NestedFrom 0x00800 /* Part of a parenthesized FROM clause */ #define SF_MinMaxAgg 0x01000 /* Aggregate containing min() or max() */ #define SF_Recursive 0x02000 /* The recursive part of a recursive CTE */ #define SF_FixedLimit 0x04000 /* nSelectRow set by a constant LIMIT */ -#define SF_Converted 0x08000 /* By convertCompoundSelectToSubquery() */ -#define SF_IncludeHidden 0x10000 /* Include hidden columns in output */ +#define SF_MaybeConvert 0x08000 /* Need convertCompoundSelectToSubquery() */ +#define SF_Converted 0x10000 /* By convertCompoundSelectToSubquery() */ +#define SF_IncludeHidden 0x20000 /* Include hidden columns in output */ /* @@ -13742,6 +15484,7 @@ struct Parse { u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ u8 okConstFactor; /* OK to factor out constants */ u8 disableLookaside; /* Number of times lookaside has been disabled */ + u8 nColCache; /* Number of entries in aColCache[] */ int aTempReg[8]; /* Holding area for temporary registers */ int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ @@ -13855,6 +15598,15 @@ struct AuthContext { /* ** Bitfield flags for P5 value in various opcodes. 
+** +** Value constraints (enforced via assert()): +** OPFLAG_LENGTHARG == SQLITE_FUNC_LENGTH +** OPFLAG_TYPEOFARG == SQLITE_FUNC_TYPEOF +** OPFLAG_BULKCSR == BTREE_BULKLOAD +** OPFLAG_SEEKEQ == BTREE_SEEK_EQ +** OPFLAG_FORDELETE == BTREE_FORDELETE +** OPFLAG_SAVEPOSITION == BTREE_SAVEPOSITION +** OPFLAG_AUXDELETE == BTREE_AUXDELETE */ #define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */ /* Also used in P2 (not P5) of OP_Delete */ @@ -13863,6 +15615,9 @@ struct AuthContext { #define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */ #define OPFLAG_APPEND 0x08 /* This is likely to be an append */ #define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ +#endif #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ @@ -14109,6 +15864,7 @@ struct Walker { struct SrcCount *pSrcCount; /* Counting column references */ struct CCurHint *pCCurHint; /* Used by codeCursorHint() */ int *aiCol; /* array of column indexes */ + struct IdxCover *pIdxCover; /* Check for index coverage */ } u; }; @@ -14226,6 +15982,7 @@ SQLITE_PRIVATE int sqlite3IoerrnomemError(int); # define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04) # define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) +# define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x)) @@ -14234,6 +15991,7 @@ SQLITE_PRIVATE int sqlite3IoerrnomemError(int); # define sqlite3Isdigit(x) isdigit((unsigned char)(x)) # define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) +# define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') #endif #ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS SQLITE_PRIVATE int sqlite3IsIdChar(u8); @@ -14290,11 +16048,15 @@ SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) #endif -#ifdef SQLITE_ENABLE_MEMSYS3 -SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); -#endif +/* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. 
If they +** are, disable MEMSYS3 +*/ #ifdef SQLITE_ENABLE_MEMSYS5 SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void); +#undef SQLITE_ENABLE_MEMSYS3 +#endif +#ifdef SQLITE_ENABLE_MEMSYS3 +SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void); #endif @@ -14357,7 +16119,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8); SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); -SQLITE_PRIVATE int sqlite3Dequote(char*); +SQLITE_PRIVATE void sqlite3Dequote(char*); SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*); SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int); SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **); @@ -14374,6 +16136,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int); SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*); SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*); +SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*); SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*); @@ -14476,8 +16239,8 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*); SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*); SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*); SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**); -SQLITE_PRIVATE Index *sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, - Expr*, int, int); +SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*, + Expr*, int, int, u8); SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int); SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, @@ -14496,6 +16259,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*); SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*); +SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*); SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*); @@ -14529,8 +16293,10 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int); SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int); SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,int isView,const char*, const char*); -SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,int isView,struct SrcList_item *); +#define LOCATE_VIEW 0x01 +#define LOCATE_NOERR 0x02 +SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*); +SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_item *); SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*); SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*); @@ -14542,6 +16308,7 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, 
int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr*, Expr*, int); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); +SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx); SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*); SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*); #ifndef SQLITE_OMIT_BUILTIN_TEST @@ -15093,7 +16860,7 @@ SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**); SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*); #endif -#endif /* _SQLITEINT_H_ */ +#endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/ /************** Begin file global.c ******************************************/ @@ -15169,6 +16936,7 @@ SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = { ** isxdigit() 0x08 ** toupper() 0x20 ** SQLite identifier character 0x40 +** Quote character 0x80 ** ** Bit 0x20 is set if the mapped character requires translation to upper ** case. i.e. if the character is a lower-case ASCII character. @@ -15194,7 +16962,7 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */ - 0x01, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, /* 20..27 !"#$%&' */ + 0x01, 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x80, /* 20..27 !"#$%&' */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */ 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */ @@ -15202,8 +16970,8 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = { 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */ - 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */ - 0x00, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ + 0x02, 0x02, 0x02, 0x80, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */ + 0x80, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */ 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */ 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */ @@ -15427,6 +17195,15 @@ static const char * const azCompileOpt[] = { #if SQLITE_CHECK_PAGES "CHECK_PAGES", #endif +#if defined(__clang__) && defined(__clang_major__) + "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "." + CTIMEOPT_VAL(__clang_minor__) "." + CTIMEOPT_VAL(__clang_patchlevel__), +#elif defined(_MSC_VER) + "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER), +#elif defined(__GNUC__) && defined(__VERSION__) + "COMPILER=gcc-" __VERSION__, +#endif #if SQLITE_COVERAGE_TEST "COVERAGE_TEST", #endif @@ -15446,7 +17223,7 @@ static const char * const azCompileOpt[] = { "DISABLE_LFS", #endif #if SQLITE_ENABLE_8_3_NAMES - "ENABLE_8_3_NAMES", + "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), #endif #if SQLITE_ENABLE_API_ARMOR "ENABLE_API_ARMOR", @@ -15860,8 +17637,8 @@ SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N){ ** 6000 lines long) it was split up into several smaller files and ** this header information was factored out. 
*/ -#ifndef _VDBEINT_H_ -#define _VDBEINT_H_ +#ifndef SQLITE_VDBEINT_H +#define SQLITE_VDBEINT_H /* ** The maximum number of times that a statement will try to reparse @@ -16273,6 +18050,25 @@ struct Vdbe { #define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */ #define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */ +/* +** Structure used to store the context required by the +** sqlite3_preupdate_*() API functions. +*/ +struct PreUpdate { + Vdbe *v; + VdbeCursor *pCsr; /* Cursor to read old values from */ + int op; /* One of SQLITE_INSERT, UPDATE, DELETE */ + u8 *aRecord; /* old.* database record */ + KeyInfo keyinfo; + UnpackedRecord *pUnpacked; /* Unpacked version of aRecord[] */ + UnpackedRecord *pNewUnpacked; /* Unpacked version of new.* record */ + int iNewReg; /* Register for new.* values */ + i64 iKey1; /* First key value passed to hook */ + i64 iKey2; /* Second key value passed to hook */ + int iPKey; /* If not negative index of IPK column */ + Mem *aNew; /* Array of new.* values */ +}; + /* ** Function prototypes */ @@ -16332,6 +18128,9 @@ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n); SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int); SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*); SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int); +#endif SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p); SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *); @@ -16381,7 +18180,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *); #define ExpandBlob(P) SQLITE_OK #endif -#endif /* !defined(_VDBEINT_H_) */ +#endif /* !defined(SQLITE_VDBEINT_H) */ /************** End of vdbeInt.h *********************************************/ /************** Continuing where we left off in status.c *********************/ @@ -16528,7 +18327,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_status64( return SQLITE_OK; } SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){ - sqlite3_int64 iCur, iHwtr; + sqlite3_int64 iCur = 0, iHwtr = 0; int rc; #ifdef SQLITE_ENABLE_API_ARMOR if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT; @@ -16589,6 +18388,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status( ** by all pagers associated with the given database connection. The ** highwater mark is meaningless and is returned as zero. */ + case SQLITE_DBSTATUS_CACHE_USED_SHARED: case SQLITE_DBSTATUS_CACHE_USED: { int totalUsed = 0; int i; @@ -16597,7 +18397,11 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status( Btree *pBt = db->aDb[i].pBt; if( pBt ){ Pager *pPager = sqlite3BtreePager(pBt); - totalUsed += sqlite3PagerMemUsed(pPager); + int nByte = sqlite3PagerMemUsed(pPager); + if( op==SQLITE_DBSTATUS_CACHE_USED_SHARED ){ + nByte = nByte / sqlite3BtreeConnectionCount(pBt); + } + totalUsed += nByte; } } sqlite3BtreeLeaveAll(db); @@ -16769,6 +18573,15 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status( #ifndef SQLITE_OMIT_DATETIME_FUNCS +/* +** The MSVC CRT on Windows CE may not have a localtime() function. +** So declare a substitute. The substitute function itself is +** defined in "os_win.c". +*/ +#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \ + (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API) +struct tm *__cdecl localtime(const time_t *); +#endif /* ** A structure for holding a single date and time. 
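/*
** Illustrative sketch, not part of the upstream change: the PreUpdate
** context above, together with the xPreUpdateCallback member added to
** struct sqlite3 earlier in this section, is the plumbing behind the
** public sqlite3_preupdate_hook() interface (available only in builds
** compiled with SQLITE_ENABLE_PREUPDATE_HOOK). A minimal registration
** might look as follows; the callback body and its stderr destination
** are hypothetical.
*/
#include <stdio.h>
#include "sqlite3.h"

static void onPreUpdate(
  void *pCtx,            /* Copy of the third argument to the hook call */
  sqlite3 *db,           /* Connection being written */
  int op,                /* SQLITE_INSERT, SQLITE_UPDATE or SQLITE_DELETE */
  char const *zDb,       /* Database name ("main", "temp", ...) */
  char const *zName,     /* Table name */
  sqlite3_int64 iKey1,   /* Rowid before the change */
  sqlite3_int64 iKey2    /* Rowid after the change (differs for UPDATE) */
){
  (void)pCtx; (void)db;  /* Unused in this sketch */
  fprintf(stderr, "pre-update: op=%d %s.%s rowid %lld -> %lld\n",
          op, zDb, zName, (long long)iKey1, (long long)iKey2);
}

/* Usage: sqlite3_preupdate_hook(db, onPreUpdate, 0); */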
@@ -17137,6 +18950,7 @@ static void clearYMD_HMS_TZ(DateTime *p){ p->validTZ = 0; } +#ifndef SQLITE_OMIT_LOCALTIME /* ** On recent Windows platforms, the localtime_s() function is available ** as part of the "Secure CRT". It is essentially equivalent to @@ -17155,7 +18969,6 @@ static void clearYMD_HMS_TZ(DateTime *p){ #define HAVE_LOCALTIME_S 1 #endif -#ifndef SQLITE_OMIT_LOCALTIME /* ** The following routine implements the rough equivalent of localtime_r() ** using whatever operating-system specific localtime facility that @@ -17822,7 +19635,6 @@ static void currentTimeFunc( ){ time_t t; char *zFormat = (char *)sqlite3_user_data(context); - sqlite3 *db; sqlite3_int64 iT; struct tm *pTm; struct tm sNow; @@ -17891,9 +19703,7 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ ** This file contains OS interface code that is common to all ** architectures. */ -#define _SQLITE_OS_C_ 1 /* #include "sqliteInt.h" */ -#undef _SQLITE_OS_C_ /* ** If we compile with the SQLITE_TEST macro set, then the following block @@ -17959,13 +19769,11 @@ SQLITE_API int sqlite3_memdebug_vfs_oom_test = 1; ** of this would be completely automatic if SQLite were coded using ** C++ instead of plain old C. */ -SQLITE_PRIVATE int sqlite3OsClose(sqlite3_file *pId){ - int rc = SQLITE_OK; +SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file *pId){ if( pId->pMethods ){ - rc = pId->pMethods->xClose(pId); + pId->pMethods->xClose(pId); pId->pMethods = 0; } - return rc; } SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){ DO_OS_MALLOC_TEST(id); @@ -18183,12 +19991,10 @@ SQLITE_PRIVATE int sqlite3OsOpenMalloc( } return rc; } -SQLITE_PRIVATE int sqlite3OsCloseFree(sqlite3_file *pFile){ - int rc = SQLITE_OK; +SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *pFile){ assert( pFile ); - rc = sqlite3OsClose(pFile); + sqlite3OsClose(pFile); sqlite3_free(pFile); - return rc; } /* @@ -21410,8 +23216,8 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H /* ** The following routine only works on pentium-class (or newer) processors. @@ -21479,7 +23285,7 @@ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif -#endif /* !defined(_HWTIME_H_) */ +#endif /* !defined(SQLITE_HWTIME_H) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ @@ -21569,8 +23375,8 @@ SQLITE_API extern int sqlite3_open_file_count; ** ** This file contains code that is specific to Windows. */ -#ifndef _OS_WIN_H_ -#define _OS_WIN_H_ +#ifndef SQLITE_OS_WIN_H +#define SQLITE_OS_WIN_H /* ** Include the primary Windows SDK header file. @@ -21642,7 +23448,7 @@ SQLITE_API extern int sqlite3_open_file_count; # define SQLITE_OS_WIN_THREADS 0 #endif -#endif /* _OS_WIN_H_ */ +#endif /* SQLITE_OS_WIN_H */ /************** End of os_win.h **********************************************/ /************** Continuing where we left off in mutex_w32.c ******************/ @@ -22868,26 +24674,26 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ ** Conversion types fall into various categories as defined by the ** following enumeration. */ -#define etRADIX 1 /* Integer types. %d, %x, %o, and so forth */ -#define etFLOAT 2 /* Floating point. 
%f */ -#define etEXP 3 /* Exponential notation. %e and %E */ -#define etGENERIC 4 /* Floating or exponential, depending on exponent. %g */ -#define etSIZE 5 /* Return number of characters processed so far. %n */ -#define etSTRING 6 /* Strings. %s */ -#define etDYNSTRING 7 /* Dynamically allocated strings. %z */ -#define etPERCENT 8 /* Percent symbol. %% */ -#define etCHARX 9 /* Characters. %c */ +#define etRADIX 0 /* Integer types. %d, %x, %o, and so forth */ +#define etFLOAT 1 /* Floating point. %f */ +#define etEXP 2 /* Exponential notation. %e and %E */ +#define etGENERIC 3 /* Floating or exponential, depending on exponent. %g */ +#define etSIZE 4 /* Return number of characters processed so far. %n */ +#define etSTRING 5 /* Strings. %s */ +#define etDYNSTRING 6 /* Dynamically allocated strings. %z */ +#define etPERCENT 7 /* Percent symbol. %% */ +#define etCHARX 8 /* Characters. %c */ /* The rest are extensions, not normally found in printf() */ -#define etSQLESCAPE 10 /* Strings with '\'' doubled. %q */ -#define etSQLESCAPE2 11 /* Strings with '\'' doubled and enclosed in '', NULL pointers replaced by SQL NULL. %Q */ -#define etTOKEN 12 /* a pointer to a Token structure */ -#define etSRCLIST 13 /* a pointer to a SrcList */ -#define etPOINTER 14 /* The %p conversion */ -#define etSQLESCAPE3 15 /* %w -> Strings with '\"' doubled */ -#define etORDINAL 16 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ +#define etSQLESCAPE 9 /* Strings with '\'' doubled. %q */ +#define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '', NULL pointers replaced by SQL NULL. %Q */ +#define etTOKEN 11 /* a pointer to a Token structure */ +#define etSRCLIST 12 /* a pointer to a SrcList */ +#define etPOINTER 13 /* The %p conversion */ +#define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */ +#define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */ -#define etINVALID 0 /* Any unrecognized conversion type */ +#define etINVALID 16 /* Any unrecognized conversion type */ /* @@ -23042,7 +24848,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf( etByte flag_long; /* True if "l" flag is present */ etByte flag_longlong; /* True if the "ll" flag is present */ etByte done; /* Loop termination flag */ - etByte xtype = 0; /* Conversion paradigm */ + etByte xtype = etINVALID; /* Conversion paradigm */ u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ u8 useIntern; /* Ok to use internal conversions (ex: %T) */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */ @@ -24393,6 +26199,12 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m break; } #endif + case TK_MATCH: { + sqlite3TreeViewLine(pView, "MATCH {%d:%d}%s", + pExpr->iTable, pExpr->iColumn, zFlgs); + sqlite3TreeViewExpr(pView, pExpr->pRight, 0); + break; + } default: { sqlite3TreeViewLine(pView, "op=%d", pExpr->op); break; @@ -25633,18 +27445,13 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){ ** brackets from around identifiers. For example: "[a-b-c]" becomes ** "a-b-c".
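The etSQLESCAPE, etSQLESCAPE2 and etSQLESCAPE3 codes renumbered above implement the %q, %Q and %w extensions that applications reach through the public printf family. A short usage sketch (table and column names are examples only):

#include "sqlite3.h"

/* %Q renders a string single-quoted with embedded quotes doubled, or
** the bare keyword NULL for a NULL pointer; %w doubles double-quote
** characters so the value can be used as a quoted identifier. */
static char *buildInsert(const char *zCol, const char *zVal){
  /* Caller frees the result with sqlite3_free(). */
  return sqlite3_mprintf("INSERT INTO t(\"%w\") VALUES(%Q)", zCol, zVal);
}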
*/ -SQLITE_PRIVATE int sqlite3Dequote(char *z){ +SQLITE_PRIVATE void sqlite3Dequote(char *z){ char quote; int i, j; - if( z==0 ) return -1; + if( z==0 ) return; quote = z[0]; - switch( quote ){ - case '\'': break; - case '"': break; - case '`': break; /* For MySQL compatibility */ - case '[': quote = ']'; break; /* For MS SqlServer compatibility */ - default: return -1; - } + if( !sqlite3Isquote(quote) ) return; + if( quote=='[' ) quote = ']'; for(i=1, j=0;; i++){ assert( z[i] ); if( z[i]==quote ){ @@ -25659,7 +27466,6 @@ SQLITE_PRIVATE int sqlite3Dequote(char *z){ } } z[j] = 0; - return j; } /* @@ -25752,7 +27558,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en int eValid = 1; /* True exponent is either not used or is well-formed */ double result; int nDigits = 0; - int nonNum = 0; + int nonNum = 0; /* True if input contains UTF16 with high byte non-zero */ assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); *pResult = 0.0; /* Default return value, in case of an error */ @@ -25765,7 +27571,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); for(i=3-enc; i=zEnd ) goto do_atof_calc; @@ -25813,7 +27617,12 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en if( *z=='e' || *z=='E' ){ z+=incr; eValid = 0; - if( z>=zEnd ) goto do_atof_calc; + + /* This branch is needed to avoid a (harmless) buffer overread. The + ** special comment alerts the mutation tester that the correct answer + ** is obtained even if the branch is omitted */ + if( z>=zEnd ) goto do_atof_calc; /*PREVENTS-HARMLESS-OVERREAD*/ + /* get sign of exponent */ if( *z=='-' ){ esign = -1; @@ -25830,9 +27639,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en } /* skip trailing spaces */ - if( nDigits && eValid ){ - while( z0 ){ - while( s<(LARGEST_INT64/10) && e>0 ) e--,s*=10; - }else{ - while( !(s%10) && e>0 ) e--,s/=10; + /* Attempt to reduce exponent. + ** + ** Branches that are not required for the correct answer but which only + ** help to obtain the correct answer faster are marked with special + ** comments, as a hint to the mutation tester. + */ + while( e>0 ){ /*OPTIMIZATION-IF-TRUE*/ + if( esign>0 ){ + if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/ + s *= 10; + }else{ + if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/ + s /= 10; + } + e--; } /* adjust the sign of significand */ s = sign<0 ? -s : s; - /* if exponent, scale significand as appropriate - ** and store in result. 
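The rewritten sqlite3Dequote() above replaces the explicit switch with a test of the new 0x80 quote bit in sqlite3CtypeMap (via sqlite3Isquote()) and no longer returns a length. A self-contained sketch of the resulting rule, for illustration only, not the library's code:

/* ' " and ` close on themselves; [ closes on ] (MS SQL Server style).
** A doubled closing quote embeds one literal copy: 'it''s' -> it's. */
static void dequoteSketch(char *z){
  char quote = z[0];
  int i, j;
  if( quote!='\'' && quote!='"' && quote!='`' && quote!='[' ) return;
  if( quote=='[' ) quote = ']';
  for(i=1, j=0; z[i]; i++){
    if( z[i]==quote ){
      if( z[i+1]==quote ){ z[j++] = quote; i++; }else break;
    }else{
      z[j++] = z[i];
    }
  }
  z[j] = 0;
}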
*/ - if( e ){ + if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/ + result = (double)s; + }else{ LONGDOUBLE_TYPE scale = 1.0; /* attempt to handle extremely small/large numbers better */ - if( e>307 && e<342 ){ - while( e%308 ) { scale *= 1.0e+1; e -= 1; } - if( esign<0 ){ - result = s / scale; - result /= 1.0e+308; - }else{ - result = s * scale; - result *= 1.0e+308; - } - }else if( e>=342 ){ - if( esign<0 ){ - result = 0.0*s; - }else{ - result = 1e308*1e308*s; /* Infinity */ + if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/ + if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/ + while( e%308 ) { scale *= 1.0e+1; e -= 1; } + if( esign<0 ){ + result = s / scale; + result /= 1.0e+308; + }else{ + result = s * scale; + result *= 1.0e+308; + } + }else{ assert( e>=342 ); + if( esign<0 ){ + result = 0.0*s; + }else{ + result = 1e308*1e308*s; /* Infinity */ + } } }else{ /* 1.0e+22 is the largest power of 10 that can be @@ -25891,8 +27708,6 @@ do_atof_calc: result = s * scale; } } - } else { - result = (double)s; } } @@ -25900,7 +27715,7 @@ do_atof_calc: *pResult = result; /* return true if number and no extra non-whitespace characters after */ - return z>=zEnd && nDigits>0 && eValid && nonNum==0; + return z==zEnd && nDigits>0 && eValid && nonNum==0; #else return !sqlite3Atoi64(z, pResult, length, enc); #endif /* SQLITE_OMIT_FLOATING_POINT */ @@ -25962,7 +27777,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc int neg = 0; /* assume positive */ int i; int c = 0; - int nonNum = 0; + int nonNum = 0; /* True if input contains UTF16 with high byte non-zero */ const char *zStart; const char *zEnd = zNum + length; assert( enc==SQLITE_UTF8 || enc==SQLITE_UTF16LE || enc==SQLITE_UTF16BE ); @@ -25973,7 +27788,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc assert( SQLITE_UTF16LE==2 && SQLITE_UTF16BE==3 ); for(i=3-enc; i19*incr || nonNum ){ + if( &zNum[i]<zEnd /* Input contains non-numeric text */ + || (i==0 && zStart==zNum) /* No digits */ + || i>19*incr /* Too many digits */ + || nonNum /* UTF16 with high-order bytes non-zero */ + ){ /* zNum is empty or contains non-numeric text or is longer ** than 19 digits (thus guaranteeing that it is too large) */ return 1; @@ -26043,7 +27861,6 @@ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){ #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') - && sqlite3Isxdigit(z[2]) ){ u64 u = 0; int i, k; @@ -26805,7 +28622,7 @@ SQLITE_PRIVATE LogEst sqlite3LogEst(u64 x){ if( x<2 ) return 0; while( x<8 ){ y -= 10; x <<= 1; } }else{ - while( x>255 ){ y += 40; x >>= 4; } + while( x>255 ){ y += 40; x >>= 4; } /*OPTIMIZATION-IF-TRUE*/ while( x>15 ){ y += 10; x >>= 1; } } return a[x&7] + y - 10; @@ -26839,7 +28656,6 @@ SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double x){ */ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){ u64 n; - if( x<10 ) return 1; n = x%10; x /= 10; if( n>=5 ) n -= 2; @@ -26915,7 +28731,7 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash *pH){ static unsigned int strHash(const char *z){ unsigned int h = 0; unsigned char c; - while( (c = (unsigned char)*z++)!=0 ){ + while( (c = (unsigned char)*z++)!=0 ){ /*OPTIMIZATION-IF-TRUE*/ h = (h<<3) ^ h ^ sqlite3UpperToLower[c]; } return h; @@ -27008,7 +28824,7 @@ static HashElem *findElementWithHash( int count; /* Number of elements left to test */ unsigned int h; /* The computed hash */ - if( pH->ht ){ + if( pH->ht ){ /*OPTIMIZATION-IF-TRUE*/ struct _ht *pEntry; h = strHash(pKey) % pH->htsize; pEntry = &pH->ht[h]; @@ -27155,150 +28971,150 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 12 */ "VUpdate" OpHelp("data=r[P3@P2]"), /*
13 */ "Goto" OpHelp(""), /* 14 */ "Gosub" OpHelp(""), - /* 15 */ "Return" OpHelp(""), - /* 16 */ "InitCoroutine" OpHelp(""), - /* 17 */ "EndCoroutine" OpHelp(""), - /* 18 */ "Yield" OpHelp(""), + /* 15 */ "InitCoroutine" OpHelp(""), + /* 16 */ "Yield" OpHelp(""), + /* 17 */ "MustBeInt" OpHelp(""), + /* 18 */ "Jump" OpHelp(""), /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"), - /* 20 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), - /* 21 */ "Halt" OpHelp(""), - /* 22 */ "Integer" OpHelp("r[P2]=P1"), - /* 23 */ "Int64" OpHelp("r[P2]=P4"), - /* 24 */ "String" OpHelp("r[P2]='P4' (len=P1)"), - /* 25 */ "Null" OpHelp("r[P2..P3]=NULL"), - /* 26 */ "SoftNull" OpHelp("r[P1]=NULL"), - /* 27 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), - /* 28 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), - /* 29 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), - /* 30 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), - /* 31 */ "SCopy" OpHelp("r[P2]=r[P1]"), - /* 32 */ "IntCopy" OpHelp("r[P2]=r[P1]"), - /* 33 */ "ResultRow" OpHelp("output=r[P1@P2]"), - /* 34 */ "CollSeq" OpHelp(""), - /* 35 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), - /* 36 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), - /* 37 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), - /* 38 */ "MustBeInt" OpHelp(""), - /* 39 */ "RealAffinity" OpHelp(""), - /* 40 */ "Cast" OpHelp("affinity(r[P1])"), - /* 41 */ "Permutation" OpHelp(""), - /* 42 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), - /* 43 */ "Jump" OpHelp(""), - /* 44 */ "Once" OpHelp(""), - /* 45 */ "If" OpHelp(""), - /* 46 */ "IfNot" OpHelp(""), - /* 47 */ "Column" OpHelp("r[P3]=PX"), - /* 48 */ "Affinity" OpHelp("affinity(r[P1@P2])"), - /* 49 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), - /* 50 */ "Count" OpHelp("r[P2]=count()"), - /* 51 */ "ReadCookie" OpHelp(""), - /* 52 */ "SetCookie" OpHelp(""), - /* 53 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), - /* 54 */ "OpenRead" OpHelp("root=P2 iDb=P3"), - /* 55 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), - /* 56 */ "OpenAutoindex" OpHelp("nColumn=P2"), - /* 57 */ "OpenEphemeral" OpHelp("nColumn=P2"), - /* 58 */ "SorterOpen" OpHelp(""), - /* 59 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), - /* 60 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), - /* 61 */ "Close" OpHelp(""), - /* 62 */ "ColumnsUsed" OpHelp(""), - /* 63 */ "SeekLT" OpHelp("key=r[P3@P4]"), - /* 64 */ "SeekLE" OpHelp("key=r[P3@P4]"), - /* 65 */ "SeekGE" OpHelp("key=r[P3@P4]"), - /* 66 */ "SeekGT" OpHelp("key=r[P3@P4]"), - /* 67 */ "NoConflict" OpHelp("key=r[P3@P4]"), - /* 68 */ "NotFound" OpHelp("key=r[P3@P4]"), - /* 69 */ "Found" OpHelp("key=r[P3@P4]"), - /* 70 */ "NotExists" OpHelp("intkey=r[P3]"), - /* 71 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), - /* 72 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), - /* 73 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), - /* 74 */ "NewRowid" OpHelp("r[P2]=rowid"), - /* 75 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), - /* 76 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), - /* 77 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), - /* 78 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"), - /* 79 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"), - /* 80 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"), - /* 81 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"), - /* 82 */ "Lt" OpHelp("if r[P1]=r[P3] goto P2"), - /* 84 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), - /* 85 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), - /* 86 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), - /* 87 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), - /* 89 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), - /* 90 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), - /* 91 */ 
"Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), - /* 92 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), - /* 93 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), - /* 94 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), - /* 95 */ "Delete" OpHelp(""), - /* 96 */ "BitNot" OpHelp("r[P1]= ~r[P1]"), + /* 20 */ "Once" OpHelp(""), + /* 21 */ "If" OpHelp(""), + /* 22 */ "IfNot" OpHelp(""), + /* 23 */ "SeekLT" OpHelp("key=r[P3@P4]"), + /* 24 */ "SeekLE" OpHelp("key=r[P3@P4]"), + /* 25 */ "SeekGE" OpHelp("key=r[P3@P4]"), + /* 26 */ "SeekGT" OpHelp("key=r[P3@P4]"), + /* 27 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), + /* 28 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), + /* 29 */ "NoConflict" OpHelp("key=r[P3@P4]"), + /* 30 */ "NotFound" OpHelp("key=r[P3@P4]"), + /* 31 */ "Found" OpHelp("key=r[P3@P4]"), + /* 32 */ "SeekRowid" OpHelp("intkey=r[P3]"), + /* 33 */ "NotExists" OpHelp("intkey=r[P3]"), + /* 34 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), + /* 35 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), + /* 36 */ "Ne" OpHelp("if r[P1]!=r[P3] goto P2"), + /* 37 */ "Eq" OpHelp("if r[P1]==r[P3] goto P2"), + /* 38 */ "Gt" OpHelp("if r[P1]>r[P3] goto P2"), + /* 39 */ "Le" OpHelp("if r[P1]<=r[P3] goto P2"), + /* 40 */ "Lt" OpHelp("if r[P1]=r[P3] goto P2"), + /* 42 */ "Last" OpHelp(""), + /* 43 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"), + /* 44 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"), + /* 45 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<>r[P1]"), + /* 47 */ "Add" OpHelp("r[P3]=r[P1]+r[P2]"), + /* 48 */ "Subtract" OpHelp("r[P3]=r[P2]-r[P1]"), + /* 49 */ "Multiply" OpHelp("r[P3]=r[P1]*r[P2]"), + /* 50 */ "Divide" OpHelp("r[P3]=r[P2]/r[P1]"), + /* 51 */ "Remainder" OpHelp("r[P3]=r[P2]%r[P1]"), + /* 52 */ "Concat" OpHelp("r[P3]=r[P2]+r[P1]"), + /* 53 */ "SorterSort" OpHelp(""), + /* 54 */ "BitNot" OpHelp("r[P1]= ~r[P1]"), + /* 55 */ "Sort" OpHelp(""), + /* 56 */ "Rewind" OpHelp(""), + /* 57 */ "IdxLE" OpHelp("key=r[P3@P4]"), + /* 58 */ "IdxGT" OpHelp("key=r[P3@P4]"), + /* 59 */ "IdxLT" OpHelp("key=r[P3@P4]"), + /* 60 */ "IdxGE" OpHelp("key=r[P3@P4]"), + /* 61 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), + /* 62 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), + /* 63 */ "Program" OpHelp(""), + /* 64 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), + /* 65 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 66 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"), + /* 67 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), + /* 68 */ "IncrVacuum" OpHelp(""), + /* 69 */ "VNext" OpHelp(""), + /* 70 */ "Init" OpHelp("Start at P2"), + /* 71 */ "Return" OpHelp(""), + /* 72 */ "EndCoroutine" OpHelp(""), + /* 73 */ "HaltIfNull" OpHelp("if r[P3]=null halt"), + /* 74 */ "Halt" OpHelp(""), + /* 75 */ "Integer" OpHelp("r[P2]=P1"), + /* 76 */ "Int64" OpHelp("r[P2]=P4"), + /* 77 */ "String" OpHelp("r[P2]='P4' (len=P1)"), + /* 78 */ "Null" OpHelp("r[P2..P3]=NULL"), + /* 79 */ "SoftNull" OpHelp("r[P1]=NULL"), + /* 80 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"), + /* 81 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"), + /* 82 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"), + /* 83 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"), + /* 84 */ "SCopy" OpHelp("r[P2]=r[P1]"), + /* 85 */ "IntCopy" OpHelp("r[P2]=r[P1]"), + /* 86 */ "ResultRow" OpHelp("output=r[P1@P2]"), + /* 87 */ "CollSeq" OpHelp(""), + /* 88 */ "Function0" OpHelp("r[P3]=func(r[P2@P5])"), + /* 89 */ "Function" OpHelp("r[P3]=func(r[P2@P5])"), + /* 90 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"), + /* 91 */ "RealAffinity" OpHelp(""), + /* 92 */ "Cast" OpHelp("affinity(r[P1])"), + /* 93 */ "Permutation" OpHelp(""), + /* 
94 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"), + /* 95 */ "Column" OpHelp("r[P3]=PX"), + /* 96 */ "Affinity" OpHelp("affinity(r[P1@P2])"), /* 97 */ "String8" OpHelp("r[P2]='P4'"), - /* 98 */ "ResetCount" OpHelp(""), - /* 99 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), - /* 100 */ "SorterData" OpHelp("r[P2]=data"), - /* 101 */ "RowKey" OpHelp("r[P2]=key"), - /* 102 */ "RowData" OpHelp("r[P2]=data"), - /* 103 */ "Rowid" OpHelp("r[P2]=rowid"), - /* 104 */ "NullRow" OpHelp(""), - /* 105 */ "Last" OpHelp(""), - /* 106 */ "SorterSort" OpHelp(""), - /* 107 */ "Sort" OpHelp(""), - /* 108 */ "Rewind" OpHelp(""), - /* 109 */ "SorterInsert" OpHelp(""), - /* 110 */ "IdxInsert" OpHelp("key=r[P2]"), - /* 111 */ "IdxDelete" OpHelp("key=r[P2@P3]"), - /* 112 */ "Seek" OpHelp("Move P3 to P1.rowid"), - /* 113 */ "IdxRowid" OpHelp("r[P2]=rowid"), - /* 114 */ "IdxLE" OpHelp("key=r[P3@P4]"), - /* 115 */ "IdxGT" OpHelp("key=r[P3@P4]"), - /* 116 */ "IdxLT" OpHelp("key=r[P3@P4]"), - /* 117 */ "IdxGE" OpHelp("key=r[P3@P4]"), - /* 118 */ "Destroy" OpHelp(""), - /* 119 */ "Clear" OpHelp(""), - /* 120 */ "ResetSorter" OpHelp(""), - /* 121 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), - /* 122 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), - /* 123 */ "ParseSchema" OpHelp(""), - /* 124 */ "LoadAnalysis" OpHelp(""), - /* 125 */ "DropTable" OpHelp(""), - /* 126 */ "DropIndex" OpHelp(""), - /* 127 */ "DropTrigger" OpHelp(""), - /* 128 */ "IntegrityCk" OpHelp(""), - /* 129 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), - /* 130 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), - /* 131 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), - /* 132 */ "Program" OpHelp(""), + /* 98 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"), + /* 99 */ "Count" OpHelp("r[P2]=count()"), + /* 100 */ "ReadCookie" OpHelp(""), + /* 101 */ "SetCookie" OpHelp(""), + /* 102 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"), + /* 103 */ "OpenRead" OpHelp("root=P2 iDb=P3"), + /* 104 */ "OpenWrite" OpHelp("root=P2 iDb=P3"), + /* 105 */ "OpenAutoindex" OpHelp("nColumn=P2"), + /* 106 */ "OpenEphemeral" OpHelp("nColumn=P2"), + /* 107 */ "SorterOpen" OpHelp(""), + /* 108 */ "SequenceTest" OpHelp("if( cursor[P1].ctr++ ) pc = P2"), + /* 109 */ "OpenPseudo" OpHelp("P3 columns in r[P2]"), + /* 110 */ "Close" OpHelp(""), + /* 111 */ "ColumnsUsed" OpHelp(""), + /* 112 */ "Sequence" OpHelp("r[P2]=cursor[P1].ctr++"), + /* 113 */ "NewRowid" OpHelp("r[P2]=rowid"), + /* 114 */ "Insert" OpHelp("intkey=r[P3] data=r[P2]"), + /* 115 */ "InsertInt" OpHelp("intkey=P3 data=r[P2]"), + /* 116 */ "Delete" OpHelp(""), + /* 117 */ "ResetCount" OpHelp(""), + /* 118 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"), + /* 119 */ "SorterData" OpHelp("r[P2]=data"), + /* 120 */ "RowKey" OpHelp("r[P2]=key"), + /* 121 */ "RowData" OpHelp("r[P2]=data"), + /* 122 */ "Rowid" OpHelp("r[P2]=rowid"), + /* 123 */ "NullRow" OpHelp(""), + /* 124 */ "SorterInsert" OpHelp(""), + /* 125 */ "IdxInsert" OpHelp("key=r[P2]"), + /* 126 */ "IdxDelete" OpHelp("key=r[P2@P3]"), + /* 127 */ "Seek" OpHelp("Move P3 to P1.rowid"), + /* 128 */ "IdxRowid" OpHelp("r[P2]=rowid"), + /* 129 */ "Destroy" OpHelp(""), + /* 130 */ "Clear" OpHelp(""), + /* 131 */ "ResetSorter" OpHelp(""), + /* 132 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"), /* 133 */ "Real" OpHelp("r[P2]=P4"), - /* 134 */ "Param" OpHelp(""), - /* 135 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), - /* 136 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - /* 137 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), - /* 138 */ "IfPos" OpHelp("if 
r[P1]>0 then r[P1]-=P3, goto P2"), - /* 139 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), - /* 140 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"), - /* 141 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), - /* 142 */ "JumpZeroIncr" OpHelp("if (r[P1]++)==0 ) goto P2"), - /* 143 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 144 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), - /* 145 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), - /* 146 */ "IncrVacuum" OpHelp(""), - /* 147 */ "Expire" OpHelp(""), - /* 148 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), - /* 149 */ "VBegin" OpHelp(""), - /* 150 */ "VCreate" OpHelp(""), - /* 151 */ "VDestroy" OpHelp(""), - /* 152 */ "VOpen" OpHelp(""), - /* 153 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 154 */ "VNext" OpHelp(""), - /* 155 */ "VRename" OpHelp(""), - /* 156 */ "Pagecount" OpHelp(""), - /* 157 */ "MaxPgcnt" OpHelp(""), - /* 158 */ "Init" OpHelp("Start at P2"), + /* 134 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"), + /* 135 */ "ParseSchema" OpHelp(""), + /* 136 */ "LoadAnalysis" OpHelp(""), + /* 137 */ "DropTable" OpHelp(""), + /* 138 */ "DropIndex" OpHelp(""), + /* 139 */ "DropTrigger" OpHelp(""), + /* 140 */ "IntegrityCk" OpHelp(""), + /* 141 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"), + /* 142 */ "Param" OpHelp(""), + /* 143 */ "FkCounter" OpHelp("fkctr[P1]+=P2"), + /* 144 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"), + /* 145 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"), + /* 146 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 147 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"), + /* 148 */ "AggFinal" OpHelp("accum=r[P1] N=P2"), + /* 149 */ "Expire" OpHelp(""), + /* 150 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"), + /* 151 */ "VBegin" OpHelp(""), + /* 152 */ "VCreate" OpHelp(""), + /* 153 */ "VDestroy" OpHelp(""), + /* 154 */ "VOpen" OpHelp(""), + /* 155 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 156 */ "VRename" OpHelp(""), + /* 157 */ "Pagecount" OpHelp(""), + /* 158 */ "MaxPgcnt" OpHelp(""), /* 159 */ "CursorHint" OpHelp(""), /* 160 */ "Noop" OpHelp(""), /* 161 */ "Explain" OpHelp(""), @@ -27651,8 +29467,8 @@ static pid_t randomnessPid = 0; ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H /* ** The following routine only works on pentium-class (or newer) processors. 
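The opcode-name table renumbered above is what EXPLAIN output reports, so applications that match on names rather than numeric opcodes are unaffected by the renumbering. A sketch using only public APIs (the SQL text is an example):

#include <stdio.h>
#include "sqlite3.h"

/* Dump a statement's VDBE program; the second result column of
** EXPLAIN is the opcode name from the table above. */
static void dumpProgram(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "EXPLAIN SELECT 1", -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%4d %s\n", sqlite3_column_int(pStmt, 0),
             (const char*)sqlite3_column_text(pStmt, 1));
    }
  }
  sqlite3_finalize(pStmt);
}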
@@ -27720,7 +29536,7 @@ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif -#endif /* !defined(_HWTIME_H_) */ +#endif /* !defined(SQLITE_HWTIME_H) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ @@ -27914,7 +29730,7 @@ static struct unix_syscall { #else { "pread64", (sqlite3_syscall_ptr)0, 0 }, #endif -#define osPread64 ((ssize_t(*)(int,void*,size_t,off_t))aSyscall[10].pCurrent) +#define osPread64 ((ssize_t(*)(int,void*,size_t,off64_t))aSyscall[10].pCurrent) { "write", (sqlite3_syscall_ptr)write, 0 }, #define osWrite ((ssize_t(*)(int,const void*,size_t))aSyscall[11].pCurrent) @@ -27932,7 +29748,7 @@ static struct unix_syscall { #else { "pwrite64", (sqlite3_syscall_ptr)0, 0 }, #endif -#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off_t))\ +#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\ aSyscall[13].pCurrent) { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, @@ -29011,7 +30827,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){ ** lock transitions in terms of the POSIX advisory shared and exclusive ** lock primitives (called read-locks and write-locks below, to avoid ** confusion with SQLite lock names). The algorithms are complicated - ** slightly in order to be compatible with windows systems simultaneously + ** slightly in order to be compatible with Windows95 systems simultaneously ** accessing the same database file, in case that is ever required. ** ** Symbols defined in os.h identify the 'pending byte' and the 'reserved @@ -29019,8 +30835,14 @@ ** range', a range of 510 bytes at a well known offset. ** ** To obtain a SHARED lock, a read-lock is obtained on the 'pending - ** byte'. If this is successful, a random byte from the 'shared byte - ** range' is read-locked and the lock on the 'pending byte' released. + ** byte'. If this is successful, 'shared byte range' is read-locked + ** and the lock on the 'pending byte' released. (Legacy note: When + ** SQLite was first developed, Windows95 systems were still very common, + ** and Windows95 lacks a shared-lock capability. So on Windows95, a + ** single randomly selected byte from the 'shared byte range' is locked. + ** Windows95 is now pretty much extinct, but this work-around for the + ** lack of shared-locks on Windows95 lives on, for backwards + ** compatibility.) ** ** A process may only obtain a RESERVED lock after it has a SHARED lock. ** A RESERVED lock is implemented by grabbing a write-lock on the @@ -29039,11 +30861,6 @@ static int unixLock(sqlite3_file *id, int eFileLock){ ** range'. Since all other locks require a read-lock on one of the bytes ** within this range, this ensures that no other locks are held on the ** database. - ** - ** The reason a single byte cannot be used instead of the 'shared byte - ** range' is that some versions of windows do not support read-locks. By - ** locking a random byte from a range, concurrent SHARED locks may exist - ** even if the locking primitive used is always a write-lock.
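For orientation, the 'pending byte', 'reserved byte' and 'shared byte range' discussed in the comment above sit at fixed offsets defined in os.h. The values below are SQLite's long-standing defaults, recalled here for context rather than quoted from this patch:

/* All advisory locks land at or above the 1GB boundary so the locked
** bytes are never used for database content. */
#define PENDING_BYTE      0x40000000      /* first byte past the 1GB mark */
#define RESERVED_BYTE     (PENDING_BYTE+1)
#define SHARED_FIRST      (PENDING_BYTE+2)
#define SHARED_SIZE       510             /* the 'shared byte range' */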
*/ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; @@ -31796,10 +33613,12 @@ static int unixOpenSharedMemory(unixFile *pDbFd){ pShmNode->h = -1; pDbFd->pInode->pShmNode = pShmNode; pShmNode->pInode = pDbFd->pInode; - pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_NOMEM_BKPT; - goto shm_open_err; + if( sqlite3GlobalConfig.bCoreMutex ){ + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto shm_open_err; + } } if( pInode->bProcessLock==0 ){ @@ -32918,20 +34737,24 @@ static const char *unixTempFileDir(void){ "/tmp", "." }; - unsigned int i; + unsigned int i = 0; struct stat buf; const char *zDir = sqlite3_temp_directory; if( !azDirs[0] ) azDirs[0] = getenv("SQLITE_TMPDIR"); if( !azDirs[1] ) azDirs[1] = getenv("TMPDIR"); - for(i=0; i<sizeof(azDirs)/sizeof(azDirs[0]); zDir=azDirs[i++]){ - if( zDir==0 ) continue; - if( osStat(zDir, &buf) ) continue; - if( !S_ISDIR(buf.st_mode) ) continue; - if( osAccess(zDir, 07) ) continue; - break; + while(1){ + if( zDir!=0 + && osStat(zDir, &buf)==0 + && S_ISDIR(buf.st_mode) + && osAccess(zDir, 03)==0 + ){ + return zDir; + } + if( i>=sizeof(azDirs)/sizeof(azDirs[0]) ) break; + zDir = azDirs[i++]; } - return zDir; + return 0; } /* @@ -32947,9 +34770,11 @@ static int unixGetTempname(int nBuf, char *zBuf){ ** using the io-error infrastructure to test that SQLite handles this ** function failing. */ + zBuf[0] = 0; SimulateIOError( return SQLITE_IOERR ); zDir = unixTempFileDir(); + if( zDir==0 ) return SQLITE_IOERR_GETTEMPPATH; do{ u64 r; sqlite3_randomness(sizeof(r), &r); @@ -35171,8 +36996,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void){ ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H /* ** The following routine only works on pentium-class (or newer) processors. @@ -35240,7 +37065,7 @@ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif -#endif /* !defined(_HWTIME_H_) */ +#endif /* !defined(SQLITE_HWTIME_H) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in os_common.h ******************/ @@ -35581,6 +37406,17 @@ struct winFile { #endif }; +/* +** The winVfsAppData structure is used for the pAppData member for all of the +** Win32 VFS variants. +*/ +typedef struct winVfsAppData winVfsAppData; +struct winVfsAppData { + const sqlite3_io_methods *pMethod; /* The file I/O methods to use. */ + void *pAppData; /* The extra pAppData, if any. */ + BOOL bNoLock; /* Non-zero if locking is disabled. */ +}; + /* ** Allowed values for winFile.ctrlFlags */ @@ -36551,8 +38387,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_win32_reset_heap(){ int rc; MUTEX_LOGIC( sqlite3_mutex *pMaster; ) /* The main static mutex */ MUTEX_LOGIC( sqlite3_mutex *pMem; ) /* The memsys static mutex */ - MUTEX_LOGIC( pMaster = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MASTER); ) - MUTEX_LOGIC( pMem = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_MEM); ) + MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) + MUTEX_LOGIC( pMem = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM); ) sqlite3_mutex_enter(pMaster); sqlite3_mutex_enter(pMem); winMemAssertMagic(); @@ -36597,6 +38433,12 @@ SQLITE_API void SQLITE_STDCALL sqlite3_win32_write_debug(const char *zBuf, int n int nMin = MIN(nBuf, (SQLITE_WIN32_DBG_BUF_SIZE - 1)); /* may be negative. */ if( nMin<-1 ) nMin = -1; /* all negative values become -1.
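With the change above, unixTempFileDir() now returns 0 when no candidate directory is usable, and unixGetTempname() converts that into SQLITE_IOERR_GETTEMPPATH instead of silently using a bad path. An application that needs a specific location can still pin it through the public global; a sketch (the path is illustrative):

#include "sqlite3.h"

/* sqlite3_temp_directory is consulted before SQLITE_TMPDIR, TMPDIR and
** the built-in fallbacks; SQLite never frees this pointer, so point it
** at storage that outlives every connection. */
static void pinTempDir(void){
  sqlite3_temp_directory = sqlite3_mprintf("%s", "/var/tmp/sqlite");
}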
*/ assert( nMin==-1 || nMin==0 || nMin<SQLITE_WIN32_DBG_BUF_SIZE ); +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zBuf ){ + (void)SQLITE_MISUSE_BKPT; + return; + } +#endif #if defined(SQLITE_WIN32_HAS_ANSI) if( nMin>0 ){ memset(zDbgBuf, 0, SQLITE_WIN32_DBG_BUF_SIZE); @@ -36922,147 +38764,244 @@ SQLITE_PRIVATE void sqlite3MemSetDefault(void){ #endif /* SQLITE_WIN32_MALLOC */ /* -** Convert a UTF-8 string to Microsoft Unicode (UTF-16?). +** Convert a UTF-8 string to Microsoft Unicode. ** -** Space to hold the returned string is obtained from malloc. +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -static LPWSTR winUtf8ToUnicode(const char *zFilename){ +static LPWSTR winUtf8ToUnicode(const char *zText){ int nChar; - LPWSTR zWideFilename; + LPWSTR zWideText; - nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, NULL, 0); + nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, NULL, 0); if( nChar==0 ){ return 0; } - zWideFilename = sqlite3MallocZero( nChar*sizeof(zWideFilename[0]) ); - if( zWideFilename==0 ){ + zWideText = sqlite3MallocZero( nChar*sizeof(WCHAR) ); + if( zWideText==0 ){ return 0; } - nChar = osMultiByteToWideChar(CP_UTF8, 0, zFilename, -1, zWideFilename, + nChar = osMultiByteToWideChar(CP_UTF8, 0, zText, -1, zWideText, nChar); if( nChar==0 ){ - sqlite3_free(zWideFilename); - zWideFilename = 0; + sqlite3_free(zWideText); + zWideText = 0; } - return zWideFilename; + return zWideText; } /* -** Convert Microsoft Unicode to UTF-8. Space to hold the returned string is -** obtained from sqlite3_malloc(). +** Convert a Microsoft Unicode string to UTF-8. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -static char *winUnicodeToUtf8(LPCWSTR zWideFilename){ +static char *winUnicodeToUtf8(LPCWSTR zWideText){ int nByte; - char *zFilename; + char *zText; - nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, 0, 0, 0, 0); + nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, 0, 0, 0, 0); if( nByte == 0 ){ return 0; } - zFilename = sqlite3MallocZero( nByte ); - if( zFilename==0 ){ + zText = sqlite3MallocZero( nByte ); + if( zText==0 ){ return 0; } - nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideFilename, -1, zFilename, nByte, + nByte = osWideCharToMultiByte(CP_UTF8, 0, zWideText, -1, zText, nByte, 0, 0); if( nByte == 0 ){ - sqlite3_free(zFilename); - zFilename = 0; + sqlite3_free(zText); + zText = 0; } - return zFilename; + return zText; } /* -** Convert an ANSI string to Microsoft Unicode, based on the -** current codepage settings for file apis. +** Convert an ANSI string to Microsoft Unicode, using the ANSI or OEM +** code page. ** -** Space to hold the returned string is obtained -** from sqlite3_malloc. +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -static LPWSTR winMbcsToUnicode(const char *zFilename){ +static LPWSTR winMbcsToUnicode(const char *zText, int useAnsi){ int nByte; - LPWSTR zMbcsFilename; - int codepage = osAreFileApisANSI() ? CP_ACP : CP_OEMCP; + LPWSTR zMbcsText; + int codepage = useAnsi ?
CP_ACP : CP_OEMCP; - nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, NULL, + nByte = osMultiByteToWideChar(codepage, 0, zText, -1, NULL, 0)*sizeof(WCHAR); if( nByte==0 ){ return 0; } - zMbcsFilename = sqlite3MallocZero( nByte*sizeof(zMbcsFilename[0]) ); - if( zMbcsFilename==0 ){ + zMbcsText = sqlite3MallocZero( nByte*sizeof(WCHAR) ); + if( zMbcsText==0 ){ return 0; } - nByte = osMultiByteToWideChar(codepage, 0, zFilename, -1, zMbcsFilename, + nByte = osMultiByteToWideChar(codepage, 0, zText, -1, zMbcsText, nByte); if( nByte==0 ){ - sqlite3_free(zMbcsFilename); - zMbcsFilename = 0; + sqlite3_free(zMbcsText); + zMbcsText = 0; } - return zMbcsFilename; + return zMbcsText; } /* -** Convert Microsoft Unicode to multi-byte character string, based on the -** user's ANSI codepage. +** Convert a Microsoft Unicode string to a multi-byte character string, +** using the ANSI or OEM code page. ** -** Space to hold the returned string is obtained from -** sqlite3_malloc(). +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -static char *winUnicodeToMbcs(LPCWSTR zWideFilename){ +static char *winUnicodeToMbcs(LPCWSTR zWideText, int useAnsi){ int nByte; - char *zFilename; - int codepage = osAreFileApisANSI() ? CP_ACP : CP_OEMCP; + char *zText; + int codepage = useAnsi ? CP_ACP : CP_OEMCP; - nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, 0, 0, 0, 0); + nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, 0, 0, 0, 0); if( nByte == 0 ){ return 0; } - zFilename = sqlite3MallocZero( nByte ); - if( zFilename==0 ){ + zText = sqlite3MallocZero( nByte ); + if( zText==0 ){ return 0; } - nByte = osWideCharToMultiByte(codepage, 0, zWideFilename, -1, zFilename, + nByte = osWideCharToMultiByte(codepage, 0, zWideText, -1, zText, nByte, 0, 0); if( nByte == 0 ){ - sqlite3_free(zFilename); - zFilename = 0; + sqlite3_free(zText); + zText = 0; } - return zFilename; + return zText; } /* -** Convert multibyte character string to UTF-8. Space to hold the -** returned string is obtained from sqlite3_malloc(). +** Convert a multi-byte character string to UTF-8. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zFilename){ - char *zFilenameUtf8; +static char *winMbcsToUtf8(const char *zText, int useAnsi){ + char *zTextUtf8; LPWSTR zTmpWide; - zTmpWide = winMbcsToUnicode(zFilename); + zTmpWide = winMbcsToUnicode(zText, useAnsi); if( zTmpWide==0 ){ return 0; } - zFilenameUtf8 = winUnicodeToUtf8(zTmpWide); + zTextUtf8 = winUnicodeToUtf8(zTmpWide); sqlite3_free(zTmpWide); - return zFilenameUtf8; + return zTextUtf8; } /* -** Convert UTF-8 to multibyte character string. Space to hold the -** returned string is obtained from sqlite3_malloc(). +** Convert a UTF-8 string to a multi-byte character string. +** +** Space to hold the returned string is obtained from sqlite3_malloc(). */ -SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zFilename){ - char *zFilenameMbcs; +static char *winUtf8ToMbcs(const char *zText, int useAnsi){ + char *zTextMbcs; LPWSTR zTmpWide; - zTmpWide = winUtf8ToUnicode(zFilename); + zTmpWide = winUtf8ToUnicode(zText); if( zTmpWide==0 ){ return 0; } - zFilenameMbcs = winUnicodeToMbcs(zTmpWide); + zTextMbcs = winUnicodeToMbcs(zTmpWide, useAnsi); sqlite3_free(zTmpWide); - return zFilenameMbcs; + return zTextMbcs; +} + +/* +** This is a public wrapper for the winUtf8ToUnicode() function. 
+*/ +SQLITE_API LPWSTR SQLITE_STDCALL sqlite3_win32_utf8_to_unicode(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToUnicode(zText); +} + +/* +** This is a public wrapper for the winUnicodeToUtf8() function. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_win32_unicode_to_utf8(LPCWSTR zWideText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zWideText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUnicodeToUtf8(zWideText); +} + +/* +** This is a public wrapper for the winMbcsToUtf8() function. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winMbcsToUtf8(zText, osAreFileApisANSI()); +} + +/* +** This is a public wrapper for the winMbcsToUtf8() function. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_win32_mbcs_to_utf8_v2(const char *zText, int useAnsi){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winMbcsToUtf8(zText, useAnsi); +} + +/* +** This is a public wrapper for the winUtf8ToMbcs() function. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs(const char *zText){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToMbcs(zText, osAreFileApisANSI()); +} + +/* +** This is a public wrapper for the winUtf8ToMbcs() function. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_win32_utf8_to_mbcs_v2(const char *zText, int useAnsi){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !zText ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif +#ifndef SQLITE_OMIT_AUTOINIT + if( sqlite3_initialize() ) return 0; +#endif + return winUtf8ToMbcs(zText, useAnsi); } /* @@ -37164,7 +39103,7 @@ static int winGetLastErrorMsg(DWORD lastErrno, int nBuf, char *zBuf){ if( dwLen > 0 ){ /* allocate a buffer and convert to UTF8 */ sqlite3BeginBenignMalloc(); - zOut = sqlite3_win32_mbcs_to_utf8(zTemp); + zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI()); sqlite3EndBenignMalloc(); /* free the system buffer allocated by FormatMessage */ osLocalFree(zTemp); @@ -37306,16 +39245,17 @@ static void winLogIoerr(int nRetry, int lineno){ } } -#if SQLITE_OS_WINCE -/************************************************************************* -** This section contains code for WinCE only. +/* +** This #if does not rely on the SQLITE_OS_WINCE define because the +** corresponding section in "date.c" cannot use it. */ -#if !defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API +#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \ + (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API) /* -** The MSVC CRT on Windows CE may not have a localtime() function. So -** create a substitute. +** The MSVC CRT on Windows CE may not have a localtime() function. +** So define a substitute. 
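The block above promotes the former file-scope converters to public, API_ARMOR-guarded entry points. A round-trip sketch for a Windows build (the file name is an example; every buffer returned by these wrappers comes from sqlite3_malloc() and is the caller's to free):

#include <windows.h>
#include "sqlite3.h"

static void roundTrip(void){
  LPWSTR zWide = sqlite3_win32_utf8_to_unicode("example.db");
  if( zWide ){
    char *zUtf8 = sqlite3_win32_unicode_to_utf8(zWide);
    sqlite3_free(zUtf8);   /* safe even if the conversion returned 0 */
    sqlite3_free(zWide);
  }
}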
*/ -/* #include <time.h> */ +/* # include <time.h> */ struct tm *__cdecl localtime(const time_t *t) { static struct tm y; @@ -37339,6 +39279,10 @@ struct tm *__cdecl localtime(const time_t *t) } #endif +#if SQLITE_OS_WINCE +/************************************************************************* +** This section contains code for WinCE only. +*/ #define HANDLE_TO_WINFILE(a) (winFile*)&((char*)a)[-(int)offsetof(winFile,h)] /* @@ -37794,7 +39738,12 @@ static int winClose(sqlite3_file *id){ }while( rc==0 && ++cnt < MX_CLOSE_ATTEMPT && (sqlite3_win32_sleep(100), 1) ); #if SQLITE_OS_WINCE #define WINCE_DELETION_ATTEMPTS 3 - winceDestroyLock(pFile); + { + winVfsAppData *pAppData = (winVfsAppData*)pFile->pVfs->pAppData; + if( pAppData==NULL || !pAppData->bNoLock ){ + winceDestroyLock(pFile); + } + } if( pFile->zDeleteOnClose ){ int cnt = 0; while( @@ -38352,9 +40301,8 @@ static int winLock(sqlite3_file *id, int locktype){ ** the PENDING_LOCK byte is temporary. */ newLocktype = pFile->locktype; - if( (pFile->locktype==NO_LOCK) - || ( (locktype==EXCLUSIVE_LOCK) - && (pFile->locktype==RESERVED_LOCK)) + if( pFile->locktype==NO_LOCK + || (locktype==EXCLUSIVE_LOCK && pFile->locktype<=RESERVED_LOCK) ){ int cnt = 3; while( cnt-->0 && (res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, @@ -38527,6 +40475,44 @@ static int winUnlock(sqlite3_file *id, int locktype){ return rc; } +/****************************************************************************** +****************************** No-op Locking ********************************** +** +** Of the various locking implementations available, this is by far the +** simplest: locking is ignored. No attempt is made to lock the database +** file for reading or writing. +** +** This locking mode is appropriate for use on read-only databases +** (ex: databases that are burned into CD-ROM.) It can +** also be used if the application employs some external mechanism to +** prevent simultaneous access of the same database by two or more +** database connections. But there is a serious risk of database +** corruption if this locking mode is used in situations where multiple +** database connections are accessing the same database file at the same +** time and one or more of those connections are writing. +*/ + +static int winNolockLock(sqlite3_file *id, int locktype){ + UNUSED_PARAMETER(id); + UNUSED_PARAMETER(locktype); + return SQLITE_OK; +} + +static int winNolockCheckReservedLock(sqlite3_file *id, int *pResOut){ + UNUSED_PARAMETER(id); + UNUSED_PARAMETER(pResOut); + return SQLITE_OK; +} + +static int winNolockUnlock(sqlite3_file *id, int locktype){ + UNUSED_PARAMETER(id); + UNUSED_PARAMETER(locktype); + return SQLITE_OK; +} + +/******************* End of the no-op lock implementation ********************* ******************************************************************************/ + /* ** If *pArg is initially negative then this is a query. Set *pArg to ** 1 or 0 depending on whether or not the bit mask of pFile->ctrlFlags is set. @@ -38805,12 +40791,12 @@ struct winShm { /* ** Apply advisory locks for all n bytes beginning at ofst.
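The no-op lock methods above only take effect once a VFS routes them in through winVfsAppData; the registrations appear further below in sqlite3_os_init() under the names "win32-none" and "win32-longpath-none". Selecting one at open time is a single call; a sketch with the public API:

#include "sqlite3.h"

/* Open with locking disabled: safe only for read-only media or when
** the application serialises all access itself. */
static int openNolock(const char *zPath, sqlite3 **pDb){
  return sqlite3_open_v2(zPath, pDb,
                         SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                         "win32-none");
}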
*/ -#define _SHM_UNLCK 1 -#define _SHM_RDLCK 2 -#define _SHM_WRLCK 3 +#define WINSHM_UNLCK 1 +#define WINSHM_RDLCK 2 +#define WINSHM_WRLCK 3 static int winShmSystemLock( winShmNode *pFile, /* Apply locks to this open shared-memory segment */ - int lockType, /* _SHM_UNLCK, _SHM_RDLCK, or _SHM_WRLCK */ + int lockType, /* WINSHM_UNLCK, WINSHM_RDLCK, or WINSHM_WRLCK */ int ofst, /* Offset to first byte to be locked/unlocked */ int nByte /* Number of bytes to lock or unlock */ ){ @@ -38823,12 +40809,12 @@ static int winShmSystemLock( pFile->hFile.h, lockType, ofst, nByte)); /* Release/Acquire the system-level lock */ - if( lockType==_SHM_UNLCK ){ + if( lockType==WINSHM_UNLCK ){ rc = winUnlockFile(&pFile->hFile.h, ofst, 0, nByte, 0); }else{ /* Initialize the locking parameters */ DWORD dwFlags = LOCKFILE_FAIL_IMMEDIATELY; - if( lockType == _SHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; + if( lockType == WINSHM_WRLCK ) dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; rc = winLockFile(&pFile->hFile.h, dwFlags, ofst, 0, nByte, 0); } @@ -38840,7 +40826,7 @@ static int winShmSystemLock( } OSTRACE(("SHM-LOCK file=%p, func=%s, errno=%lu, rc=%s\n", - pFile->hFile.h, (lockType == _SHM_UNLCK) ? "winUnlockFile" : + pFile->hFile.h, (lockType == WINSHM_UNLCK) ? "winUnlockFile" : "winLockFile", pFile->lastErrno, sqlite3ErrName(rc))); return rc; @@ -38948,10 +40934,12 @@ static int winOpenSharedMemory(winFile *pDbFd){ pShmNode->pNext = winShmNodeList; winShmNodeList = pShmNode; - pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); - if( pShmNode->mutex==0 ){ - rc = SQLITE_IOERR_NOMEM_BKPT; - goto shm_open_err; + if( sqlite3GlobalConfig.bCoreMutex ){ + pShmNode->mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->mutex==0 ){ + rc = SQLITE_IOERR_NOMEM_BKPT; + goto shm_open_err; + } } rc = winOpen(pDbFd->pVfs, @@ -38966,7 +40954,7 @@ static int winOpenSharedMemory(winFile *pDbFd){ /* Check to see if another process is holding the dead-man switch. ** If not, truncate the file to zero length. 
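The 'dead-man switch' used below is one designated byte past the ordinary shared-memory lock range: a process that can take WINSHM_WRLCK on it knows no one else has the region open, so the file may safely be truncated; every holder then keeps WINSHM_RDLCK on that byte for the life of the mapping. The offsets involved are defined near this code in the amalgamation (recalled here for context, not part of this hunk):

#define WIN_SHM_BASE   ((22+SQLITE_SHM_NLOCK)*4)        /* first lock byte */
#define WIN_SHM_DMS    (WIN_SHM_BASE+SQLITE_SHM_NLOCK)  /* dead-man switch */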
*/ - if( winShmSystemLock(pShmNode, _SHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ + if( winShmSystemLock(pShmNode, WINSHM_WRLCK, WIN_SHM_DMS, 1)==SQLITE_OK ){ rc = winTruncate((sqlite3_file *)&pShmNode->hFile, 0); if( rc!=SQLITE_OK ){ rc = winLogError(SQLITE_IOERR_SHMOPEN, osGetLastError(), @@ -38974,8 +40962,8 @@ static int winOpenSharedMemory(winFile *pDbFd){ } } if( rc==SQLITE_OK ){ - winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); - rc = winShmSystemLock(pShmNode, _SHM_RDLCK, WIN_SHM_DMS, 1); + winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); + rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, WIN_SHM_DMS, 1); } if( rc ) goto shm_open_err; } @@ -39004,7 +40992,7 @@ static int winOpenSharedMemory(winFile *pDbFd){ /* Jump here on any error */ shm_open_err: - winShmSystemLock(pShmNode, _SHM_UNLCK, WIN_SHM_DMS, 1); + winShmSystemLock(pShmNode, WINSHM_UNLCK, WIN_SHM_DMS, 1); winShmPurge(pDbFd->pVfs, 0); /* This call frees pShmNode if required */ sqlite3_free(p); sqlite3_free(pNew); @@ -39093,7 +41081,7 @@ static int winShmLock( /* Unlock the system-level locks */ if( (mask & allMask)==0 ){ - rc = winShmSystemLock(pShmNode, _SHM_UNLCK, ofst+WIN_SHM_BASE, n); + rc = winShmSystemLock(pShmNode, WINSHM_UNLCK, ofst+WIN_SHM_BASE, n); }else{ rc = SQLITE_OK; } @@ -39121,7 +41109,7 @@ static int winShmLock( /* Get shared locks at the system level, if necessary */ if( rc==SQLITE_OK ){ if( (allShared & mask)==0 ){ - rc = winShmSystemLock(pShmNode, _SHM_RDLCK, ofst+WIN_SHM_BASE, n); + rc = winShmSystemLock(pShmNode, WINSHM_RDLCK, ofst+WIN_SHM_BASE, n); }else{ rc = SQLITE_OK; } @@ -39146,7 +41134,7 @@ static int winShmLock( ** also mark the local connection as being locked. */ if( rc==SQLITE_OK ){ - rc = winShmSystemLock(pShmNode, _SHM_WRLCK, ofst+WIN_SHM_BASE, n); + rc = winShmSystemLock(pShmNode, WINSHM_WRLCK, ofst+WIN_SHM_BASE, n); if( rc==SQLITE_OK ){ assert( (p->sharedMask & mask)==0 ); p->exclMask |= mask; @@ -39589,6 +41577,44 @@ static const sqlite3_io_methods winIoMethod = { winUnfetch /* xUnfetch */ }; +/* +** This vector defines all the methods that can operate on an +** sqlite3_file for win32 without performing any locking. 
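Each VFS variant now carries a winVfsAppData through sqlite3_vfs.pAppData, and winOpen() consults it to choose both the I/O method vector and the locking policy, as a later hunk in this patch shows. The selection reduces to a sketch like:

/* Inside winOpen(): fall back to the fully locking methods when the
** VFS supplies no app data (e.g. a third-party VFS reusing winOpen). */
winVfsAppData *pAppData = (winVfsAppData*)pVfs->pAppData;
pFile->pMethod = pAppData ? pAppData->pMethod : &winIoMethod;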
+*/ +static const sqlite3_io_methods winIoNolockMethod = { + 3, /* iVersion */ + winClose, /* xClose */ + winRead, /* xRead */ + winWrite, /* xWrite */ + winTruncate, /* xTruncate */ + winSync, /* xSync */ + winFileSize, /* xFileSize */ + winNolockLock, /* xLock */ + winNolockUnlock, /* xUnlock */ + winNolockCheckReservedLock, /* xCheckReservedLock */ + winFileControl, /* xFileControl */ + winSectorSize, /* xSectorSize */ + winDeviceCharacteristics, /* xDeviceCharacteristics */ + winShmMap, /* xShmMap */ + winShmLock, /* xShmLock */ + winShmBarrier, /* xShmBarrier */ + winShmUnmap, /* xShmUnmap */ + winFetch, /* xFetch */ + winUnfetch /* xUnfetch */ +}; + +static winVfsAppData winAppData = { + &winIoMethod, /* pMethod */ + 0, /* pAppData */ + 0 /* bNoLock */ +}; + +static winVfsAppData winNolockAppData = { + &winIoNolockMethod, /* pMethod */ + 0, /* pAppData */ + 1 /* bNoLock */ +}; + /**************************************************************************** **************************** sqlite3_vfs methods **************************** ** @@ -39609,7 +41635,7 @@ static char *winConvertToUtf8Filename(const void *zFilename){ } #ifdef SQLITE_WIN32_HAS_ANSI else{ - zConverted = sqlite3_win32_mbcs_to_utf8(zFilename); + zConverted = winMbcsToUtf8(zFilename, osAreFileApisANSI()); } #endif /* caller will handle out of memory */ @@ -39630,7 +41656,7 @@ static void *winConvertFromUtf8Filename(const char *zFilename){ } #ifdef SQLITE_WIN32_HAS_ANSI else{ - zConverted = sqlite3_win32_utf8_to_mbcs(zFilename); + zConverted = winUtf8ToMbcs(zFilename, osAreFileApisANSI()); } #endif /* caller will handle out of memory */ @@ -39831,7 +41857,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ return winLogError(SQLITE_IOERR_GETTEMPPATH, osGetLastError(), "winGetTempname3", 0); } - zUtf8 = sqlite3_win32_mbcs_to_utf8(zMbcsPath); + zUtf8 = winMbcsToUtf8(zMbcsPath, osAreFileApisANSI()); if( zUtf8 ){ sqlite3_snprintf(nMax, zBuf, "%s", zUtf8); sqlite3_free(zUtf8); @@ -39921,7 +41947,7 @@ static int winIsDir(const void *zConverted){ ** Open a file. */ static int winOpen( - sqlite3_vfs *pVfs, /* Used to get maximum path name length */ + sqlite3_vfs *pVfs, /* Used to get maximum path length and AppData */ const char *zName, /* Name of the file (UTF-8) */ sqlite3_file *id, /* Write the SQLite file handle here */ int flags, /* Open mode flags */ @@ -39936,6 +41962,7 @@ static int winOpen( #if SQLITE_OS_WINCE int isTemp = 0; #endif + winVfsAppData *pAppData; winFile *pFile = (winFile*)id; void *zConverted; /* Filename in OS encoding */ const char *zUtf8Name = zName; /* Filename in UTF-8 encoding */ @@ -40157,15 +42184,20 @@ static int winOpen( "rc=%s\n", h, zUtf8Name, dwDesiredAccess, pOutFlags, pOutFlags ? *pOutFlags : 0, (h==INVALID_HANDLE_VALUE) ? 
"failed" : "ok")); + pAppData = (winVfsAppData*)pVfs->pAppData; + #if SQLITE_OS_WINCE - if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB - && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK - ){ - osCloseHandle(h); - sqlite3_free(zConverted); - sqlite3_free(zTmpname); - OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc))); - return rc; + { + if( isReadWrite && eType==SQLITE_OPEN_MAIN_DB + && ((pAppData==NULL) || !pAppData->bNoLock) + && (rc = winceCreateLock(zName, pFile))!=SQLITE_OK + ){ + osCloseHandle(h); + sqlite3_free(zConverted); + sqlite3_free(zTmpname); + OSTRACE(("OPEN-CE-LOCK name=%s, rc=%s\n", zName, sqlite3ErrName(rc))); + return rc; + } } if( isTemp ){ pFile->zDeleteOnClose = zConverted; @@ -40176,7 +42208,7 @@ static int winOpen( } sqlite3_free(zTmpname); - pFile->pMethod = &winIoMethod; + pFile->pMethod = pAppData ? pAppData->pMethod : &winIoMethod; pFile->pVfs = pVfs; pFile->h = h; if( isReadonly ){ @@ -40451,6 +42483,18 @@ static int winFullPathname( int nFull, /* Size of output buffer in bytes */ char *zFull /* Output buffer */ ){ +#if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) + DWORD nByte; + void *zConverted; + char *zOut; +#endif + + /* If this path name begins with "/X:", where "X" is any alphabetic + ** character, discard the initial "/" from the pathname. + */ + if( zRelative[0]=='/' && winIsDriveLetterAndColon(zRelative+1) ){ + zRelative++; + } #if defined(__CYGWIN__) SimulateIOError( return SQLITE_ERROR ); @@ -40529,17 +42573,6 @@ static int winFullPathname( #endif #if !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && !defined(__CYGWIN__) - DWORD nByte; - void *zConverted; - char *zOut; - - /* If this path name begins with "/X:", where "X" is any alphabetic - ** character, discard the initial "/" from the pathname. - */ - if( zRelative[0]=='/' && winIsDriveLetterAndColon(zRelative+1) ){ - zRelative++; - } - /* It's odd to simulate an io-error here, but really this is just ** using the io-error infrastructure to test that SQLite handles this ** function failing. 
This function could fail if, for example, the @@ -40609,7 +42642,7 @@ static int winFullPathname( "winFullPathname4", zRelative); } sqlite3_free(zConverted); - zOut = sqlite3_win32_mbcs_to_utf8(zTemp); + zOut = winMbcsToUtf8(zTemp, osAreFileApisANSI()); sqlite3_free(zTemp); } #endif @@ -40898,53 +42931,103 @@ static int winGetLastError(sqlite3_vfs *pVfs, int nBuf, char *zBuf){ */ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){ static sqlite3_vfs winVfs = { - 3, /* iVersion */ - sizeof(winFile), /* szOsFile */ + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ - 0, /* pNext */ - "win32", /* zName */ - 0, /* pAppData */ - winOpen, /* xOpen */ - winDelete, /* xDelete */ - winAccess, /* xAccess */ - winFullPathname, /* xFullPathname */ - winDlOpen, /* xDlOpen */ - winDlError, /* xDlError */ - winDlSym, /* xDlSym */ - winDlClose, /* xDlClose */ - winRandomness, /* xRandomness */ - winSleep, /* xSleep */ - winCurrentTime, /* xCurrentTime */ - winGetLastError, /* xGetLastError */ - winCurrentTimeInt64, /* xCurrentTimeInt64 */ - winSetSystemCall, /* xSetSystemCall */ - winGetSystemCall, /* xGetSystemCall */ - winNextSystemCall, /* xNextSystemCall */ + 0, /* pNext */ + "win32", /* zName */ + &winAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ }; #if defined(SQLITE_WIN32_HAS_WIDE) static sqlite3_vfs winLongPathVfs = { - 3, /* iVersion */ - sizeof(winFile), /* szOsFile */ + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ - 0, /* pNext */ - "win32-longpath", /* zName */ - 0, /* pAppData */ - winOpen, /* xOpen */ - winDelete, /* xDelete */ - winAccess, /* xAccess */ - winFullPathname, /* xFullPathname */ - winDlOpen, /* xDlOpen */ - winDlError, /* xDlError */ - winDlSym, /* xDlSym */ - winDlClose, /* xDlClose */ - winRandomness, /* xRandomness */ - winSleep, /* xSleep */ - winCurrentTime, /* xCurrentTime */ - winGetLastError, /* xGetLastError */ - winCurrentTimeInt64, /* xCurrentTimeInt64 */ - winSetSystemCall, /* xSetSystemCall */ - winGetSystemCall, /* xGetSystemCall */ - winNextSystemCall, /* xNextSystemCall */ + 0, /* pNext */ + "win32-longpath", /* zName */ + &winAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#endif + static sqlite3_vfs winNolockVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WIN32_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32-none", /* zName */ + &winNolockAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + 
winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ + }; +#if defined(SQLITE_WIN32_HAS_WIDE) + static sqlite3_vfs winLongPathNolockVfs = { + 3, /* iVersion */ + sizeof(winFile), /* szOsFile */ + SQLITE_WINNT_MAX_PATH_BYTES, /* mxPathname */ + 0, /* pNext */ + "win32-longpath-none", /* zName */ + &winNolockAppData, /* pAppData */ + winOpen, /* xOpen */ + winDelete, /* xDelete */ + winAccess, /* xAccess */ + winFullPathname, /* xFullPathname */ + winDlOpen, /* xDlOpen */ + winDlError, /* xDlError */ + winDlSym, /* xDlSym */ + winDlClose, /* xDlClose */ + winRandomness, /* xRandomness */ + winSleep, /* xSleep */ + winCurrentTime, /* xCurrentTime */ + winGetLastError, /* xGetLastError */ + winCurrentTimeInt64, /* xCurrentTimeInt64 */ + winSetSystemCall, /* xSetSystemCall */ + winGetSystemCall, /* xGetSystemCall */ + winNextSystemCall, /* xNextSystemCall */ }; #endif @@ -40968,6 +43051,12 @@ SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void){ sqlite3_vfs_register(&winLongPathVfs, 0); #endif + sqlite3_vfs_register(&winNolockVfs, 0); + +#if defined(SQLITE_WIN32_HAS_WIDE) + sqlite3_vfs_register(&winLongPathNolockVfs, 0); +#endif + return SQLITE_OK; } @@ -41415,7 +43504,29 @@ bitvec_end: /* #include "sqliteInt.h" */ /* -** A complete page cache is an instance of this structure. +** A complete page cache is an instance of this structure. Every +** entry in the cache holds a single page of the database file. The +** btree layer only operates on the cached copy of the database pages. +** +** A page cache entry is "clean" if it exactly matches what is currently +** on disk. A page is "dirty" if it has been modified and needs to be +** persisted to disk. +** +** pDirty, pDirtyTail, pSynced: +** All dirty pages are linked into the doubly linked list using +** PgHdr.pDirtyNext and pDirtyPrev. The list is maintained in LRU order +** such that p was added to the list more recently than p->pDirtyNext. +** PCache.pDirty points to the first (newest) element in the list and +** pDirtyTail to the last (oldest). +** +** The PCache.pSynced variable is used to optimize searching for a dirty +** page to eject from the cache mid-transaction. It is better to eject +** a page that does not require a journal sync than one that does. +** Therefore, pSynced is maintained to that it *almost* always points +** to either the oldest page in the pDirty/pDirtyTail list that has a +** clear PGHDR_NEED_SYNC flag or to a page that is older than this one +** (so that the right page to eject can be found by following pDirtyPrev +** pointers). */ struct PCache { PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ @@ -41432,6 +43543,95 @@ struct PCache { sqlite3_pcache *pCache; /* Pluggable cache module */ }; +/********************************** Test and Debug Logic **********************/ +/* +** Debug tracing macros. Enable by by changing the "0" to "1" and +** recompiling. +** +** When sqlite3PcacheTrace is 1, single line trace messages are issued. +** When sqlite3PcacheTrace is 2, a dump of the pcache showing all cache entries +** is displayed for many operations, resulting in a lot of output. 
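+** For example, with tracing set to 1 a short fetch/modify cycle logs
+** lines of the following shape (the pointer and page values here are
+** hypothetical, but the formats match the pcacheTrace() calls below):
+**
+**     0x7f8e10.OPEN szPage 1024 bPurgeable 1
+**     0x7f8e10.FETCH 2 create (result: 0x7f8f40)
+**     0x7f8e10.DIRTY 2
+**     0x7f8e10.DIRTYLIST.ADD 2
+**     0x7f8e10.DIRTYLIST.REMOVE 2
+**     0x7f8e10.CLEAN 2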
+*/ +#if defined(SQLITE_DEBUG) && 0 + int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ + int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ +# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} + void pcacheDump(PCache *pCache){ + int N; + int i, j; + sqlite3_pcache_page *pLower; + PgHdr *pPg; + unsigned char *a; + + if( sqlite3PcacheTrace<2 ) return; + if( pCache->pCache==0 ) return; + N = sqlite3PcachePagecount(pCache); + if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; + for(i=1; i<=N; i++){ + pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); + if( pLower==0 ) continue; + pPg = (PgHdr*)pLower->pExtra; + printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); + a = (unsigned char *)pLower->pBuf; + for(j=0; j<12; j++) printf("%02x", a[j]); + printf("\n"); + if( pPg->pPage==0 ){ + sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); + } + } + } + #else +# define pcacheTrace(X) +# define pcacheDump(X) +#endif + +/* +** Check invariants on a PgHdr entry. Return true if everything is OK. +** Return false if any invariant is violated. +** +** This routine is for use inside of assert() statements only. For +** example: +** +** assert( sqlite3PcachePageSanity(pPg) ); +*/ +#if SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){ + PCache *pCache; + assert( pPg!=0 ); + assert( pPg->pgno>0 ); /* Page number is 1 or more */ + pCache = pPg->pCache; + assert( pCache!=0 ); /* Every page has an associated PCache */ + if( pPg->flags & PGHDR_CLEAN ){ + assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ + assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ + assert( pCache->pDirtyTail!=pPg ); + } + /* WRITEABLE pages must also be DIRTY */ + if( pPg->flags & PGHDR_WRITEABLE ){ + assert( pPg->flags & PGHDR_DIRTY ); /* WRITEABLE implies DIRTY */ + } + /* NEED_SYNC can be set independently of WRITEABLE. This can happen, + ** for example, when using the sqlite3PagerDontWrite() optimization: + ** (1) Page X is journalled, and gets WRITEABLE and NEED_SEEK. + ** (2) Page X moved to freelist, WRITEABLE is cleared + ** (3) Page X reused, WRITEABLE is set again + ** If NEED_SYNC had been cleared in step 2, then it would not be reset + ** in step 3, and page might be written into the database without first + ** syncing the rollback journal, which might cause corruption on a power + ** loss. + ** + ** Another example is when the database page size is smaller than the + ** disk sector size. When any page of a sector is journalled, all pages + ** in that sector are marked NEED_SYNC even if they are still CLEAN, just + ** in case they are later modified, since all pages in the same sector + ** must be journalled and synced before any of those pages can be safely + ** written. + */ + return 1; +} +#endif /* SQLITE_DEBUG */ + + /********************************** Linked List Management ********************/ /* Allowed values for second argument to pcacheManageDirtyList() */ @@ -41448,17 +43648,16 @@ struct PCache { static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ PCache *p = pPage->pCache; + pcacheTrace(("%p.DIRTYLIST.%s %d\n", p, + addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT", + pPage->pgno)); if( addRemove & PCACHE_DIRTYLIST_REMOVE ){ assert( pPage->pDirtyNext || pPage==p->pDirtyTail ); assert( pPage->pDirtyPrev || pPage==p->pDirty ); /* Update the PCache1.pSynced variable if necessary. 
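** The assignment below may leave pSynced pointing at a page whose
** PGHDR_NEED_SYNC flag is set.  That is harmless: pSynced is only a
** search hint, and sqlite3PcacheFetchStress() skips past NEED_SYNC
** pages anyway.  For example, given the dirty list (newest to oldest)
**
**     A(NEED_SYNC)  B  C
**
** removing B now just sets pSynced to A, where the old code kept
** walking toward the newer end of the list looking for a page with
** NEED_SYNC clear.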
*/ if( p->pSynced==pPage ){ - PgHdr *pSynced = pPage->pDirtyPrev; - while( pSynced && (pSynced->flags&PGHDR_NEED_SYNC) ){ - pSynced = pSynced->pDirtyPrev; - } - p->pSynced = pSynced; + p->pSynced = pPage->pDirtyPrev; } if( pPage->pDirtyNext ){ @@ -41470,10 +43669,15 @@ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ if( pPage->pDirtyPrev ){ pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext; }else{ + /* If there are now no dirty pages in the cache, set eCreate to 2. + ** This is an optimization that allows sqlite3PcacheFetch() to skip + ** searching for a dirty page to eject from the cache when it might + ** otherwise have to. */ assert( pPage==p->pDirty ); p->pDirty = pPage->pDirtyNext; - if( p->pDirty==0 && p->bPurgeable ){ - assert( p->eCreate==1 ); + assert( p->bPurgeable || p->eCreate==2 ); + if( p->pDirty==0 ){ /*OPTIMIZATION-IF-TRUE*/ + assert( p->bPurgeable==0 || p->eCreate==1 ); p->eCreate = 2; } } @@ -41495,10 +43699,19 @@ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ } } p->pDirty = pPage; - if( !p->pSynced && 0==(pPage->flags&PGHDR_NEED_SYNC) ){ + + /* If pSynced is NULL and this page has a clear NEED_SYNC flag, set + ** pSynced to point to it. Checking the NEED_SYNC flag is an + ** optimization, as if pSynced points to a page with the NEED_SYNC + ** flag set sqlite3PcacheFetchStress() searches through all newer + ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */ + if( !p->pSynced + && 0==(pPage->flags&PGHDR_NEED_SYNC) /*OPTIMIZATION-IF-FALSE*/ + ){ p->pSynced = pPage; } } + pcacheDump(p); } /* @@ -41507,7 +43720,9 @@ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){ */ static void pcacheUnpin(PgHdr *p){ if( p->pCache->bPurgeable ){ + pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno)); sqlite3GlobalConfig.pcache2.xUnpin(p->pCache->pCache, p->pPage, 0); + pcacheDump(p->pCache); } } @@ -41577,6 +43792,7 @@ SQLITE_PRIVATE int sqlite3PcacheOpen( p->pStress = pStress; p->szCache = 100; p->szSpill = 1; + pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n",p,szPage,bPurgeable)); return sqlite3PcacheSetPageSize(p, szPage); } @@ -41599,6 +43815,7 @@ SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){ } pCache->pCache = pNew; pCache->szPage = szPage; + pcacheTrace(("%p.PAGESIZE %d\n",pCache,szPage)); } return SQLITE_OK; } @@ -41633,11 +43850,13 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch( int createFlag /* If true, create page if it does not exist already */ ){ int eCreate; + sqlite3_pcache_page *pRes; assert( pCache!=0 ); assert( pCache->pCache!=0 ); assert( createFlag==3 || createFlag==0 ); assert( pgno>0 ); + assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) ); /* eCreate defines what to do if the page does not exist. ** 0 Do not allocate a new page. (createFlag==0) @@ -41650,12 +43869,15 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch( assert( eCreate==0 || eCreate==1 || eCreate==2 ); assert( createFlag==0 || pCache->eCreate==eCreate ); assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); - return sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); + pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); + pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno, + createFlag?" 
create":"",pRes)); + return pRes; } /* ** If the sqlite3PcacheFetch() routine is unable to allocate a new -** page because new clean pages are available for reuse and the cache +** page because no clean pages are available for reuse and the cache ** size limit has been reached, then this routine can be invoked to ** try harder to allocate a page. This routine might invoke the stress ** callback to spill dirty pages to the journal. It will then try to @@ -41677,7 +43899,11 @@ SQLITE_PRIVATE int sqlite3PcacheFetchStress( ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC ** cleared), but if that is not possible settle for any other ** unreferenced dirty page. - */ + ** + ** If the LRU page in the dirty list that has a clear PGHDR_NEED_SYNC + ** flag is currently referenced, then the following may leave pSynced + ** set incorrectly (pointing to other than the LRU page with NEED_SYNC + ** cleared). This is Ok, as pSynced is just an optimization. */ for(pPg=pCache->pSynced; pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC)); pPg=pPg->pDirtyPrev @@ -41695,7 +43921,9 @@ SQLITE_PRIVATE int sqlite3PcacheFetchStress( sqlite3GlobalConfig.pcache.xPagecount(pCache->pCache), numberOfCachePages(pCache)); #endif + pcacheTrace(("%p.SPILL %d\n",pCache,pPg->pgno)); rc = pCache->xStress(pCache->pStress, pPg); + pcacheDump(pCache); if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){ return rc; } @@ -41755,6 +43983,7 @@ SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish( } pCache->nRefSum++; pPgHdr->nRef++; + assert( sqlite3PcachePageSanity(pPgHdr) ); return pPgHdr; } @@ -41768,8 +43997,11 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ if( (--p->nRef)==0 ){ if( p->flags&PGHDR_CLEAN ){ pcacheUnpin(p); - }else if( p->pDirtyPrev!=0 ){ - /* Move the page to the head of the dirty list. */ + }else if( p->pDirtyPrev!=0 ){ /*OPTIMIZATION-IF-FALSE*/ + /* Move the page to the head of the dirty list. If p->pDirtyPrev==0, + ** then page p is already at the head of the dirty list and the + ** following call would be a no-op. Hence the OPTIMIZATION-IF-FALSE + ** tag above. */ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); } } @@ -41780,6 +44012,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ assert(p->nRef>0); + assert( sqlite3PcachePageSanity(p) ); p->nRef++; p->pCache->nRefSum++; } @@ -41791,6 +44024,7 @@ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr *p){ */ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ assert( p->nRef==1 ); + assert( sqlite3PcachePageSanity(p) ); if( p->flags&PGHDR_DIRTY ){ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); } @@ -41804,13 +44038,16 @@ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){ */ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ assert( p->nRef>0 ); - if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ + assert( sqlite3PcachePageSanity(p) ); + if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/ p->flags &= ~PGHDR_DONT_WRITE; if( p->flags & PGHDR_CLEAN ){ p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN); + pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno)); assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); } + assert( sqlite3PcachePageSanity(p) ); } } @@ -41819,11 +44056,14 @@ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ ** make it so. 
*/ SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ - if( (p->flags & PGHDR_DIRTY) ){ + assert( sqlite3PcachePageSanity(p) ); + if( ALWAYS((p->flags & PGHDR_DIRTY)!=0) ){ assert( (p->flags & PGHDR_CLEAN)==0 ); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE); p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE); p->flags |= PGHDR_CLEAN; + pcacheTrace(("%p.CLEAN %d\n",p->pCache,p->pgno)); + assert( sqlite3PcachePageSanity(p) ); if( p->nRef==0 ){ pcacheUnpin(p); } @@ -41835,11 +44075,24 @@ SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr *p){ */ SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache *pCache){ PgHdr *p; + pcacheTrace(("%p.CLEAN-ALL\n",pCache)); while( (p = pCache->pDirty)!=0 ){ sqlite3PcacheMakeClean(p); } } +/* +** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages. +*/ +SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache *pCache){ + PgHdr *p; + pcacheTrace(("%p.CLEAR-WRITEABLE\n",pCache)); + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE); + } + pCache->pSynced = pCache->pDirtyTail; +} + /* ** Clear the PGHDR_NEED_SYNC flag from all dirty pages. */ @@ -41858,6 +44111,8 @@ SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){ PCache *pCache = p->pCache; assert( p->nRef>0 ); assert( newPgno>0 ); + assert( sqlite3PcachePageSanity(p) ); + pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno)); sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno); p->pgno = newPgno; if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){ @@ -41878,6 +44133,7 @@ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ if( pCache->pCache ){ PgHdr *p; PgHdr *pNext; + pcacheTrace(("%p.TRUNCATE %d\n",pCache,pgno)); for(p=pCache->pDirty; p; p=pNext){ pNext = p->pDirtyNext; /* This routine never gets call with a positive pgno except right @@ -41885,7 +44141,7 @@ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ ** it must be that pgno==0. */ assert( p->pgno>0 ); - if( ALWAYS(p->pgno>pgno) ){ + if( p->pgno>pgno ){ assert( p->flags&PGHDR_DIRTY ); sqlite3PcacheMakeClean(p); } @@ -41908,6 +44164,7 @@ SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){ */ SQLITE_PRIVATE void sqlite3PcacheClose(PCache *pCache){ assert( pCache->pCache!=0 ); + pcacheTrace(("%p.CLOSE\n",pCache)); sqlite3GlobalConfig.pcache2.xDestroy(pCache->pCache); } @@ -41920,29 +44177,31 @@ SQLITE_PRIVATE void sqlite3PcacheClear(PCache *pCache){ /* ** Merge two lists of pages connected by pDirty and in pgno order. -** Do not both fixing the pDirtyPrev pointers. +** Do not bother fixing the pDirtyPrev pointers. */ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ PgHdr result, *pTail; pTail = &result; - while( pA && pB ){ + assert( pA!=0 && pB!=0 ); + for(;;){ if( pA->pgnopgno ){ pTail->pDirty = pA; pTail = pA; pA = pA->pDirty; + if( pA==0 ){ + pTail->pDirty = pB; + break; + } }else{ pTail->pDirty = pB; pTail = pB; pB = pB->pDirty; + if( pB==0 ){ + pTail->pDirty = pA; + break; + } } } - if( pA ){ - pTail->pDirty = pA; - }else if( pB ){ - pTail->pDirty = pB; - }else{ - pTail->pDirty = 0; - } return result.pDirty; } @@ -41983,7 +44242,8 @@ static PgHdr *pcacheSortDirtyList(PgHdr *pIn){ } p = a[0]; for(i=1; ipDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++; + return nCache ? 
(int)(((i64)nDirty * 100) / nCache) : 0; +} #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG) /* @@ -42785,8 +45056,8 @@ static int pcache1Init(void *NotUsed){ #if SQLITE_THREADSAFE if( sqlite3GlobalConfig.bCoreMutex ){ - pcache1.grp.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_LRU); - pcache1.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_STATIC_PMEM); + pcache1.grp.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_LRU); + pcache1.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PMEM); } #endif if( pcache1.separateCache @@ -43392,8 +45663,9 @@ SQLITE_PRIVATE void sqlite3PcacheStats( ** of the first SMALLEST is O(NlogN). Second and subsequent SMALLEST ** primitives are constant time. The cost of DESTROY is O(N). ** -** There is an added cost of O(N) when switching between TEST and -** SMALLEST primitives. +** TEST and SMALLEST may not be used by the same RowSet. This used to +** be possible, but the feature was not used, so it was removed in order +** to simplify the code. */ /* #include "sqliteInt.h" */ @@ -43514,7 +45786,9 @@ SQLITE_PRIVATE void sqlite3RowSetClear(RowSet *p){ */ static struct RowSetEntry *rowSetEntryAlloc(RowSet *p){ assert( p!=0 ); - if( p->nFresh==0 ){ + if( p->nFresh==0 ){ /*OPTIMIZATION-IF-FALSE*/ + /* We could allocate a fresh RowSetEntry each time one is needed, but it + ** is more efficient to pull a preallocated entry from the pool */ struct RowSetChunk *pNew; pNew = sqlite3DbMallocRawNN(p->db, sizeof(*pNew)); if( pNew==0 ){ @@ -43548,7 +45822,9 @@ SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet *p, i64 rowid){ pEntry->pRight = 0; pLast = p->pLast; if( pLast ){ - if( (p->rsFlags & ROWSET_SORTED)!=0 && rowid<=pLast->v ){ + if( rowid<=pLast->v ){ /*OPTIMIZATION-IF-FALSE*/ + /* Avoid unnecessary sorts by preserving the ROWSET_SORTED flags + ** where possible */ p->rsFlags &= ~ROWSET_SORTED; } pLast->pRight = pEntry; @@ -43572,28 +45848,26 @@ static struct RowSetEntry *rowSetEntryMerge( struct RowSetEntry *pTail; pTail = &head; - while( pA && pB ){ + assert( pA!=0 && pB!=0 ); + for(;;){ assert( pA->pRight==0 || pA->v<=pA->pRight->v ); assert( pB->pRight==0 || pB->v<=pB->pRight->v ); - if( pA->vv ){ - pTail->pRight = pA; + if( pA->v<=pB->v ){ + if( pA->vv ) pTail = pTail->pRight = pA; pA = pA->pRight; - pTail = pTail->pRight; - }else if( pB->vv ){ - pTail->pRight = pB; - pB = pB->pRight; - pTail = pTail->pRight; + if( pA==0 ){ + pTail->pRight = pB; + break; + } }else{ - pA = pA->pRight; + pTail = pTail->pRight = pB; + pB = pB->pRight; + if( pB==0 ){ + pTail->pRight = pA; + break; + } } } - if( pA ){ - assert( pA->pRight==0 || pA->v<=pA->pRight->v ); - pTail->pRight = pA; - }else{ - assert( pB==0 || pB->pRight==0 || pB->v<=pB->pRight->v ); - pTail->pRight = pB; - } return head.pRight; } @@ -43616,9 +45890,10 @@ static struct RowSetEntry *rowSetEntrySort(struct RowSetEntry *pIn){ aBucket[i] = pIn; pIn = pNext; } - pIn = 0; - for(i=0; i1 ){ /*OPTIMIZATION-IF-TRUE*/ + /* This branch causes a *balanced* tree to be generated. A valid tree + ** is still generated without this branch, but the tree is wildly + ** unbalanced and inefficient. 
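+ ** For example, seven rowids arriving as the sorted linked list
+ ** 1,2,3,4,5,6,7 and assembled with iDepth==3 come out as the
+ ** balanced binary tree:
+ **
+ **                 4
+ **               /   \
+ **              2     6
+ **             / \   / \
+ **            1   3 5   7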
*/ + pLeft = rowSetNDeepTree(ppList, iDepth-1); + p = *ppList; + if( p==0 ){ /*OPTIMIZATION-IF-FALSE*/ + /* It is safe to always return here, but the resulting tree + ** would be unbalanced */ + return pLeft; + } + p->pLeft = pLeft; + *ppList = p->pRight; + p->pRight = rowSetNDeepTree(ppList, iDepth-1); + }else{ p = *ppList; *ppList = p->pRight; p->pLeft = p->pRight = 0; - return p; - } - pLeft = rowSetNDeepTree(ppList, iDepth-1); - p = *ppList; - if( p==0 ){ - return pLeft; } - p->pLeft = pLeft; - *ppList = p->pRight; - p->pRight = rowSetNDeepTree(ppList, iDepth-1); return p; } @@ -43713,59 +45994,37 @@ static struct RowSetEntry *rowSetListToTree(struct RowSetEntry *pList){ return p; } -/* -** Take all the entries on p->pEntry and on the trees in p->pForest and -** sort them all together into one big ordered list on p->pEntry. -** -** This routine should only be called once in the life of a RowSet. -*/ -static void rowSetToList(RowSet *p){ - - /* This routine is called only once */ - assert( p!=0 && (p->rsFlags & ROWSET_NEXT)==0 ); - - if( (p->rsFlags & ROWSET_SORTED)==0 ){ - p->pEntry = rowSetEntrySort(p->pEntry); - } - - /* While this module could theoretically support it, sqlite3RowSetNext() - ** is never called after sqlite3RowSetText() for the same RowSet. So - ** there is never a forest to deal with. Should this change, simply - ** remove the assert() and the #if 0. */ - assert( p->pForest==0 ); -#if 0 - while( p->pForest ){ - struct RowSetEntry *pTree = p->pForest->pLeft; - if( pTree ){ - struct RowSetEntry *pHead, *pTail; - rowSetTreeToList(pTree, &pHead, &pTail); - p->pEntry = rowSetEntryMerge(p->pEntry, pHead); - } - p->pForest = p->pForest->pRight; - } -#endif - p->rsFlags |= ROWSET_NEXT; /* Verify this routine is never called again */ -} - /* ** Extract the smallest element from the RowSet. ** Write the element into *pRowid. Return 1 on success. Return ** 0 if the RowSet is already empty. ** ** After this routine has been called, the sqlite3RowSetInsert() -** routine may not be called again. +** routine may not be called again. +** +** This routine may not be called after sqlite3RowSetTest() has +** been used. Older versions of RowSet allowed that, but as the +** capability was not used by the code generator, it was removed +** for code economy. */ SQLITE_PRIVATE int sqlite3RowSetNext(RowSet *p, i64 *pRowid){ assert( p!=0 ); + assert( p->pForest==0 ); /* Cannot be used with sqlite3RowSetText() */ /* Merge the forest into a single sorted list on first call */ - if( (p->rsFlags & ROWSET_NEXT)==0 ) rowSetToList(p); + if( (p->rsFlags & ROWSET_NEXT)==0 ){ /*OPTIMIZATION-IF-FALSE*/ + if( (p->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ + p->pEntry = rowSetEntrySort(p->pEntry); + } + p->rsFlags |= ROWSET_SORTED|ROWSET_NEXT; + } /* Return the next entry on the list */ if( p->pEntry ){ *pRowid = p->pEntry->v; p->pEntry = p->pEntry->pRight; - if( p->pEntry==0 ){ + if( p->pEntry==0 ){ /*OPTIMIZATION-IF-TRUE*/ + /* Free memory immediately, rather than waiting on sqlite3_finalize() */ sqlite3RowSetClear(p); } return 1; @@ -43788,13 +46047,15 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 /* This routine is never called after sqlite3RowSetNext() */ assert( pRowSet!=0 && (pRowSet->rsFlags & ROWSET_NEXT)==0 ); - /* Sort entries into the forest on the first test of a new batch + /* Sort entries into the forest on the first test of a new batch. + ** To save unnecessary work, only do this when the batch number changes. 
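+ ** Entries inserted while the batch number is unchanged stay on the
+ ** unsorted pEntry list and are deliberately not consulted by tests
+ ** made during that same batch; they only become visible to
+ ** sqlite3RowSetTest() once a later batch triggers the sort-and-merge
+ ** below.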
*/ - if( iBatch!=pRowSet->iBatch ){ + if( iBatch!=pRowSet->iBatch ){ /*OPTIMIZATION-IF-FALSE*/ p = pRowSet->pEntry; if( p ){ struct RowSetEntry **ppPrevTree = &pRowSet->pForest; - if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ + if( (pRowSet->rsFlags & ROWSET_SORTED)==0 ){ /*OPTIMIZATION-IF-FALSE*/ + /* Only sort the current set of entiries if they need it */ p = rowSetEntrySort(p); } for(pTree = pRowSet->pForest; pTree; pTree=pTree->pRight){ @@ -43884,8 +46145,8 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 ** the implementation of each function in log.c for further details. */ -#ifndef _WAL_H_ -#define _WAL_H_ +#ifndef SQLITE_WAL_H +#define SQLITE_WAL_H /* #include "sqliteInt.h" */ @@ -44013,7 +46274,7 @@ SQLITE_PRIVATE int sqlite3WalFramesize(Wal *pWal); SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal); #endif /* ifndef SQLITE_OMIT_WAL */ -#endif /* _WAL_H_ */ +#endif /* SQLITE_WAL_H */ /************** End of wal.h *************************************************/ /************** Continuing where we left off in pager.c **********************/ @@ -44868,6 +47129,7 @@ static int assert_pager_state(Pager *p){ ** state. */ if( MEMDB ){ + assert( !isOpen(p->fd) ); assert( p->noSync ); assert( p->journalMode==PAGER_JOURNALMODE_OFF || p->journalMode==PAGER_JOURNALMODE_MEMORY @@ -44954,7 +47216,7 @@ static int assert_pager_state(Pager *p){ ** back to OPEN state. */ assert( pPager->errCode!=SQLITE_OK ); - assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); + assert( sqlite3PcacheRefCount(pPager->pPCache)>0 || pPager->tempFile ); break; } @@ -45166,6 +47428,8 @@ static int jrnlBufferSize(Pager *pPager){ return JOURNAL_HDR_SZ(pPager) + JOURNAL_PG_SZ(pPager); } +#else +# define jrnlBufferSize(x) 0 #endif /* @@ -45814,13 +48078,17 @@ static void pager_unlock(Pager *pPager){ ** it can safely move back to PAGER_OPEN state. This happens in both ** normal and exclusive-locking mode. */ + assert( pPager->errCode==SQLITE_OK || !MEMDB ); if( pPager->errCode ){ - assert( !MEMDB ); - pager_reset(pPager); - pPager->changeCountDone = pPager->tempFile; - pPager->eState = PAGER_OPEN; - pPager->errCode = SQLITE_OK; + if( pPager->tempFile==0 ){ + pager_reset(pPager); + pPager->changeCountDone = 0; + pPager->eState = PAGER_OPEN; + }else{ + pPager->eState = (isOpen(pPager->jfd) ? PAGER_OPEN : PAGER_READER); + } if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0); + pPager->errCode = SQLITE_OK; } pPager->journalOff = 0; @@ -45864,6 +48132,29 @@ static int pager_error(Pager *pPager, int rc){ static int pager_truncate(Pager *pPager, Pgno nPage); +/* +** The write transaction open on pPager is being committed (bCommit==1) +** or rolled back (bCommit==0). +** +** Return TRUE if and only if all dirty pages should be flushed to disk. +** +** Rules: +** +** * For non-TEMP databases, always sync to disk. This is necessary +** for transactions to be durable. +** +** * Sync TEMP database only on a COMMIT (not a ROLLBACK) when the backing +** file has been created already (via a spill on pagerStress()) and +** when the number of dirty pages in memory exceeds 25% of the total +** cache size. +*/ +static int pagerFlushOnCommit(Pager *pPager, int bCommit){ + if( pPager->tempFile==0 ) return 1; + if( !bCommit ) return 0; + if( !isOpen(pPager->fd) ) return 0; + return (sqlite3PCachePercentDirty(pPager->pPCache)>=25); +} + /* ** This routine ends a transaction. A transaction is usually ended by ** either a COMMIT or a ROLLBACK operation. 
This routine may be called @@ -45967,7 +48258,7 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ }else if( pPager->journalMode==PAGER_JOURNALMODE_PERSIST || (pPager->exclusiveMode && pPager->journalMode!=PAGER_JOURNALMODE_WAL) ){ - rc = zeroJournalHdr(pPager, hasMaster); + rc = zeroJournalHdr(pPager, hasMaster||pPager->tempFile); pPager->journalOff = 0; }else{ /* This branch may be executed with Pager.journalMode==MEMORY if @@ -46002,8 +48293,14 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){ sqlite3BitvecDestroy(pPager->pInJournal); pPager->pInJournal = 0; pPager->nRec = 0; - sqlite3PcacheCleanAll(pPager->pPCache); - sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); + if( rc==SQLITE_OK ){ + if( pagerFlushOnCommit(pPager, bCommit) ){ + sqlite3PcacheCleanAll(pPager->pPCache); + }else{ + sqlite3PcacheClearWritable(pPager->pPCache); + } + sqlite3PcacheTruncate(pPager->pPCache, pPager->dbSize); + } if( pagerUseWal(pPager) ){ /* Drop the WAL write-lock, if any. Also, if the connection was in @@ -46287,7 +48584,7 @@ static int pager_playback_one_page( pPg = sqlite3PagerLookup(pPager, pgno); } assert( pPg || !MEMDB ); - assert( pPager->eState!=PAGER_OPEN || pPg==0 ); + assert( pPager->eState!=PAGER_OPEN || pPg==0 || pPager->tempFile ); PAGERTRACE(("PLAYBACK %d page %d hash(%08x) %s\n", PAGERID(pPager), pgno, pager_datahash(pPager->pageSize, (u8*)aData), (isMainJrnl?"main-journal":"sub-journal") @@ -46337,7 +48634,6 @@ static int pager_playback_one_page( assert( (pPager->doNotSpill & SPILLFLAG_ROLLBACK)!=0 ); pPager->doNotSpill &= ~SPILLFLAG_ROLLBACK; if( rc!=SQLITE_OK ) return rc; - pPg->flags &= ~PGHDR_NEED_READ; sqlite3PcacheMakeDirty(pPg); } if( pPg ){ @@ -46351,29 +48647,10 @@ static int pager_playback_one_page( pData = pPg->pData; memcpy(pData, (u8*)aData, pPager->pageSize); pPager->xReiniter(pPg); - if( isMainJrnl && (!isSavepnt || *pOffset<=pPager->journalHdr) ){ - /* If the contents of this page were just restored from the main - ** journal file, then its content must be as they were when the - ** transaction was first opened. In this case we can mark the page - ** as clean, since there will be no need to write it out to the - ** database. - ** - ** There is one exception to this rule. If the page is being rolled - ** back as part of a savepoint (or statement) rollback from an - ** unsynced portion of the main journal file, then it is not safe - ** to mark the page as clean. This is because marking the page as - ** clean will clear the PGHDR_NEED_SYNC flag. Since the page is - ** already in the journal file (recorded in Pager.pInJournal) and - ** the PGHDR_NEED_SYNC flag is cleared, if the page is written to - ** again within this transaction, it will be marked as dirty but - ** the PGHDR_NEED_SYNC flag will not be set. It could then potentially - ** be written out into the database file before its journal file - ** segment is synced. If a crash occurs during or following this, - ** database corruption may ensue. - */ - assert( !pagerUseWal(pPager) ); - sqlite3PcacheMakeClean(pPg); - } + /* It used to be that sqlite3PcacheMakeClean(pPg) was called here. But + ** that call was dangerous and had no detectable benefit since the cache + ** is normally cleaned by sqlite3PcacheCleanAll() after rollback and so + ** has been removed. */ pager_set_pagehash(pPg); /* If this was page 1, then restore the value of Pager.dbFileVers. 
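The pagerFlushOnCommit() rules introduced above are compact enough to model in
isolation. Below is a minimal, self-contained sketch of that decision; the
function name and values are illustrative only and assume nothing about SQLite
internals beyond the four inputs shown in the hunk.

    #include <stdio.h>

    /* Model of the temp-file flush decision: flush unless the pager is
    ** a temp file that is rolling back, has never spilled to disk, or
    ** is less than 25% dirty. */
    static int flush_on_commit(int isTemp, int isCommit, int fdOpen, int pctDirty){
      if( !isTemp ) return 1;       /* durable databases always flush */
      if( !isCommit ) return 0;     /* a temp-file rollback never flushes */
      if( !fdOpen ) return 0;       /* no backing file has been created yet */
      return pctDirty>=25;          /* flush once 25% of the cache is dirty */
    }

    int main(void){
      printf("%d %d %d\n",
             flush_on_commit(0, 0, 0, 0),    /* 1: ordinary database */
             flush_on_commit(1, 1, 1, 10),   /* 0: temp file, mostly clean */
             flush_on_commit(1, 1, 1, 40));  /* 1: temp file, 40% dirty */
      return 0;
    }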
@@ -47164,6 +49441,8 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){ */ assert( pPager->eState==PAGER_OPEN ); assert( pPager->eLock>=SHARED_LOCK ); + assert( isOpen(pPager->fd) ); + assert( pPager->tempFile==0 ); nPage = sqlite3WalDbsize(pPager->pWal); /* If the number of pages in the database is not available from the @@ -47171,14 +49450,11 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){ ** the database file. If the size of the database file is not an ** integer multiple of the page-size, round up the result. */ - if( nPage==0 ){ + if( nPage==0 && ALWAYS(isOpen(pPager->fd)) ){ i64 n = 0; /* Size of db file in bytes */ - assert( isOpen(pPager->fd) || pPager->tempFile ); - if( isOpen(pPager->fd) ){ - int rc = sqlite3OsFileSize(pPager->fd, &n); - if( rc!=SQLITE_OK ){ - return rc; - } + int rc = sqlite3OsFileSize(pPager->fd, &n); + if( rc!=SQLITE_OK ){ + return rc; } nPage = (Pgno)((n+pPager->pageSize-1) / pPager->pageSize); } @@ -48254,8 +50530,9 @@ static int pager_write_pagelist(Pager *pPager, PgHdr *pList){ /* This function is only called for rollback pagers in WRITER_DBMOD state. */ assert( !pagerUseWal(pPager) ); - assert( pPager->eState==PAGER_WRITER_DBMOD ); + assert( pPager->tempFile || pPager->eState==PAGER_WRITER_DBMOD ); assert( pPager->eLock==EXCLUSIVE_LOCK ); + assert( isOpen(pPager->fd) || pList->pDirty==0 ); /* If the file is a temp-file has not yet been opened, open it now. It ** is not possible for rc to be other than SQLITE_OK if this branch @@ -48923,6 +51200,7 @@ static int hasHotJournal(Pager *pPager, int *pExists){ if( rc==SQLITE_OK && !locked ){ Pgno nPage; /* Number of pages in database file */ + assert( pPager->tempFile==0 ); rc = pagerPagecount(pPager, &nPage); if( rc==SQLITE_OK ){ /* If the database is zero pages in size, that means that either (1) the @@ -49015,17 +51293,17 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ /* This routine is only called from b-tree and only when there are no ** outstanding pages. This implies that the pager state should either ** be OPEN or READER. READER is only possible if the pager is or was in - ** exclusive access mode. - */ + ** exclusive access mode. 
*/ assert( sqlite3PcacheRefCount(pPager->pPCache)==0 ); assert( assert_pager_state(pPager) ); assert( pPager->eState==PAGER_OPEN || pPager->eState==PAGER_READER ); - if( NEVER(MEMDB && pPager->errCode) ){ return pPager->errCode; } + assert( pPager->errCode==SQLITE_OK ); if( !pagerUseWal(pPager) && pPager->eState==PAGER_OPEN ){ int bHotJournal = 1; /* True if there exists a hot journal-file */ assert( !MEMDB ); + assert( pPager->tempFile==0 || pPager->eLock==EXCLUSIVE_LOCK ); rc = pager_wait_on_lock(pPager, SHARED_LOCK); if( rc!=SQLITE_OK ){ @@ -49111,7 +51389,7 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ assert( rc==SQLITE_OK ); rc = pagerSyncHotJournal(pPager); if( rc==SQLITE_OK ){ - rc = pager_playback(pPager, 1); + rc = pager_playback(pPager, !pPager->tempFile); pPager->eState = PAGER_OPEN; } }else if( !pPager->exclusiveMode ){ @@ -49207,7 +51485,7 @@ SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager){ rc = pagerBeginReadTransaction(pPager); } - if( pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){ + if( pPager->tempFile==0 && pPager->eState==PAGER_OPEN && rc==SQLITE_OK ){ rc = pagerPagecount(pPager, &pPager->dbSize); } @@ -49340,7 +51618,7 @@ SQLITE_PRIVATE int sqlite3PagerGet( ); if( rc==SQLITE_OK && pData ){ - if( pPager->eState>PAGER_READER ){ + if( pPager->eState>PAGER_READER || pPager->tempFile ){ pPg = sqlite3PagerLookup(pPager, pgno); } if( pPg==0 ){ @@ -49407,7 +51685,8 @@ SQLITE_PRIVATE int sqlite3PagerGet( goto pager_acquire_err; } - if( MEMDB || pPager->dbSizefd) ){ + assert( !isOpen(pPager->fd) || !MEMDB ); + if( !isOpen(pPager->fd) || pPager->dbSizepPager->mxPgno ){ rc = SQLITE_FULL; goto pager_acquire_err; @@ -49549,24 +51828,24 @@ static int pager_open_journal(Pager *pPager){ if( pPager->journalMode==PAGER_JOURNALMODE_MEMORY ){ sqlite3MemJournalOpen(pPager->jfd); }else{ - const int flags = /* VFS flags to open journal file */ - SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE| - (pPager->tempFile ? - (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL): - (SQLITE_OPEN_MAIN_JOURNAL) - ); + int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE; + int nSpill; + if( pPager->tempFile ){ + flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL); + nSpill = sqlite3Config.nStmtSpill; + }else{ + flags |= SQLITE_OPEN_MAIN_JOURNAL; + nSpill = jrnlBufferSize(pPager); + } + /* Verify that the database still has the same name as it did when ** it was originally opened. */ rc = databaseIsUnmoved(pPager); if( rc==SQLITE_OK ){ -#ifdef SQLITE_ENABLE_ATOMIC_WRITE - rc = sqlite3JournalOpen( - pVfs, pPager->zJournal, pPager->jfd, flags, jrnlBufferSize(pPager) + rc = sqlite3JournalOpen ( + pVfs, pPager->zJournal, pPager->jfd, flags, nSpill ); -#else - rc = sqlite3OsOpen(pVfs, pPager->zJournal, pPager->jfd, flags, 0); -#endif } } assert( rc!=SQLITE_OK || isOpen(pPager->jfd) ); @@ -49937,6 +52216,7 @@ SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){ if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg); return SQLITE_OK; }else if( pPager->sectorSize > (u32)pPager->pageSize ){ + assert( pPager->tempFile==0 ); return pagerWriteLargeSector(pPg); }else{ return pager_write(pPg); @@ -49967,14 +52247,21 @@ SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage *pPg){ ** ** Tests show that this optimization can quadruple the speed of large ** DELETE operations. +** +** This optimization cannot be used with a temp-file, as the page may +** have been dirty at the start of the transaction. 
In that case, if +** memory pressure forces page pPg out of the cache, the data does need +** to be written out to disk so that it may be read back in if the +** current transaction is rolled back. */ SQLITE_PRIVATE void sqlite3PagerDontWrite(PgHdr *pPg){ Pager *pPager = pPg->pPager; - if( (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ + if( !pPager->tempFile && (pPg->flags&PGHDR_DIRTY) && pPager->nSavepoint==0 ){ PAGERTRACE(("DONT_WRITE page %d of %d\n", pPg->pgno, PAGERID(pPager))); IOTRACE(("CLEAN %p %d\n", pPager, pPg->pgno)) pPg->flags |= PGHDR_DONT_WRITE; pPg->flags &= ~PGHDR_WRITEABLE; + testcase( pPg->flags & PGHDR_NEED_SYNC ); pager_set_pagehash(pPg); } } @@ -50169,17 +52456,21 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne( /* If a prior error occurred, report that error again. */ if( NEVER(pPager->errCode) ) return pPager->errCode; + /* Provide the ability to easily simulate an I/O error during testing */ + if( sqlite3FaultSim(400) ) return SQLITE_IOERR; + PAGERTRACE(("DATABASE SYNC: File=%s zMaster=%s nSize=%d\n", pPager->zFilename, zMaster, pPager->dbSize)); /* If no database changes have been made, return early. */ if( pPager->eStatetempFile ); + assert( isOpen(pPager->fd) || pPager->tempFile ); + if( 0==pagerFlushOnCommit(pPager, 1) ){ /* If this is an in-memory db, or no pages have been written to, or this ** function has already been called, it is mostly a no-op. However, any - ** backup in progress needs to be restarted. - */ + ** backup in progress needs to be restarted. */ sqlite3BackupRestart(pPager->pBackup); }else{ if( pagerUseWal(pPager) ){ @@ -50518,10 +52809,10 @@ SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, i } /* -** Return true if this is an in-memory pager. +** Return true if this is an in-memory or temp-file backed pager. */ SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager *pPager){ - return MEMDB; + return pPager->tempFile; } /* @@ -50801,7 +53092,8 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i /* In order to be able to rollback, an in-memory database must journal ** the page we are moving from. */ - if( MEMDB ){ + assert( pPager->tempFile || !MEMDB ); + if( pPager->tempFile ){ rc = sqlite3PagerWrite(pPg); if( rc ) return rc; } @@ -50858,7 +53150,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i assert( !pPgOld || pPgOld->nRef==1 ); if( pPgOld ){ pPg->flags |= (pPgOld->flags&PGHDR_NEED_SYNC); - if( MEMDB ){ + if( pPager->tempFile ){ /* Do not discard pages from an in-memory database since we might ** need to rollback later. Just move the page out of the way. */ sqlite3PcacheMove(pPgOld, pPager->dbSize+1); @@ -50875,8 +53167,7 @@ SQLITE_PRIVATE int sqlite3PagerMovepage(Pager *pPager, DbPage *pPg, Pgno pgno, i ** to exist, in case the transaction needs to roll back. Use pPgOld ** as the original page since it has already been allocated. */ - if( MEMDB ){ - assert( pPgOld ); + if( pPager->tempFile && pPgOld ){ sqlite3PcacheMove(pPgOld, origPgno); sqlite3PagerUnrefNotNull(pPgOld); } @@ -51128,10 +53419,12 @@ SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager *pPager){ ** Unless this is an in-memory or temporary database, clear the pager cache. 
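** Note that after this patch every in-memory database also has
** Pager.tempFile set (the assert below records exactly that), so the
** single tempFile test now covers both halves of the comment above.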
*/ SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){ - if( !MEMDB && pPager->tempFile==0 ) pager_reset(pPager); + assert( MEMDB==0 || pPager->tempFile ); + if( pPager->tempFile==0 ) pager_reset(pPager); } #endif + #ifndef SQLITE_OMIT_WAL /* ** This function is called when the user invokes "PRAGMA wal_checkpoint", @@ -51307,6 +53600,7 @@ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){ pPager->pageSize, (u8*)pPager->pTmpSpace); pPager->pWal = 0; pagerFixMaplimit(pPager); + if( rc && !pPager->exclusiveMode ) pagerUnlockDb(pPager, SHARED_LOCK); } } return rc; @@ -51356,7 +53650,6 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ } #endif - #endif /* SQLITE_OMIT_DISKIO */ /************** End of pager.c ***********************************************/ @@ -54472,16 +56765,21 @@ SQLITE_PRIVATE int sqlite3WalFrames( ** past the sector boundary is written after the sync. */ if( isCommit && (sync_flags & WAL_SYNC_TRANSACTIONS)!=0 ){ + int bSync = 1; if( pWal->padToSectorBoundary ){ int sectorSize = sqlite3SectorSize(pWal->pWalFd); w.iSyncPoint = ((iOffset+sectorSize-1)/sectorSize)*sectorSize; + bSync = (w.iSyncPoint==iOffset); + testcase( bSync ); while( iOffsetpSnapshot = (WalIndexHdr*)pSnapshot; } + +/* +** Return a +ve value if snapshot p1 is newer than p2. A -ve value if +** p1 is older than p2 and zero if p1 and p2 are the same snapshot. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){ + WalIndexHdr *pHdr1 = (WalIndexHdr*)p1; + WalIndexHdr *pHdr2 = (WalIndexHdr*)p2; + + /* aSalt[0] is a copy of the value stored in the wal file header. It + ** is incremented each time the wal file is restarted. */ + if( pHdr1->aSalt[0]aSalt[0] ) return -1; + if( pHdr1->aSalt[0]>pHdr2->aSalt[0] ) return +1; + if( pHdr1->mxFramemxFrame ) return -1; + if( pHdr1->mxFrame>pHdr2->mxFrame ) return +1; + return 0; +} #endif /* SQLITE_ENABLE_SNAPSHOT */ #ifdef SQLITE_ENABLE_ZIPVFS @@ -56251,6 +58566,15 @@ static void releasePage(MemPage *pPage); /* Forward reference */ static int cursorHoldsMutex(BtCursor *p){ return sqlite3_mutex_held(p->pBt->mutex); } + +/* Verify that the cursor and the BtShared agree about what is the current +** database connetion. This is important in shared-cache mode. If the database +** connection pointers get out-of-sync, it is possible for routines like +** btreeInitPage() to reference an stale connection pointer that references a +** a connection that has already closed. This routine is used inside assert() +** statements only and for the purpose of double-checking that the btree code +** does keep the database connection pointers up-to-date. +*/ static int cursorOwnsBtShared(BtCursor *p){ assert( cursorHoldsMutex(p) ); return (p->pBtree->db==p->pBt->db); @@ -56410,21 +58734,19 @@ static void btreeReleaseAllCursorPages(BtCursor *pCur){ ** the key. */ static int saveCursorKey(BtCursor *pCur){ - int rc; + int rc = SQLITE_OK; assert( CURSOR_VALID==pCur->eState ); assert( 0==pCur->pKey ); assert( cursorHoldsMutex(pCur) ); - rc = sqlite3BtreeKeySize(pCur, &pCur->nKey); - assert( rc==SQLITE_OK ); /* KeySize() cannot fail */ - - /* If this is an intKey table, then the above call to BtreeKeySize() - ** stores the integer key in pCur->nKey. In this case this value is - ** all that is required. Otherwise, if pCur is not open on an intKey - ** table, then malloc space for and store the pCur->nKey bytes of key - ** data. 
*/ - if( 0==pCur->curIntKey ){ - void *pKey = sqlite3Malloc( pCur->nKey ); + if( pCur->curIntKey ){ + /* Only the rowid is required for a table btree */ + pCur->nKey = sqlite3BtreeIntegerKey(pCur); + }else{ + /* For an index btree, save the complete key content */ + void *pKey; + pCur->nKey = sqlite3BtreePayloadSize(pCur); + pKey = sqlite3Malloc( pCur->nKey ); if( pKey ){ rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ @@ -57477,11 +59799,11 @@ static int decodeFlags(MemPage *pPage, int flagByte){ pPage->xCellSize = cellSizePtr; pBt = pPage->pBt; if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ - /* EVIDENCE-OF: R-03640-13415 A value of 5 means the page is an interior - ** table b-tree page. */ + /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an + ** interior table b-tree page. */ assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); - /* EVIDENCE-OF: R-20501-61796 A value of 13 means the page is a leaf - ** table b-tree page. */ + /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a + ** leaf table b-tree page. */ assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); pPage->intKey = 1; if( pPage->leaf ){ @@ -57495,11 +59817,11 @@ static int decodeFlags(MemPage *pPage, int flagByte){ pPage->maxLocal = pBt->maxLeaf; pPage->minLocal = pBt->minLeaf; }else if( flagByte==PTF_ZERODATA ){ - /* EVIDENCE-OF: R-27225-53936 A value of 2 means the page is an interior - ** index b-tree page. */ + /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an + ** interior index b-tree page. */ assert( (PTF_ZERODATA)==2 ); - /* EVIDENCE-OF: R-16571-11615 A value of 10 means the page is a leaf - ** index b-tree page. */ + /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a + ** leaf index b-tree page. */ assert( (PTF_ZERODATA|PTF_LEAF)==10 ); pPage->intKey = 0; pPage->intKeyLeaf = 0; @@ -58131,9 +60453,9 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* Add the new BtShared object to the linked list sharable BtShareds. */ + pBt->nRef = 1; if( p->sharable ){ MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) - pBt->nRef = 1; MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);) if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){ pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST); @@ -58204,6 +60526,7 @@ btree_open_out: assert( sqlite3_mutex_held(mutexOpen) ); sqlite3_mutex_leave(mutexOpen); } + assert( rc!=SQLITE_OK || sqlite3BtreeConnectionCount(*ppBtree)>0 ); return rc; } @@ -60063,46 +62386,33 @@ SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){ #endif /* NDEBUG */ /* -** Set *pSize to the size of the buffer needed to hold the value of -** the key for the current entry. If the cursor is not pointing -** to a valid entry, *pSize is set to 0. -** -** For a table with the INTKEY flag set, this routine returns the key -** itself, not the number of bytes in the key. -** -** The caller must position the cursor prior to invoking this routine. -** -** This routine cannot fail. It always returns SQLITE_OK. +** Return the value of the integer key or "rowid" for a table btree. +** This routine is only valid for a cursor that is pointing into a +** ordinary table btree. If the cursor points to an index btree or +** is invalid, the result of this routine is undefined. 
*/ -SQLITE_PRIVATE int sqlite3BtreeKeySize(BtCursor *pCur, i64 *pSize){ +SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); + assert( pCur->curIntKey ); getCellInfo(pCur); - *pSize = pCur->info.nKey; - return SQLITE_OK; + return pCur->info.nKey; } /* -** Set *pSize to the number of bytes of data in the entry the -** cursor currently points to. +** Return the number of bytes of payload for the entry that pCur is +** currently pointing to. For table btrees, this will be the amount +** of data. For index btrees, this will be the size of the key. ** ** The caller must guarantee that the cursor is pointing to a non-NULL ** valid entry. In other words, the calling procedure must guarantee ** that the cursor has Cursor.eState==CURSOR_VALID. -** -** Failure is not possible. This function always returns SQLITE_OK. -** It might just as well be a procedure (returning void) but we continue -** to return an integer result code for historical reasons. */ -SQLITE_PRIVATE int sqlite3BtreeDataSize(BtCursor *pCur, u32 *pSize){ - assert( cursorOwnsBtShared(pCur) ); +SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){ + assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); - assert( pCur->iPage>=0 ); - assert( pCur->iPageapPage[pCur->iPage]->intKeyLeaf==1 ); getCellInfo(pCur); - *pSize = pCur->info.nPayload; - return SQLITE_OK; + return pCur->info.nPayload; } /* @@ -60544,10 +62854,7 @@ static const void *fetchPayload( ** These routines is used to get quick access to key and data ** in the common case where no overflow pages are used. */ -SQLITE_PRIVATE const void *sqlite3BtreeKeyFetch(BtCursor *pCur, u32 *pAmt){ - return fetchPayload(pCur, pAmt); -} -SQLITE_PRIVATE const void *sqlite3BtreeDataFetch(BtCursor *pCur, u32 *pAmt){ +SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){ return fetchPayload(pCur, pAmt); } @@ -60880,11 +63187,12 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked( assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( pRes ); assert( (pIdxKey==0)==(pCur->pKeyInfo==0) ); + assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) ); /* If the cursor is already positioned at the point we are trying ** to move to, then just return without doing any work */ - if( pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 - && pCur->curIntKey + if( pIdxKey==0 + && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 ){ if( pCur->info.nKey==intKey ){ *pRes = 0; @@ -61873,9 +64181,7 @@ static int clearCell( static int fillInCell( MemPage *pPage, /* The page that contains the cell */ unsigned char *pCell, /* Complete text of the cell */ - const void *pKey, i64 nKey, /* The key */ - const void *pData,int nData, /* The data */ - int nZero, /* Extra zero bytes to append to pData */ + const BtreePayload *pX, /* Payload with which to construct the cell */ int *pnSize /* Write cell size here */ ){ int nPayload; @@ -61899,26 +64205,23 @@ static int fillInCell( /* Fill in the header. 
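** For a table (intKey) leaf page the header written below is the
** payload size as a varint followed by the rowid as a varint; for an
** index page it is just the payload-size varint.  So a rowid-table
** cell carrying rowid 5000 and 7 bytes of data starts with the bytes
**
**     07 A7 08
**
** since 7 encodes as 0x07 and 5000 as the two-byte varint 0xA7 0x08.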
*/ nHeader = pPage->childPtrSize; - nPayload = nData + nZero; - if( pPage->intKeyLeaf ){ + if( pPage->intKey ){ + nPayload = pX->nData + pX->nZero; + pSrc = pX->pData; + nSrc = pX->nData; + assert( pPage->intKeyLeaf ); /* fillInCell() only called for leaves */ nHeader += putVarint32(&pCell[nHeader], nPayload); + nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey); }else{ - assert( nData==0 ); - assert( nZero==0 ); + assert( pX->nData==0 ); + assert( pX->nZero==0 ); + assert( pX->nKey<=0x7fffffff && pX->pKey!=0 ); + nSrc = nPayload = (int)pX->nKey; + pSrc = pX->pKey; + nHeader += putVarint32(&pCell[nHeader], nPayload); } - nHeader += putVarint(&pCell[nHeader], *(u64*)&nKey); - /* Fill in the payload size */ - if( pPage->intKey ){ - pSrc = pData; - nSrc = nData; - nData = 0; - }else{ - assert( nKey<=0x7fffffff && pKey!=0 ); - nPayload = (int)nKey; - pSrc = pKey; - nSrc = (int)nKey; - } + /* Fill in the payload */ if( nPayload<=pPage->maxLocal ){ n = nHeader + nPayload; testcase( n==3 ); @@ -61956,7 +64259,7 @@ static int fillInCell( CellInfo info; pPage->xParseCell(pPage, pCell, &info); assert( nHeader==(int)(info.pPayload - pCell) ); - assert( info.nKey==nKey ); + assert( info.nKey==pX->nKey ); assert( *pnSize == info.nSize ); assert( spaceLeft == info.nLocal ); } @@ -62041,10 +64344,6 @@ static int fillInCell( pSrc += n; nSrc -= n; spaceLeft -= n; - if( nSrc==0 ){ - nSrc = nData; - pSrc = pData; - } } releasePage(pToRelease); return SQLITE_OK; @@ -62111,6 +64410,8 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ ** in pTemp or the original pCell) and also record its index. ** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. +** +** *pRC must be SQLITE_OK when this routine is called. */ static void insertCell( MemPage *pPage, /* Page into which we are copying */ @@ -62126,8 +64427,7 @@ static void insertCell( u8 *data; /* The content of the whole page */ u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ - if( *pRC ) return; - + assert( *pRC==SQLITE_OK ); assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); assert( MX_CELL(pPage->pBt)<=10921 ); assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); @@ -62201,7 +64501,7 @@ static void insertCell( /* ** A CellArray object contains a cache of pointers and sizes for a -** consecutive sequence of cells that might be held multiple pages. +** consecutive sequence of cells that might be held on multiple pages. */ typedef struct CellArray CellArray; struct CellArray { @@ -62346,8 +64646,8 @@ static int pageInsertArray( u8 *pSlot; sz = cachedCellSize(pCArray, i); if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){ + if( (pData - pBegin)apCell[i] will never overlap on a well-formed @@ -62509,7 +64809,7 @@ static int editPage( for(i=0; iapCell[i+iNew]; int iOff = get2byteAligned(&pPg->aCellIdx[i*2]); - if( pCell>=aData && pCell<&aData[pPg->pBt->usableSize] ){ + if( SQLITE_WITHIN(pCell, aData, &aData[pPg->pBt->usableSize]) ){ pCell = &pTmp[pCell - aData]; } assert( 0==memcmp(pCell, &aData[iOff], @@ -62633,8 +64933,10 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ while( ((*(pOut++) = *(pCell++))&0x80) && pCellnCell, pSpace, (int)(pOut-pSpace), - 0, pPage->pgno, &rc); + if( rc==SQLITE_OK ){ + insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), + 0, pPage->pgno, &rc); + } /* Set the right-child pointer of pParent to point to the new page. 
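** (In the btree file format the right-most child pointer of an
** interior page is the 4-byte big-endian page number stored at byte
** offset 8 of the page header, which is what the put4byte() below
** fills in.)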
*/ put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); @@ -63154,7 +65456,7 @@ static int balance_nonroot( assert( r szLeft-(b.szCell[r]+2)) ){ + && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){ break; } szRight += b.szCell[d] + 2; @@ -63726,13 +66028,19 @@ static int balance(BtCursor *pCur){ /* -** Insert a new record into the BTree. The key is given by (pKey,nKey) -** and the data is given by (pData,nData). The cursor is used only to -** define what table the record should be inserted into. The cursor -** is left pointing at a random location. +** Insert a new record into the BTree. The content of the new record +** is described by the pX object. The pCur cursor is used only to +** define what table the record should be inserted into, and is left +** pointing at a random location. +** +** For a table btree (used for rowid tables), only the pX.nKey value of +** the key is used. The pX.pKey value must be NULL. The pX.nKey is the +** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields +** hold the content of the row. ** -** For an INTKEY table, only the nKey value of the key is used. pKey is -** ignored. For a ZERODATA table, the pData and nData are both ignored. +** For an index btree (used for indexes and WITHOUT ROWID tables), the +** key is an arbitrary byte sequence stored in pX.pKey,nKey. The +** pX.pData,nData,nZero fields must be zero. ** ** If the seekResult parameter is non-zero, then a successful call to ** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already @@ -63749,9 +66057,7 @@ static int balance(BtCursor *pCur){ */ SQLITE_PRIVATE int sqlite3BtreeInsert( BtCursor *pCur, /* Insert data into the table of this cursor */ - const void *pKey, i64 nKey, /* The key of the new record */ - const void *pData, int nData, /* The data of the new record */ - int nZero, /* Number of extra 0 bytes to append to data */ + const BtreePayload *pX, /* Content of the row to be inserted */ int appendBias, /* True if this is likely an append */ int seekResult /* Result of prior MovetoUnpacked() call */ ){ @@ -63781,7 +66087,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ - assert( (pKey==0)==(pCur->pKeyInfo==0) ); + assert( (pX->pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. 
** @@ -63800,38 +66106,38 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( } if( pCur->pKeyInfo==0 ){ - assert( pKey==0 ); + assert( pX->pKey==0 ); /* If this is an insert into a table b-tree, invalidate any incrblob ** cursors open on the row being replaced */ - invalidateIncrblobCursors(p, nKey, 0); + invalidateIncrblobCursors(p, pX->nKey, 0); /* If the cursor is currently on the last row and we are appending a ** new row onto the end, set the "loc" to avoid an unnecessary ** btreeMoveto() call */ - if( (pCur->curFlags&BTCF_ValidNKey)!=0 && nKey>0 - && pCur->info.nKey==nKey-1 ){ + if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0 + && pCur->info.nKey==pX->nKey-1 ){ loc = -1; }else if( loc==0 ){ - rc = sqlite3BtreeMovetoUnpacked(pCur, 0, nKey, appendBias, &loc); + rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, appendBias, &loc); if( rc ) return rc; } }else if( loc==0 ){ - rc = btreeMoveto(pCur, pKey, nKey, appendBias, &loc); + rc = btreeMoveto(pCur, pX->pKey, pX->nKey, appendBias, &loc); if( rc ) return rc; } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); pPage = pCur->apPage[pCur->iPage]; - assert( pPage->intKey || nKey>=0 ); + assert( pPage->intKey || pX->nKey>=0 ); assert( pPage->leaf || !pPage->intKey ); TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", - pCur->pgnoRoot, nKey, nData, pPage->pgno, + pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, loc==0 ? "overwrite" : "new entry")); assert( pPage->isInit ); newCell = pBt->pTmpSpace; assert( newCell!=0 ); - rc = fillInCell(pPage, newCell, pKey, nKey, pData, nData, nZero, &szNew); + rc = fillInCell(pPage, newCell, pX, &szNew); if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); assert( szNew <= MX_CELL_SIZE(pBt) ); @@ -63857,6 +66163,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( pPage->leaf ); } insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); + assert( pPage->nOverflow==0 || rc==SQLITE_OK ); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); /* If no error has occurred and pPage has an overflow cell, call balance() @@ -63880,7 +66187,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** row without seeking the cursor. This can be a big performance boost. */ pCur->info.nSize = 0; - if( rc==SQLITE_OK && pPage->nOverflow ){ + if( pPage->nOverflow ){ + assert( rc==SQLITE_OK ); pCur->curFlags &= ~(BTCF_ValidNKey); rc = balance(pCur); @@ -64016,7 +66324,9 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ pTmp = pBt->pTmpSpace; assert( pTmp!=0 ); rc = sqlite3PagerWrite(pLeaf->pDbPage); - insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); + if( rc==SQLITE_OK ){ + insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); + } dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); if( rc ) return rc; } @@ -65505,6 +67815,16 @@ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); SQLITE_PRIVATE int sqlite3BtreeSharable(Btree *p){ return p->sharable; } + +/* +** Return the number of connections to the BtShared object accessed by +** the Btree handle passed as the only argument. For private caches +** this is always 1. For shared caches it may be 1 or greater. 
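+** For example, if two database handles are opened on the same file
+** while sqlite3_enable_shared_cache(1) is in effect, both Btree
+** handles reference a single BtShared and this routine returns 2 for
+** either of them.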
+*/ +SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree *p){ + testcase( p->sharable ); + return p->pBt->nRef; +} #endif /************** End of btree.c ***********************************************/ @@ -66288,10 +68608,10 @@ SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *pTo, Btree *pFrom){ ** sqlite3_backup_step(), we can guarantee that the copy finishes ** within a single call (unless an error occurs). The assert() statement ** checks this assumption - (p->rc) should be set to either SQLITE_DONE - ** or an error code. - */ + ** or an error code. */ sqlite3_backup_step(&b, 0x7FFFFFFF); assert( b.rc!=SQLITE_OK ); + rc = sqlite3_backup_finish(&b); if( rc==SQLITE_OK ){ pTo->pBt->btsFlags &= ~BTS_PAGESIZE_FIXED; @@ -67113,10 +69433,6 @@ SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem *pTo, const Mem *pFrom, int sr SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem *pTo, const Mem *pFrom){ int rc = SQLITE_OK; - /* The pFrom==0 case in the following assert() is when an sqlite3_value - ** from sqlite3_value_dup() is used as the argument - ** to sqlite3_result_value(). */ - assert( pTo->db==pFrom->db || pFrom->db==0 ); assert( (pFrom->flags & MEM_RowSet)==0 ); if( VdbeMemDynamic(pTo) ) vdbeMemClearExternAndSetNull(pTo); memcpy(pTo, pFrom, MEMCELLSIZE); @@ -67306,11 +69622,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree( /* Note: the calls to BtreeKeyFetch() and DataFetch() below assert() ** that both the BtShared and database handle mutexes are held. */ assert( (pMem->flags & MEM_RowSet)==0 ); - if( key ){ - zData = (char *)sqlite3BtreeKeyFetch(pCur, &available); - }else{ - zData = (char *)sqlite3BtreeDataFetch(pCur, &available); - } + zData = (char *)sqlite3BtreePayloadFetch(pCur, &available); assert( zData!=0 ); if( offset+amt<=available ){ @@ -68090,20 +70402,13 @@ SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe *p, const char *z, int n, int isPrepa p->isPrepareV2 = (u8)isPrepareV2; } -/* -** Return the SQL associated with a prepared statement -*/ -SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){ - Vdbe *p = (Vdbe *)pStmt; - return p ? p->zSql : 0; -} - /* ** Swap all content between two VDBE structures. */ SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ Vdbe tmp, *pTmp; char *zTmp; + assert( pA->db==pB->db ); tmp = *pA; *pA = *pB; *pB = tmp; @@ -68570,73 +70875,84 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){ ** (4) Initialize the p4.xAdvance pointer on opcodes that use it. ** ** (5) Reclaim the memory allocated for storing labels. +** +** This routine will only function correctly if the mkopcodeh.tcl generator +** script numbers the opcodes correctly. Changes to this routine must be +** coordinated with changes to mkopcodeh.tcl. */ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ - int i; int nMaxArgs = *pMaxFuncArgs; Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; p->readOnly = 1; p->bIsReader = 0; - for(pOp=p->aOp, i=p->nOp-1; i>=0; i--, pOp++){ - u8 opcode = pOp->opcode; - - /* NOTE: Be sure to update mkopcodeh.tcl when adding or removing - ** cases from this switch! */ - switch( opcode ){ - case OP_Transaction: { - if( pOp->p2!=0 ) p->readOnly = 0; - /* fall thru */ - } - case OP_AutoCommit: - case OP_Savepoint: { - p->bIsReader = 1; - break; - } + pOp = &p->aOp[p->nOp-1]; + while(1){ + + /* Only JUMP opcodes and the short list of special opcodes in the switch + ** below need to be considered. The mkopcodeh.tcl generator script groups + ** all these opcodes together near the front of the opcode list. 
Skip + ** any opcode that does not need processing by virtual of the fact that + ** it is larger than SQLITE_MX_JUMP_OPCODE, as a performance optimization. + */ + if( pOp->opcode<=SQLITE_MX_JUMP_OPCODE ){ + /* NOTE: Be sure to update mkopcodeh.tcl when adding or removing + ** cases from this switch! */ + switch( pOp->opcode ){ + case OP_Transaction: { + if( pOp->p2!=0 ) p->readOnly = 0; + /* fall thru */ + } + case OP_AutoCommit: + case OP_Savepoint: { + p->bIsReader = 1; + break; + } #ifndef SQLITE_OMIT_WAL - case OP_Checkpoint: + case OP_Checkpoint: #endif - case OP_Vacuum: - case OP_JournalMode: { - p->readOnly = 0; - p->bIsReader = 1; - break; - } + case OP_Vacuum: + case OP_JournalMode: { + p->readOnly = 0; + p->bIsReader = 1; + break; + } #ifndef SQLITE_OMIT_VIRTUALTABLE - case OP_VUpdate: { - if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; - break; - } - case OP_VFilter: { - int n; - assert( p->nOp - i >= 3 ); - assert( pOp[-1].opcode==OP_Integer ); - n = pOp[-1].p1; - if( n>nMaxArgs ) nMaxArgs = n; - break; - } + case OP_VUpdate: { + if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; + break; + } + case OP_VFilter: { + int n; + assert( (pOp - p->aOp) >= 3 ); + assert( pOp[-1].opcode==OP_Integer ); + n = pOp[-1].p1; + if( n>nMaxArgs ) nMaxArgs = n; + break; + } #endif - case OP_Next: - case OP_NextIfOpen: - case OP_SorterNext: { - pOp->p4.xAdvance = sqlite3BtreeNext; - pOp->p4type = P4_ADVANCE; - break; + case OP_Next: + case OP_NextIfOpen: + case OP_SorterNext: { + pOp->p4.xAdvance = sqlite3BtreeNext; + pOp->p4type = P4_ADVANCE; + break; + } + case OP_Prev: + case OP_PrevIfOpen: { + pOp->p4.xAdvance = sqlite3BtreePrevious; + pOp->p4type = P4_ADVANCE; + break; + } } - case OP_Prev: - case OP_PrevIfOpen: { - pOp->p4.xAdvance = sqlite3BtreePrevious; - pOp->p4type = P4_ADVANCE; - break; + if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 && pOp->p2<0 ){ + assert( ADDR(pOp->p2)nLabel ); + pOp->p2 = aLabel[ADDR(pOp->p2)]; } } - - pOp->opflags = sqlite3OpcodeProperty[opcode]; - if( (pOp->opflags & OPFLG_JUMP)!=0 && pOp->p2<0 ){ - assert( ADDR(pOp->p2)nLabel ); - pOp->p2 = aLabel[ADDR(pOp->p2)]; - } + if( pOp==p->aOp ) break; + pOp--; } sqlite3DbFree(p->db, pParse->aLabel); pParse->aLabel = 0; @@ -68805,7 +71121,7 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe *p, int addr){ ** the FuncDef is not ephermal, then do nothing. */ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ - if( ALWAYS(pDef) && (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ + if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ sqlite3DbFree(db, pDef); } } @@ -68815,53 +71131,57 @@ static void vdbeFreeOpArray(sqlite3 *, Op *, int); /* ** Delete a P4 value if necessary. 
*/ +static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){ + if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); + sqlite3DbFree(db, p); +} +static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){ + freeEphemeralFunction(db, p->pFunc); + sqlite3DbFree(db, p); +} static void freeP4(sqlite3 *db, int p4type, void *p4){ - if( p4 ){ - assert( db ); - switch( p4type ){ - case P4_FUNCCTX: { - freeEphemeralFunction(db, ((sqlite3_context*)p4)->pFunc); - /* Fall through into the next case */ - } - case P4_REAL: - case P4_INT64: - case P4_DYNAMIC: - case P4_INTARRAY: { - sqlite3DbFree(db, p4); - break; - } - case P4_KEYINFO: { - if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4); - break; - } + assert( db ); + switch( p4type ){ + case P4_FUNCCTX: { + freeP4FuncCtx(db, (sqlite3_context*)p4); + break; + } + case P4_REAL: + case P4_INT64: + case P4_DYNAMIC: + case P4_INTARRAY: { + sqlite3DbFree(db, p4); + break; + } + case P4_KEYINFO: { + if( db->pnBytesFreed==0 ) sqlite3KeyInfoUnref((KeyInfo*)p4); + break; + } #ifdef SQLITE_ENABLE_CURSOR_HINTS - case P4_EXPR: { - sqlite3ExprDelete(db, (Expr*)p4); - break; - } + case P4_EXPR: { + sqlite3ExprDelete(db, (Expr*)p4); + break; + } #endif - case P4_MPRINTF: { - if( db->pnBytesFreed==0 ) sqlite3_free(p4); - break; - } - case P4_FUNCDEF: { - freeEphemeralFunction(db, (FuncDef*)p4); - break; - } - case P4_MEM: { - if( db->pnBytesFreed==0 ){ - sqlite3ValueFree((sqlite3_value*)p4); - }else{ - Mem *p = (Mem*)p4; - if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); - sqlite3DbFree(db, p); - } - break; - } - case P4_VTAB : { - if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); - break; + case P4_MPRINTF: { + if( db->pnBytesFreed==0 ) sqlite3_free(p4); + break; + } + case P4_FUNCDEF: { + freeEphemeralFunction(db, (FuncDef*)p4); + break; + } + case P4_MEM: { + if( db->pnBytesFreed==0 ){ + sqlite3ValueFree((sqlite3_value*)p4); + }else{ + freeP4Mem(db, (Mem*)p4); } + break; + } + case P4_VTAB : { + if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); + break; } } } @@ -69343,6 +71663,10 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){ zTemp[0] = 0; break; } + case P4_TABLE: { + sqlite3XPrintf(&x, "%s", pOp->p4.pTab->zName); + break; + } default: { zP4 = pOp->p4.z; if( zP4==0 ){ @@ -71533,6 +73857,7 @@ SQLITE_PRIVATE void sqlite3VdbeRecordUnpack( pMem->db = pKeyInfo->db; /* pMem->flags = 0; // sqlite3VdbeSerialGet() will set this for us */ pMem->szMalloc = 0; + pMem->z = 0; d += sqlite3VdbeSerialGet(&aKey[d], serial_type, pMem); pMem++; if( (++u)>=p->nField ) break; @@ -72320,8 +74645,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){ ** this code can safely assume that nCellKey is 32-bits */ assert( sqlite3BtreeCursorIsValid(pCur) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); - assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ + nCellKey = sqlite3BtreePayloadSize(pCur); assert( (nCellKey & SQLITE_MAX_U32)==(u64)nCellKey ); /* Read in the complete content of the index entry */ @@ -72398,8 +74722,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare( assert( pC->eCurType==CURTYPE_BTREE ); pCur = pC->uc.pCursor; assert( sqlite3BtreeCursorIsValid(pCur) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCur, &nCellKey); - assert( rc==SQLITE_OK ); /* pCur is always valid so KeySize cannot fail */ + nCellKey = sqlite3BtreePayloadSize(pCur); /* nCellKey will always be between 0 and 0xffffffff because of the way ** that btreeParseCellPtr() and sqlite3GetVarint32() are 
implemented */ if( nCellKey<=0 || nCellKey>0x7fffffff ){ @@ -72513,6 +74836,90 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){ } #endif /* SQLITE_OMIT_VIRTUALTABLE */ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + +/* +** If the second argument is not NULL, release any allocations associated +** with the memory cells in the p->aMem[] array. Also free the UnpackedRecord +** structure itself, using sqlite3DbFree(). +** +** This function is used to free UnpackedRecord structures allocated by +** the vdbeUnpackRecord() function found in vdbeapi.c. +*/ +static void vdbeFreeUnpacked(sqlite3 *db, UnpackedRecord *p){ + if( p ){ + int i; + for(i=0; inField; i++){ + Mem *pMem = &p->aMem[i]; + if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem); + } + sqlite3DbFree(db, p); + } +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** Invoke the pre-update hook. If this is an UPDATE or DELETE pre-update call, +** then cursor passed as the second argument should point to the row about +** to be update or deleted. If the application calls sqlite3_preupdate_old(), +** the required value will be read from the row the cursor points to. +*/ +SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( + Vdbe *v, /* Vdbe pre-update hook is invoked by */ + VdbeCursor *pCsr, /* Cursor to grab old.* values from */ + int op, /* SQLITE_INSERT, UPDATE or DELETE */ + const char *zDb, /* Database name */ + Table *pTab, /* Modified table */ + i64 iKey1, /* Initial key value */ + int iReg /* Register for new.* record */ +){ + sqlite3 *db = v->db; + i64 iKey2; + PreUpdate preupdate; + const char *zTbl = pTab->zName; + static const u8 fakeSortOrder = 0; + + assert( db->pPreUpdate==0 ); + memset(&preupdate, 0, sizeof(PreUpdate)); + if( op==SQLITE_UPDATE ){ + iKey2 = v->aMem[iReg].u.i; + }else{ + iKey2 = iKey1; + } + + assert( pCsr->nField==pTab->nCol + || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1) + ); + + preupdate.v = v; + preupdate.pCsr = pCsr; + preupdate.op = op; + preupdate.iNewReg = iReg; + preupdate.keyinfo.db = db; + preupdate.keyinfo.enc = ENC(db); + preupdate.keyinfo.nField = pTab->nCol; + preupdate.keyinfo.aSortOrder = (u8*)&fakeSortOrder; + preupdate.iKey1 = iKey1; + preupdate.iKey2 = iKey2; + preupdate.iPKey = pTab->iPKey; + + db->pPreUpdate = &preupdate; + db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2); + db->pPreUpdate = 0; + sqlite3DbFree(db, preupdate.aRecord); + vdbeFreeUnpacked(db, preupdate.pUnpacked); + vdbeFreeUnpacked(db, preupdate.pNewUnpacked); + if( preupdate.aNew ){ + int i; + for(i=0; inField; i++){ + sqlite3VdbeMemRelease(&preupdate.aNew[i]); + } + sqlite3DbFree(db, preupdate.aNew); + } +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + /************** End of vdbeaux.c *********************************************/ /************** Begin file vdbeapi.c *****************************************/ /* @@ -72577,12 +74984,19 @@ static int vdbeSafetyNotNull(Vdbe *p){ */ static SQLITE_NOINLINE void invokeProfileCallback(sqlite3 *db, Vdbe *p){ sqlite3_int64 iNow; + sqlite3_int64 iElapse; assert( p->startTime>0 ); - assert( db->xProfile!=0 ); + assert( db->xProfile!=0 || (db->mTrace & SQLITE_TRACE_PROFILE)!=0 ); assert( db->init.busy==0 ); assert( p->zSql!=0 ); sqlite3OsCurrentTimeInt64(db->pVfs, &iNow); - db->xProfile(db->pProfileArg, p->zSql, (iNow - p->startTime)*1000000); + iElapse = (iNow - p->startTime)*1000000; + if( db->xProfile ){ + db->xProfile(db->pProfileArg, p->zSql, iElapse); + } + if( db->mTrace & 
SQLITE_TRACE_PROFILE ){ + db->xTrace(SQLITE_TRACE_PROFILE, db->pTraceArg, p, (void*)&iElapse); + } p->startTime = 0; } /* @@ -73086,7 +75500,8 @@ static int sqlite3Step(Vdbe *p){ ); #ifndef SQLITE_OMIT_TRACE - if( db->xProfile && !db->init.busy && p->zSql ){ + if( (db->xProfile || (db->mTrace & SQLITE_TRACE_PROFILE)!=0) + && !db->init.busy && p->zSql ){ sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime); }else{ assert( p->startTime==0 ); @@ -74121,6 +76536,219 @@ SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, i return (int)v; } +/* +** Return the SQL associated with a prepared statement +*/ +SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt){ + Vdbe *p = (Vdbe *)pStmt; + return p ? p->zSql : 0; +} + +/* +** Return the SQL associated with a prepared statement with +** bound parameters expanded. Space to hold the returned string is +** obtained from sqlite3_malloc(). The caller is responsible for +** freeing the returned string by passing it to sqlite3_free(). +** +** The SQLITE_TRACE_SIZE_LIMIT puts an upper bound on the size of +** expanded bound parameters. +*/ +SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt){ +#ifdef SQLITE_OMIT_TRACE + return 0; +#else + char *z = 0; + const char *zSql = sqlite3_sql(pStmt); + if( zSql ){ + Vdbe *p = (Vdbe *)pStmt; + sqlite3_mutex_enter(p->db->mutex); + z = sqlite3VdbeExpandSql(p, zSql); + sqlite3_mutex_leave(p->db->mutex); + } + return z; +#endif +} + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** Allocate and populate an UnpackedRecord structure based on the serialized +** record in nKey/pKey. Return a pointer to the new UnpackedRecord structure +** if successful, or a NULL pointer if an OOM error is encountered. +*/ +static UnpackedRecord *vdbeUnpackRecord( + KeyInfo *pKeyInfo, + int nKey, + const void *pKey +){ + char *dummy; /* Dummy argument for AllocUnpackedRecord() */ + UnpackedRecord *pRet; /* Return value */ + + pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo, 0, 0, &dummy); + if( pRet ){ + memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nField+1)); + sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet); + } + return pRet; +} + +/* +** This function is called from within a pre-update callback to retrieve +** a field of the row currently being updated or deleted. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ + PreUpdate *p = db->pPreUpdate; + int rc = SQLITE_OK; + + /* Test that this call is being made from within an SQLITE_DELETE or + ** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */ + if( !p || p->op==SQLITE_INSERT ){ + rc = SQLITE_MISUSE_BKPT; + goto preupdate_old_out; + } + if( iIdx>=p->pCsr->nField || iIdx<0 ){ + rc = SQLITE_RANGE; + goto preupdate_old_out; + } + + /* If the old.* record has not yet been loaded into memory, do so now. 
*/ + if( p->pUnpacked==0 ){ + u32 nRec; + u8 *aRec; + + nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor); + aRec = sqlite3DbMallocRaw(db, nRec); + if( !aRec ) goto preupdate_old_out; + rc = sqlite3BtreeData(p->pCsr->uc.pCursor, 0, nRec, aRec); + if( rc==SQLITE_OK ){ + p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec); + if( !p->pUnpacked ) rc = SQLITE_NOMEM; + } + if( rc!=SQLITE_OK ){ + sqlite3DbFree(db, aRec); + goto preupdate_old_out; + } + p->aRecord = aRec; + } + + if( iIdx>=p->pUnpacked->nField ){ + *ppValue = (sqlite3_value *)columnNullValue(); + }else{ + *ppValue = &p->pUnpacked->aMem[iIdx]; + if( iIdx==p->iPKey ){ + sqlite3VdbeMemSetInt64(*ppValue, p->iKey1); + } + } + + preupdate_old_out: + sqlite3Error(db, rc); + return sqlite3ApiExit(db, rc); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is called from within a pre-update callback to retrieve +** the number of columns in the row being updated, deleted or inserted. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *db){ + PreUpdate *p = db->pPreUpdate; + return (p ? p->keyinfo.nField : 0); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is designed to be called from within a pre-update callback +** only. It returns zero if the change that caused the callback was made +** immediately by a user SQL statement. Or, if the change was made by a +** trigger program, it returns the number of trigger programs currently +** on the stack (1 for a top-level trigger, 2 for a trigger fired by a +** top-level trigger etc.). +** +** For the purposes of the previous paragraph, a foreign key CASCADE, SET NULL +** or SET DEFAULT action is considered a trigger. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *db){ + PreUpdate *p = db->pPreUpdate; + return (p ? p->v->nFrame : 0); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** This function is called from within a pre-update callback to retrieve +** a field of the row currently being updated or inserted. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ + PreUpdate *p = db->pPreUpdate; + int rc = SQLITE_OK; + Mem *pMem; + + if( !p || p->op==SQLITE_DELETE ){ + rc = SQLITE_MISUSE_BKPT; + goto preupdate_new_out; + } + if( iIdx>=p->pCsr->nField || iIdx<0 ){ + rc = SQLITE_RANGE; + goto preupdate_new_out; + } + + if( p->op==SQLITE_INSERT ){ + /* For an INSERT, memory cell p->iNewReg contains the serialized record + ** that is being inserted. Deserialize it. */ + UnpackedRecord *pUnpack = p->pNewUnpacked; + if( !pUnpack ){ + Mem *pData = &p->v->aMem[p->iNewReg]; + rc = sqlite3VdbeMemExpandBlob(pData); + if( rc!=SQLITE_OK ) goto preupdate_new_out; + pUnpack = vdbeUnpackRecord(&p->keyinfo, pData->n, pData->z); + if( !pUnpack ){ + rc = SQLITE_NOMEM; + goto preupdate_new_out; + } + p->pNewUnpacked = pUnpack; + } + if( iIdx>=pUnpack->nField ){ + pMem = (sqlite3_value *)columnNullValue(); + }else{ + pMem = &pUnpack->aMem[iIdx]; + if( iIdx==p->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey2); + } + } + }else{ + /* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required + ** value. Make a copy of the cell contents and return a pointer to it. + ** It is not safe to return a pointer to the memory cell itself as the + ** caller may modify the value text encoding. 
+ */ + assert( p->op==SQLITE_UPDATE ); + if( !p->aNew ){ + p->aNew = (Mem *)sqlite3DbMallocZero(db, sizeof(Mem) * p->pCsr->nField); + if( !p->aNew ){ + rc = SQLITE_NOMEM; + goto preupdate_new_out; + } + } + assert( iIdx>=0 && iIdxpCsr->nField ); + pMem = &p->aNew[iIdx]; + if( pMem->flags==0 ){ + if( iIdx==p->iPKey ){ + sqlite3VdbeMemSetInt64(pMem, p->iKey2); + }else{ + rc = sqlite3VdbeMemCopy(pMem, &p->v->aMem[p->iNewReg+1+iIdx]); + if( rc!=SQLITE_OK ) goto preupdate_new_out; + } + } + } + *ppValue = pMem; + + preupdate_new_out: + sqlite3Error(db, rc); + return sqlite3ApiExit(db, rc); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + #ifdef SQLITE_ENABLE_STMT_SCANSTATUS /* ** Return status data for a single loop within query pStmt. @@ -74275,10 +76903,13 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( int i; /* Loop counter */ Mem *pVar; /* Value of a host parameter */ StrAccum out; /* Accumulate the output here */ +#ifndef SQLITE_OMIT_UTF16 + Mem utf8; /* Used to convert UTF16 parameters into UTF8 for display */ +#endif char zBase[100]; /* Initial working space */ db = p->db; - sqlite3StrAccumInit(&out, db, zBase, sizeof(zBase), + sqlite3StrAccumInit(&out, 0, zBase, sizeof(zBase), db->aLimit[SQLITE_LIMIT_LENGTH]); if( db->nVdbeExec>1 ){ while( *zRawSql ){ @@ -74329,12 +76960,14 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( int nOut; /* Number of bytes of the string text to include in output */ #ifndef SQLITE_OMIT_UTF16 u8 enc = ENC(db); - Mem utf8; if( enc!=SQLITE_UTF8 ){ memset(&utf8, 0, sizeof(utf8)); utf8.db = db; sqlite3VdbeMemSetStr(&utf8, pVar->z, pVar->n, enc, SQLITE_STATIC); - sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8); + if( SQLITE_NOMEM==sqlite3VdbeChangeEncoding(&utf8, SQLITE_UTF8) ){ + out.accError = STRACCUM_NOMEM; + out.nAlloc = 0; + } pVar = &utf8; } #endif @@ -74376,6 +77009,7 @@ SQLITE_PRIVATE char *sqlite3VdbeExpandSql( } } } + if( out.accError ) sqlite3StrAccumReset(&out); return sqlite3StrAccumFinish(&out); } @@ -74471,6 +77105,16 @@ static void updateMaxBlobsize(Mem *p){ } #endif +/* +** This macro evaluates to true if either the update hook or the preupdate +** hook are enabled for database connect DB. +*/ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +# define HAS_UPDATE_HOOK(DB) ((DB)->xPreUpdateCallback||(DB)->xUpdateCallback) +#else +# define HAS_UPDATE_HOOK(DB) ((DB)->xUpdateCallback) +#endif + /* ** The next global variable is incremented each time the OP_Found opcode ** is executed. This is used to test whether or not the foreign key @@ -74590,7 +77234,7 @@ static VdbeCursor *allocateCursor( (eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0); assert( iCur>=0 && iCurnCursor ); - if( p->apCsr[iCur] ){ + if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/ sqlite3VdbeFreeCursor(p, p->apCsr[iCur]); p->apCsr[iCur] = 0; } @@ -74667,7 +77311,7 @@ static void applyAffinity( if( affinity>=SQLITE_AFF_NUMERIC ){ assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL || affinity==SQLITE_AFF_NUMERIC ); - if( (pRec->flags & MEM_Int)==0 ){ + if( (pRec->flags & MEM_Int)==0 ){ /*OPTIMIZATION-IF-FALSE*/ if( (pRec->flags & MEM_Real)==0 ){ if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1); }else{ @@ -74677,10 +77321,13 @@ static void applyAffinity( }else if( affinity==SQLITE_AFF_TEXT ){ /* Only attempt the conversion to TEXT if there is an integer or real ** representation (blob and NULL do not get converted) but no string - ** representation. 
- */ - if( 0==(pRec->flags&MEM_Str) && (pRec->flags&(MEM_Real|MEM_Int)) ){ - sqlite3VdbeMemStringify(pRec, enc, 1); + ** representation. It would be harmless to repeat the conversion if + ** there is already a string rep, but it is pointless to waste those + ** CPU cycles. */ + if( 0==(pRec->flags&MEM_Str) ){ /*OPTIMIZATION-IF-FALSE*/ + if( (pRec->flags&(MEM_Real|MEM_Int)) ){ + sqlite3VdbeMemStringify(pRec, enc, 1); + } } pRec->flags &= ~(MEM_Real|MEM_Int); } @@ -74895,8 +77542,8 @@ static void registerTrace(int iReg, Mem *p){ ** This file contains inline asm code for retrieving "high-performance" ** counters for x86 class CPUs. */ -#ifndef _HWTIME_H_ -#define _HWTIME_H_ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H /* ** The following routine only works on pentium-class (or newer) processors. @@ -74964,7 +77611,7 @@ SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } #endif -#endif /* !defined(_HWTIME_H_) */ +#endif /* !defined(SQLITE_HWTIME_H) */ /************** End of hwtime.h **********************************************/ /************** Continuing where we left off in vdbe.c ***********************/ @@ -75006,7 +77653,7 @@ static Mem *out2Prerelease(Vdbe *p, VdbeOp *pOp){ assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); pOut = &p->aMem[pOp->p2]; memAboutToChange(p, pOut); - if( VdbeMemDynamic(pOut) ){ + if( VdbeMemDynamic(pOut) ){ /*OPTIMIZATION-IF-FALSE*/ return out2PrereleaseWithClear(pOut); }else{ pOut->flags = MEM_Int; @@ -75138,37 +77785,39 @@ SQLITE_PRIVATE int sqlite3VdbeExec( /* Sanity checking on other operands */ #ifdef SQLITE_DEBUG - assert( pOp->opflags==sqlite3OpcodeProperty[pOp->opcode] ); - if( (pOp->opflags & OPFLG_IN1)!=0 ){ - assert( pOp->p1>0 ); - assert( pOp->p1<=(p->nMem+1 - p->nCursor) ); - assert( memIsValid(&aMem[pOp->p1]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) ); - REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]); - } - if( (pOp->opflags & OPFLG_IN2)!=0 ){ - assert( pOp->p2>0 ); - assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); - assert( memIsValid(&aMem[pOp->p2]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) ); - REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]); - } - if( (pOp->opflags & OPFLG_IN3)!=0 ){ - assert( pOp->p3>0 ); - assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); - assert( memIsValid(&aMem[pOp->p3]) ); - assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) ); - REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]); - } - if( (pOp->opflags & OPFLG_OUT2)!=0 ){ - assert( pOp->p2>0 ); - assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); - memAboutToChange(p, &aMem[pOp->p2]); - } - if( (pOp->opflags & OPFLG_OUT3)!=0 ){ - assert( pOp->p3>0 ); - assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); - memAboutToChange(p, &aMem[pOp->p3]); + { + u8 opProperty = sqlite3OpcodeProperty[pOp->opcode]; + if( (opProperty & OPFLG_IN1)!=0 ){ + assert( pOp->p1>0 ); + assert( pOp->p1<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p1]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p1]) ); + REGISTER_TRACE(pOp->p1, &aMem[pOp->p1]); + } + if( (opProperty & OPFLG_IN2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p2]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p2]) ); + REGISTER_TRACE(pOp->p2, &aMem[pOp->p2]); + } + if( (opProperty & OPFLG_IN3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + assert( memIsValid(&aMem[pOp->p3]) ); + assert( sqlite3VdbeCheckMemInvariants(&aMem[pOp->p3]) ); + REGISTER_TRACE(pOp->p3, &aMem[pOp->p3]); + } + if( 
(opProperty & OPFLG_OUT2)!=0 ){ + assert( pOp->p2>0 ); + assert( pOp->p2<=(p->nMem+1 - p->nCursor) ); + memAboutToChange(p, &aMem[pOp->p2]); + } + if( (opProperty & OPFLG_OUT3)!=0 ){ + assert( pOp->p3>0 ); + assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); + memAboutToChange(p, &aMem[pOp->p3]); + } } #endif #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) @@ -75408,8 +78057,6 @@ case OP_HaltIfNull: { /* in3 */ ** is the same as executing Halt. */ case OP_Halt: { - const char *zType; - const char *zLogFmt; VdbeFrame *pFrame; int pcx; @@ -75438,34 +78085,28 @@ case OP_Halt: { p->rc = pOp->p1; p->errorAction = (u8)pOp->p2; p->pc = pcx; + assert( pOp->p5>=0 && pOp->p5<=4 ); if( p->rc ){ if( pOp->p5 ){ static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK", "FOREIGN KEY" }; - assert( pOp->p5>=1 && pOp->p5<=4 ); testcase( pOp->p5==1 ); testcase( pOp->p5==2 ); testcase( pOp->p5==3 ); testcase( pOp->p5==4 ); - zType = azType[pOp->p5-1]; + sqlite3VdbeError(p, "%s constraint failed", azType[pOp->p5-1]); + if( pOp->p4.z ){ + p->zErrMsg = sqlite3MPrintf(db, "%z: %s", p->zErrMsg, pOp->p4.z); + } }else{ - zType = 0; - } - assert( zType!=0 || pOp->p4.z!=0 ); - zLogFmt = "abort at %d in [%s]: %s"; - if( zType && pOp->p4.z ){ - sqlite3VdbeError(p, "%s constraint failed: %s", zType, pOp->p4.z); - }else if( pOp->p4.z ){ sqlite3VdbeError(p, "%s", pOp->p4.z); - }else{ - sqlite3VdbeError(p, "%s constraint failed", zType); } - sqlite3_log(pOp->p1, zLogFmt, pcx, p->zSql, p->zErrMsg); + sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg); } rc = sqlite3VdbeHalt(p); assert( rc==SQLITE_BUSY || rc==SQLITE_OK || rc==SQLITE_ERROR ); if( rc==SQLITE_BUSY ){ - p->rc = rc = SQLITE_BUSY; + p->rc = SQLITE_BUSY; }else{ assert( rc==SQLITE_OK || (p->rc&0xff)==SQLITE_CONSTRAINT ); assert( rc==SQLITE_OK || db->nDeferredCons>0 || db->nDeferredImmCons>0 ); @@ -75531,10 +78172,7 @@ case OP_String8: { /* same as TK_STRING, out2 */ #ifndef SQLITE_OMIT_UTF16 if( encoding!=SQLITE_UTF8 ){ rc = sqlite3VdbeMemSetStr(pOut, pOp->p4.z, -1, SQLITE_UTF8, SQLITE_STATIC); - if( rc ){ - assert( rc==SQLITE_TOOBIG ); /* This is the only possible error here */ - goto too_big; - } + assert( rc==SQLITE_OK || rc==SQLITE_TOOBIG ); if( SQLITE_OK!=sqlite3VdbeChangeEncoding(pOut, encoding) ) goto no_mem; assert( pOut->szMalloc>0 && pOut->zMalloc==pOut->z ); assert( VdbeMemDynamic(pOut)==0 ); @@ -75547,10 +78185,12 @@ case OP_String8: { /* same as TK_STRING, out2 */ pOp->p4.z = pOut->z; pOp->p1 = pOut->n; } + testcase( rc==SQLITE_TOOBIG ); #endif if( pOp->p1>db->aLimit[SQLITE_LIMIT_LENGTH] ){ goto too_big; } + assert( rc==SQLITE_OK ); /* Fall through to the next case, OP_String */ } @@ -75559,10 +78199,12 @@ case OP_String8: { /* same as TK_STRING, out2 */ ** ** The string value P4 of length P1 (bytes) is stored in register P2. ** -** If P5!=0 and the content of register P3 is greater than zero, then +** If P3 is not zero and the content of register P3 is equal to P5, then ** the datatype of the register P2 is converted to BLOB. The content is ** the same sequence of bytes, it is merely interpreted as a BLOB instead -** of a string, as if it had been CAST. +** of a string, as if it had been CAST. 
In other words: +** +** if( P3!=0 and reg[P3]==P5 ) reg[P2] := CAST(reg[P2] as BLOB) */ case OP_String: { /* out2 */ assert( pOp->p4.z!=0 ); @@ -75573,12 +78215,11 @@ case OP_String: { /* out2 */ pOut->enc = encoding; UPDATE_MAX_BLOBSIZE(pOut); #ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS - if( pOp->p5 ){ - assert( pOp->p3>0 ); + if( pOp->p3>0 ){ assert( pOp->p3<=(p->nMem+1 - p->nCursor) ); pIn3 = &aMem[pOp->p3]; assert( pIn3->flags & MEM_Int ); - if( pIn3->u.i ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term; + if( pIn3->u.i==pOp->p5 ) pOut->flags = MEM_Blob|MEM_Static|MEM_Term; } #endif break; @@ -75850,6 +78491,10 @@ case OP_ResultRow: { } if( db->mallocFailed ) goto no_mem; + if( db->mTrace & SQLITE_TRACE_ROW ){ + db->xTrace(SQLITE_TRACE_ROW, db->pTraceArg, p, 0); + } + /* Return SQLITE_ROW */ p->pc = (int)(pOp - aOp) + 1; @@ -76480,11 +79125,14 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ /* Neither operand is NULL. Do a comparison. */ affinity = pOp->p5 & SQLITE_AFF_MASK; if( affinity>=SQLITE_AFF_NUMERIC ){ - if( (flags1 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ - applyNumericAffinity(pIn1,0); - } - if( (flags3 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ - applyNumericAffinity(pIn3,0); + if( (flags1 | flags3)&MEM_Str ){ + if( (flags1 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ + applyNumericAffinity(pIn1,0); + flags3 = pIn3->flags; + } + if( (flags3 & (MEM_Int|MEM_Real|MEM_Str))==MEM_Str ){ + applyNumericAffinity(pIn3,0); + } } }else if( affinity==SQLITE_AFF_TEXT ){ if( (flags1 & MEM_Str)==0 && (flags1 & (MEM_Int|MEM_Real))!=0 ){ @@ -76493,6 +79141,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ sqlite3VdbeMemStringify(pIn1, encoding, 1); testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) ); flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); + flags3 = pIn3->flags; } if( (flags3 & MEM_Str)==0 && (flags3 & (MEM_Int|MEM_Real))!=0 ){ testcase( pIn3->flags & MEM_Int ); @@ -76845,7 +79494,6 @@ case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */ ** skipped for length() and all content loading can be skipped for typeof(). */ case OP_Column: { - i64 payloadSize64; /* Number of bytes in the record */ int p2; /* column number to retrieve */ VdbeCursor *pC; /* The VDBE cursor */ BtCursor *pCrsr; /* The BTree cursor */ @@ -76868,6 +79516,7 @@ case OP_Column: { /* If the cursor cache is stale, bring it up-to-date */ rc = sqlite3VdbeCursorMoveto(&pC, &p2); + if( rc ) goto abort_due_to_error; assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) ); pDest = &aMem[pOp->p3]; @@ -76881,8 +79530,7 @@ case OP_Column: { assert( pC->eCurType!=CURTYPE_SORTER ); pCrsr = pC->uc.pCursor; - if( rc ) goto abort_due_to_error; - if( pC->cacheStatus!=p->cacheCtr ){ + if( pC->cacheStatus!=p->cacheCtr ){ /*OPTIMIZATION-IF-FALSE*/ if( pC->nullRow ){ if( pC->eCurType==CURTYPE_PSEUDO ){ assert( pC->uc.pseudoTableReg>0 ); @@ -76898,22 +79546,9 @@ case OP_Column: { }else{ assert( pC->eCurType==CURTYPE_BTREE ); assert( pCrsr ); - if( pC->isTable==0 ){ - assert( sqlite3BtreeCursorIsValid(pCrsr) ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &payloadSize64); - assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ - /* sqlite3BtreeParseCellPtr() uses getVarint32() to extract the - ** payload size, so it is impossible for payloadSize64 to be - ** larger than 32 bits. 
*/ - assert( (payloadSize64 & SQLITE_MAX_U32)==(u64)payloadSize64 ); - pC->aRow = sqlite3BtreeKeyFetch(pCrsr, &avail); - pC->payloadSize = (u32)payloadSize64; - }else{ - assert( sqlite3BtreeCursorIsValid(pCrsr) ); - VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &pC->payloadSize); - assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ - pC->aRow = sqlite3BtreeDataFetch(pCrsr, &avail); - } + assert( sqlite3BtreeCursorIsValid(pCrsr) ); + pC->payloadSize = sqlite3BtreePayloadSize(pCrsr); + pC->aRow = sqlite3BtreePayloadFetch(pCrsr, &avail); assert( avail<=65536 ); /* Maximum page size is 64KiB */ if( pC->payloadSize <= (u32)avail ){ pC->szRow = pC->payloadSize; @@ -76929,7 +79564,7 @@ case OP_Column: { aOffset[0] = offset; - if( availaRow does not have to hold the entire row, but it does at least ** need to cover the header of the record. If pC->aRow does not contain ** the complete header, then set it to zero, forcing the header to be @@ -76950,14 +79585,15 @@ case OP_Column: { rc = SQLITE_CORRUPT_BKPT; goto abort_due_to_error; } + }else if( offset>0 ){ /*OPTIMIZATION-IF-TRUE*/ + /* The following goto is an optimization. It can be omitted and + ** everything will still work. But OP_Column is measurably faster + ** by skipping the subsequent conditional, which is always true. + */ + zData = pC->aRow; + assert( pC->nHdrParsed<=p2 ); /* Conditional skipped */ + goto op_column_read_header; } - - /* The following goto is an optimization. It can be omitted and - ** everything will still work. But OP_Column is measurably faster - ** by skipping the subsequent conditional, which is always true. - */ - assert( pC->nHdrParsed<=p2 ); /* Conditional skipped */ - goto op_column_read_header; } /* Make sure at least the first p2+1 entries of the header have been @@ -76967,7 +79603,6 @@ case OP_Column: { /* If there is more header available for parsing in the record, try ** to extract additional fields up through the p2+1-th field */ - op_column_read_header: if( pC->iHdrOffsetaRow==0 ){ @@ -76980,11 +79615,11 @@ case OP_Column: { } /* Fill in pC->aType[i] and aOffset[i] values through the p2-th field. 
*/ + op_column_read_header: i = pC->nHdrParsed; offset64 = aOffset[i]; zHdr = zData + pC->iHdrOffset; zEndHdr = zData + aOffset[0]; - assert( i<=p2 && zHdraType[i++] = t; aOffset[i] = (u32)(offset64 & 0xffffffff); }while( i<=p2 && zHdrnHdrParsed = i; - pC->iHdrOffset = (u32)(zHdr - zData); - + /* The record is corrupt if any of the following are true: ** (1) the bytes of the header extend past the declared header size ** (2) the entire header was used but not all data was used @@ -77011,8 +79644,10 @@ case OP_Column: { rc = SQLITE_CORRUPT_BKPT; goto abort_due_to_error; } - if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem); + pC->nHdrParsed = i; + pC->iHdrOffset = (u32)(zHdr - zData); + if( pC->aRow==0 ) sqlite3VdbeMemRelease(&sMem); }else{ t = 0; } @@ -77040,9 +79675,10 @@ case OP_Column: { assert( p2nHdrParsed ); assert( rc==SQLITE_OK ); assert( sqlite3VdbeCheckMemInvariants(pDest) ); - if( VdbeMemDynamic(pDest) ) sqlite3VdbeMemSetNull(pDest); + if( VdbeMemDynamic(pDest) ){ + sqlite3VdbeMemSetNull(pDest); + } assert( t==pC->aType[p2] ); - pDest->enc = encoding; if( pC->szRow>=aOffset[p2+1] ){ /* This is the common case where the desired content fits on the original ** page - where the content is not on an overflow page */ @@ -77056,6 +79692,7 @@ case OP_Column: { */ static const u16 aFlag[] = { MEM_Blob, MEM_Str|MEM_Term }; pDest->n = len = (t-12)/2; + pDest->enc = encoding; if( pDest->szMalloc < len+2 ){ pDest->flags = MEM_Null; if( sqlite3VdbeMemGrow(pDest, len+2, 0) ) goto no_mem; @@ -77068,6 +79705,7 @@ case OP_Column: { pDest->flags = aFlag[t&1]; } }else{ + pDest->enc = encoding; /* This branch happens only when content is on overflow pages */ if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0 && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0)) @@ -77217,7 +79855,9 @@ case OP_MakeRecord: { testcase( serial_type==127 ); testcase( serial_type==128 ); nHdr += serial_type<=127 ? 1 : sqlite3VarintLen(serial_type); - }while( (--pRec)>=pData0 ); + if( pRec==pData0 ) break; + pRec--; + }while(1); /* EVIDENCE-OF: R-22564-11647 The header begins with a single varint ** which determines the total number of bytes in the header. The varint @@ -77365,7 +80005,7 @@ case OP_Savepoint: { }else{ db->nSavepoint++; } - + /* Link the new savepoint into the database handle's list. */ pNew->pNext = db->pSavepoint; db->pSavepoint = pNew; @@ -78483,6 +81123,30 @@ case OP_Found: { /* jump, in3 */ break; } +/* Opcode: SeekRowid P1 P2 P3 * * +** Synopsis: intkey=r[P3] +** +** P1 is the index of a cursor open on an SQL table btree (with integer +** keys). If register P3 does not contain an integer or if P1 does not +** contain a record with rowid P3 then jump immediately to P2. +** Or, if P2 is 0, raise an SQLITE_CORRUPT error. If P1 does contain +** a record with rowid P3 then +** leave the cursor pointing at that record and fall through to the next +** instruction. +** +** The OP_NotExists opcode performs the same operation, but with OP_NotExists +** the P3 register must be guaranteed to contain an integer value. With this +** opcode, register P3 might not contain an integer. +** +** The OP_NotFound opcode performs the same operation on index btrees +** (with arbitrary multi-value keys). +** +** This opcode leaves the cursor in a state where it cannot be advanced +** in either direction. In other words, the Next and Prev opcodes will +** not work following this opcode. 
+** +** See also: Found, NotFound, NoConflict, SeekRowid +*/ /* Opcode: NotExists P1 P2 P3 * * ** Synopsis: intkey=r[P3] ** @@ -78493,6 +81157,10 @@ case OP_Found: { /* jump, in3 */ ** leave the cursor pointing at that record and fall through to the next ** instruction. ** +** The OP_SeekRowid opcode performs the same operation but also allows the +** P3 register to contain a non-integer value, in which case the jump is +** always taken. This opcode requires that P3 always contain an integer. +** ** The OP_NotFound opcode performs the same operation on index btrees ** (with arbitrary multi-value keys). ** @@ -78500,14 +81168,21 @@ case OP_Found: { /* jump, in3 */ ** in either direction. In other words, the Next and Prev opcodes will ** not work following this opcode. ** -** See also: Found, NotFound, NoConflict +** See also: Found, NotFound, NoConflict, SeekRowid */ -case OP_NotExists: { /* jump, in3 */ +case OP_SeekRowid: { /* jump, in3 */ VdbeCursor *pC; BtCursor *pCrsr; int res; u64 iKey; + pIn3 = &aMem[pOp->p3]; + if( (pIn3->flags & MEM_Int)==0 ){ + applyAffinity(pIn3, SQLITE_AFF_NUMERIC, encoding); + if( (pIn3->flags & MEM_Int)==0 ) goto jump_to_p2; + } + /* Fall through into OP_NotExists */ +case OP_NotExists: /* jump, in3 */ pIn3 = &aMem[pOp->p3]; assert( pIn3->flags & MEM_Int ); assert( pOp->p1>=0 && pOp->p1nCursor ); @@ -78626,8 +81301,7 @@ case OP_NewRowid: { /* out2 */ v = 1; /* IMP: R-61914-48074 */ }else{ assert( sqlite3BtreeCursorIsValid(pC->uc.pCursor) ); - rc = sqlite3BtreeKeySize(pC->uc.pCursor, &v); - assert( rc==SQLITE_OK ); /* Cannot fail following BtreeLast() */ + v = sqlite3BtreeIntegerKey(pC->uc.pCursor); if( v>=MAX_ROWID ){ pC->useRandomRowid = 1; }else{ @@ -78710,10 +81384,12 @@ case OP_NewRowid: { /* out2 */ ** sqlite3_last_insert_rowid() function (otherwise it is unmodified). ** ** If the OPFLAG_USESEEKRESULT flag of P5 is set and if the result of -** the last seek operation (OP_NotExists) was a success, then this +** the last seek operation (OP_NotExists or OP_SeekRowid) was a success, +** then this ** operation will not attempt to find the appropriate row before doing ** the insert but will instead overwrite the row that the cursor is -** currently pointing to. Presumably, the prior OP_NotExists opcode +** currently pointing to. Presumably, the prior OP_NotExists or +** OP_SeekRowid opcode ** has already positioned the cursor correctly. This is an optimization ** that boosts performance by avoiding redundant seeks. ** @@ -78722,9 +81398,9 @@ case OP_NewRowid: { /* out2 */ ** is part of an INSERT operation. The difference is only important to ** the update hook. ** -** Parameter P4 may point to a string containing the table-name, or -** may be NULL. If it is not NULL, then the update-hook -** (sqlite3.xUpdateCallback) is invoked following a successful insert. +** Parameter P4 may point to a Table structure, or may be NULL. If it is +** not NULL, then the update-hook (sqlite3.xUpdateCallback) is invoked +** following a successful insert. 
** ** (WARNING/TODO: If P1 is a pseudo-cursor and P2 is dynamically ** allocated, then ownership of P2 is transferred to the pseudo-cursor @@ -78745,14 +81421,14 @@ case OP_Insert: case OP_InsertInt: { Mem *pData; /* MEM cell holding data for the record to be inserted */ Mem *pKey; /* MEM cell holding key for the record */ - i64 iKey; /* The integer ROWID or key for the record to be inserted */ VdbeCursor *pC; /* Cursor to table into which insert is written */ - int nZero; /* Number of zero-bytes to append */ int seekResult; /* Result of prior seek or 0 if no USESEEKRESULT flag */ const char *zDb; /* database name - used by the update hook */ - const char *zTbl; /* Table name - used by the opdate hook */ + Table *pTab; /* Table structure - used by update and pre-update hooks */ int op; /* Opcode for update hook: SQLITE_UPDATE or SQLITE_INSERT */ + BtreePayload x; /* Payload to be inserted */ + op = 0; pData = &aMem[pOp->p2]; assert( pOp->p1>=0 && pOp->p1nCursor ); assert( memIsValid(pData) ); @@ -78761,6 +81437,7 @@ case OP_InsertInt: { assert( pC->eCurType==CURTYPE_BTREE ); assert( pC->uc.pCursor!=0 ); assert( pC->isTable ); + assert( pOp->p4type==P4_TABLE || pOp->p4type>=P4_STATIC ); REGISTER_TRACE(pOp->p2, pData); if( pOp->opcode==OP_Insert ){ @@ -78768,28 +81445,52 @@ case OP_InsertInt: { assert( pKey->flags & MEM_Int ); assert( memIsValid(pKey) ); REGISTER_TRACE(pOp->p3, pKey); - iKey = pKey->u.i; + x.nKey = pKey->u.i; }else{ assert( pOp->opcode==OP_InsertInt ); - iKey = pOp->p3; + x.nKey = pOp->p3; } + if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){ + assert( pC->isTable ); + assert( pC->iDb>=0 ); + zDb = db->aDb[pC->iDb].zName; + pTab = pOp->p4.pTab; + assert( HasRowid(pTab) ); + op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT); + }else{ + pTab = 0; /* Not needed. Silence a comiler warning. */ + zDb = 0; /* Not needed. Silence a compiler warning. */ + } + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + /* Invoke the pre-update hook, if any */ + if( db->xPreUpdateCallback + && pOp->p4type==P4_TABLE + && !(pOp->p5 & OPFLAG_ISUPDATE) + ){ + sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey, pOp->p2); + } +#endif + if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++; - if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = iKey; + if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = x.nKey; if( pData->flags & MEM_Null ){ - pData->z = 0; - pData->n = 0; + x.pData = 0; + x.nData = 0; }else{ assert( pData->flags & (MEM_Blob|MEM_Str) ); + x.pData = pData->z; + x.nData = pData->n; } seekResult = ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0); if( pData->flags & MEM_Zero ){ - nZero = pData->u.nZero; + x.nZero = pData->u.nZero; }else{ - nZero = 0; + x.nZero = 0; } - rc = sqlite3BtreeInsert(pC->uc.pCursor, 0, iKey, - pData->z, pData->n, nZero, + x.pKey = 0; + rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, (pOp->p5 & OPFLAG_APPEND)!=0, seekResult ); pC->deferredMoveto = 0; @@ -78797,18 +81498,13 @@ case OP_InsertInt: { /* Invoke the update-hook if required. */ if( rc ) goto abort_due_to_error; - if( db->xUpdateCallback && pOp->p4.z ){ - zDb = db->aDb[pC->iDb].zName; - zTbl = pOp->p4.z; - op = ((pOp->p5 & OPFLAG_ISUPDATE) ? 
SQLITE_UPDATE : SQLITE_INSERT); - assert( pC->isTable ); - db->xUpdateCallback(db->pUpdateArg, op, zDb, zTbl, iKey); - assert( pC->iDb>=0 ); + if( db->xUpdateCallback && op ){ + db->xUpdateCallback(db->pUpdateArg, op, zDb, pTab->zName, x.nKey); } break; } -/* Opcode: Delete P1 P2 * P4 P5 +/* Opcode: Delete P1 P2 P3 P4 P5 ** ** Delete the record at which the P1 cursor is currently pointing. ** @@ -78832,15 +81528,24 @@ case OP_InsertInt: { ** P1 must not be pseudo-table. It has to be a real table with ** multiple rows. ** -** If P4 is not NULL, then it is the name of the table that P1 is -** pointing to. The update hook will be invoked, if it exists. -** If P4 is not NULL then the P1 cursor must have been positioned -** using OP_NotFound prior to invoking this opcode. +** If P4 is not NULL then it points to a Table struture. In this case either +** the update or pre-update hook, or both, may be invoked. The P1 cursor must +** have been positioned using OP_NotFound prior to invoking this opcode in +** this case. Specifically, if one is configured, the pre-update hook is +** invoked if P4 is not NULL. The update-hook is invoked if one is configured, +** P4 is not NULL, and the OPFLAG_NCHANGE flag is set in P2. +** +** If the OPFLAG_ISUPDATE flag is set in P2, then P3 contains the address +** of the memory cell that contains the value that the rowid of the row will +** be set to by the update. */ case OP_Delete: { VdbeCursor *pC; - u8 hasUpdateCallback; + const char *zDb; + Table *pTab; + int opflags; + opflags = pOp->p2; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; assert( pC!=0 ); @@ -78848,22 +81553,47 @@ case OP_Delete: { assert( pC->uc.pCursor!=0 ); assert( pC->deferredMoveto==0 ); - hasUpdateCallback = db->xUpdateCallback && pOp->p4.z && pC->isTable; - if( pOp->p5 && hasUpdateCallback ){ - sqlite3BtreeKeySize(pC->uc.pCursor, &pC->movetoTarget); - } - #ifdef SQLITE_DEBUG - /* The seek operation that positioned the cursor prior to OP_Delete will - ** have also set the pC->movetoTarget field to the rowid of the row that - ** is being deleted */ - if( pOp->p4.z && pC->isTable && pOp->p5==0 ){ - i64 iKey = 0; - sqlite3BtreeKeySize(pC->uc.pCursor, &iKey); - assert( pC->movetoTarget==iKey ); + if( pOp->p4type==P4_TABLE && HasRowid(pOp->p4.pTab) && pOp->p5==0 ){ + /* If p5 is zero, the seek operation that positioned the cursor prior to + ** OP_Delete will have also set the pC->movetoTarget field to the rowid of + ** the row that is being deleted */ + i64 iKey = sqlite3BtreeIntegerKey(pC->uc.pCursor); + assert( pC->movetoTarget==iKey ); } #endif + /* If the update-hook or pre-update-hook will be invoked, set zDb to + ** the name of the db to pass as to it. Also set local pTab to a copy + ** of p4.pTab. Finally, if p5 is true, indicating that this cursor was + ** last moved with OP_Next or OP_Prev, not Seek or NotFound, set + ** VdbeCursor.movetoTarget to the current rowid. */ + if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){ + assert( pC->iDb>=0 ); + assert( pOp->p4.pTab!=0 ); + zDb = db->aDb[pC->iDb].zName; + pTab = pOp->p4.pTab; + if( (pOp->p5 & OPFLAG_SAVEPOSITION)!=0 && pC->isTable ){ + pC->movetoTarget = sqlite3BtreeIntegerKey(pC->uc.pCursor); + } + }else{ + zDb = 0; /* Not needed. Silence a compiler warning. */ + pTab = 0; /* Not needed. Silence a compiler warning. */ + } + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + /* Invoke the pre-update-hook if required. 
*/ + if( db->xPreUpdateCallback && pOp->p4.pTab && HasRowid(pTab) ){ + assert( !(opflags & OPFLAG_ISUPDATE) || (aMem[pOp->p3].flags & MEM_Int) ); + sqlite3VdbePreUpdateHook(p, pC, + (opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE, + zDb, pTab, pC->movetoTarget, + pOp->p3 + ); + } + if( opflags & OPFLAG_ISNOOP ) break; +#endif + /* Only flags that can be set are SAVEPOISTION and AUXDELETE */ assert( (pOp->p5 & ~(OPFLAG_SAVEPOSITION|OPFLAG_AUXDELETE))==0 ); assert( OPFLAG_SAVEPOSITION==BTREE_SAVEPOSITION ); @@ -78885,15 +81615,18 @@ case OP_Delete: { rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5); pC->cacheStatus = CACHE_STALE; + if( rc ) goto abort_due_to_error; /* Invoke the update-hook if required. */ - if( rc ) goto abort_due_to_error; - if( hasUpdateCallback ){ - db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, - db->aDb[pC->iDb].zName, pOp->p4.z, pC->movetoTarget); - assert( pC->iDb>=0 ); + if( opflags & OPFLAG_NCHANGE ){ + p->nChange++; + if( db->xUpdateCallback && HasRowid(pTab) ){ + db->xUpdateCallback(db->pUpdateArg, SQLITE_DELETE, zDb, pTab->zName, + pC->movetoTarget); + assert( pC->iDb>=0 ); + } } - if( pOp->p2 & OPFLAG_NCHANGE ) p->nChange++; + break; } /* Opcode: ResetCount * * * * * @@ -78995,7 +81728,6 @@ case OP_RowData: { VdbeCursor *pC; BtCursor *pCrsr; u32 n; - i64 n64; pOut = &aMem[pOp->p2]; memAboutToChange(p, pOut); @@ -79013,8 +81745,9 @@ case OP_RowData: { pCrsr = pC->uc.pCursor; /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or - ** OP_Rewind/Op_Next with no intervening instructions that might invalidate - ** the cursor. If this where not the case, on of the following assert()s + ** OP_SeekRowid or OP_Rewind/Op_Next with no intervening instructions + ** that might invalidate the cursor. + ** If this where not the case, on of the following assert()s ** would fail. Should this ever change (because of changes in the code ** generator) then the fix would be to insert a call to ** sqlite3VdbeCursorMoveto(). 
@@ -79026,20 +81759,9 @@ case OP_RowData: { if( rc!=SQLITE_OK ) goto abort_due_to_error; #endif - if( pC->isTable==0 ){ - assert( !pC->isTable ); - VVA_ONLY(rc =) sqlite3BtreeKeySize(pCrsr, &n64); - assert( rc==SQLITE_OK ); /* True because of CursorMoveto() call above */ - if( n64>db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } - n = (u32)n64; - }else{ - VVA_ONLY(rc =) sqlite3BtreeDataSize(pCrsr, &n); - assert( rc==SQLITE_OK ); /* DataSize() cannot fail */ - if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ - goto too_big; - } + n = sqlite3BtreePayloadSize(pCrsr); + if( n>(u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){ + goto too_big; } testcase( n==0 ); if( sqlite3VdbeMemClearAndResize(pOut, MAX(n,32)) ){ @@ -79104,8 +81826,7 @@ case OP_Rowid: { /* out2 */ pOut->flags = MEM_Null; break; } - rc = sqlite3BtreeKeySize(pC->uc.pCursor, &v); - assert( rc==SQLITE_OK ); /* Always so because of CursorRestore() above */ + v = sqlite3BtreeIntegerKey(pC->uc.pCursor); } pOut->u.i = v; break; @@ -79380,8 +82101,7 @@ next_tail: case OP_SorterInsert: /* in2 */ case OP_IdxInsert: { /* in2 */ VdbeCursor *pC; - int nKey; - const char *zKey; + BtreePayload x; assert( pOp->p1>=0 && pOp->p1nCursor ); pC = p->apCsr[pOp->p1]; @@ -79397,9 +82117,12 @@ case OP_IdxInsert: { /* in2 */ if( pOp->opcode==OP_SorterInsert ){ rc = sqlite3VdbeSorterWrite(pC, pIn2); }else{ - nKey = pIn2->n; - zKey = pIn2->z; - rc = sqlite3BtreeInsert(pC->uc.pCursor, zKey, nKey, "", 0, 0, pOp->p3, + x.nKey = pIn2->n; + x.pKey = pIn2->z; + x.nData = 0; + x.nZero = 0; + x.pData = 0; + rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, pOp->p3, ((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0) ); assert( pC->deferredMoveto==0 ); @@ -80373,21 +83096,6 @@ case OP_DecrJumpZero: { /* jump, in1 */ } -/* Opcode: JumpZeroIncr P1 P2 * * * -** Synopsis: if (r[P1]++)==0 ) goto P2 -** -** The register P1 must contain an integer. If register P1 is initially -** zero, then jump to P2. Increment register P1 regardless of whether or -** not the jump is taken. -*/ -case OP_JumpZeroIncr: { /* jump, in1 */ - pIn1 = &aMem[pOp->p1]; - assert( pIn1->flags&MEM_Int ); - VdbeBranchTaken(pIn1->u.i==0, 2); - if( (pIn1->u.i++)==0 ) goto jump_to_p2; - break; -} - /* Opcode: AggStep0 * P2 P3 P4 P5 ** Synopsis: accum=r[P3] step(r[P2@P5]) ** @@ -81182,16 +83890,34 @@ case OP_MaxPgcnt: { /* out2 */ */ case OP_Init: { /* jump */ char *zTrace; - char *z; + + /* If the P4 argument is not NULL, then it must be an SQL comment string. + ** The "--" string is broken up to prevent false-positives with srcck1.c. + ** + ** This assert() provides evidence for: + ** EVIDENCE-OF: R-50676-09860 The callback can compute the same text that + ** would have been returned by the legacy sqlite3_trace() interface by + ** using the X argument when X begins with "--" and invoking + ** sqlite3_expanded_sql(P) otherwise. + */ + assert( pOp->p4.z==0 || strncmp(pOp->p4.z, "-" "- ", 3)==0 ); #ifndef SQLITE_OMIT_TRACE - if( db->xTrace + if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0 && !p->doingRerun && (zTrace = (pOp->p4.z ? 
pOp->p4.z : p->zSql))!=0 ){ - z = sqlite3VdbeExpandSql(p, zTrace); - db->xTrace(db->pTraceArg, z); - sqlite3DbFree(db, z); +#ifndef SQLITE_OMIT_DEPRECATED + if( db->mTrace & SQLITE_TRACE_LEGACY ){ + void (*x)(void*,const char*) = (void(*)(void*,const char*))db->xTrace; + char *z = sqlite3VdbeExpandSql(p, zTrace); + x(db->pTraceArg, z); + sqlite3_free(z); + }else +#endif + { + (void)db->xTrace(SQLITE_TRACE_STMT, db->pTraceArg, p, zTrace); + } } #ifdef SQLITE_USE_FCNTL_TRACE zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql); @@ -81280,11 +84006,12 @@ default: { /* This is really OP_Noop and OP_Explain */ #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeTrace ){ + u8 opProperty = sqlite3OpcodeProperty[pOrigOp->opcode]; if( rc!=0 ) printf("rc=%d\n",rc); - if( pOrigOp->opflags & (OPFLG_OUT2) ){ + if( opProperty & (OPFLG_OUT2) ){ registerTrace(pOrigOp->p2, &aMem[pOrigOp->p2]); } - if( pOrigOp->opflags & OPFLG_OUT3 ){ + if( opProperty & OPFLG_OUT3 ){ registerTrace(pOrigOp->p3, &aMem[pOrigOp->p3]); } } @@ -81388,6 +84115,8 @@ struct Incrblob { BtCursor *pCsr; /* Cursor pointing at blob row */ sqlite3_stmt *pStmt; /* Statement holding cursor open */ sqlite3 *db; /* The associated database */ + char *zDb; /* Database name */ + Table *pTab; /* Table object */ }; @@ -81531,6 +84260,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_blob_open( sqlite3BtreeLeaveAll(db); goto blob_open_out; } + pBlob->pTab = pTab; + pBlob->zDb = db->aDb[sqlite3SchemaToIndex(db, pTab->pSchema)].zName; /* Now search pTab for the exact column. */ for(iCol=0; iColnCol; iCol++) { @@ -81752,6 +84483,30 @@ static int blobReadWrite( */ assert( db == v->db ); sqlite3BtreeEnterCursor(p->pCsr); + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( xCall==sqlite3BtreePutData && db->xPreUpdateCallback ){ + /* If a pre-update hook is registered and this is a write cursor, + ** invoke it here. + ** + ** TODO: The preupdate-hook is passed SQLITE_DELETE, even though this + ** operation should really be an SQLITE_UPDATE. This is probably + ** incorrect, but is convenient because at this point the new.* values + ** are not easily obtainable. And for the sessions module, an + ** SQLITE_UPDATE where the PK columns do not change is handled in the + ** same way as an SQLITE_DELETE (the SQLITE_DELETE code is actually + ** slightly more efficient). Since you cannot write to a PK column + ** using the incremental-blob API, this works. For the sessions module + ** anyhow. + */ + sqlite3_int64 iKey; + iKey = sqlite3BtreeIntegerKey(p->pCsr); + sqlite3VdbePreUpdateHook( + v, v->apCsr[0], SQLITE_DELETE, p->zDb, p->pTab, iKey, -1 + ); + } +#endif + rc = xCall(p->pCsr, iOffset+p->iOffset, n, z); sqlite3BtreeLeaveCursor(p->pCsr); if( rc==SQLITE_ABORT ){ @@ -83180,19 +85935,18 @@ static int vdbeSortAllocUnpacked(SortSubtask *pTask){ /* ** Merge the two sorted lists p1 and p2 into a single list. -** Set *ppOut to the head of the new list. 
*/ -static void vdbeSorterMerge( +static SorterRecord *vdbeSorterMerge( SortSubtask *pTask, /* Calling thread context */ SorterRecord *p1, /* First list to merge */ - SorterRecord *p2, /* Second list to merge */ - SorterRecord **ppOut /* OUT: Head of merged list */ + SorterRecord *p2 /* Second list to merge */ ){ SorterRecord *pFinal = 0; SorterRecord **pp = &pFinal; int bCached = 0; - while( p1 && p2 ){ + assert( p1!=0 && p2!=0 ); + for(;;){ int res; res = pTask->xCompare( pTask, &bCached, SRVAL(p1), p1->nVal, SRVAL(p2), p2->nVal @@ -83202,15 +85956,22 @@ static void vdbeSorterMerge( *pp = p1; pp = &p1->u.pNext; p1 = p1->u.pNext; + if( p1==0 ){ + *pp = p2; + break; + } }else{ *pp = p2; pp = &p2->u.pNext; p2 = p2->u.pNext; bCached = 0; + if( p2==0 ){ + *pp = p1; + break; + } } } - *pp = p1 ? p1 : p2; - *ppOut = pFinal; + return pFinal; } /* @@ -83263,7 +86024,7 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ p->u.pNext = 0; for(i=0; aSlot[i]; i++){ - vdbeSorterMerge(pTask, p, aSlot[i], &p); + p = vdbeSorterMerge(pTask, p, aSlot[i]); aSlot[i] = 0; } aSlot[i] = p; @@ -83272,7 +86033,8 @@ static int vdbeSorterSort(SortSubtask *pTask, SorterList *pList){ p = 0; for(i=0; i<64; i++){ - vdbeSorterMerge(pTask, p, aSlot[i], &p); + if( aSlot[i]==0 ) continue; + p = p ? vdbeSorterMerge(pTask, p, aSlot[i]) : aSlot[i]; } pList->pList = p; @@ -84602,6 +87364,15 @@ SQLITE_PRIVATE int sqlite3VdbeSorterCompare( ** This file contains code use to implement an in-memory rollback journal. ** The in-memory rollback journal is used to journal transactions for ** ":memory:" databases and when the journal_mode=MEMORY pragma is used. +** +** Update: The in-memory journal is also used to temporarily cache +** smaller journals that are not critical for power-loss recovery. +** For example, statement journals that are not too big will be held +** entirely in memory, thus reducing the number of file I/O calls, and +** more importantly, reducing temporary file creation events. If these +** journals become too large for memory, they are spilled to disk. But +** in the common case, they are usually small and no file I/O needs to +** occur. 
*/ /* #include "sqliteInt.h" */ @@ -85883,7 +88654,11 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ sqlite3ErrorMsg(pParse, "misuse of aggregate function %.*s()", nId,zId); pNC->nErr++; is_agg = 0; - }else if( no_such_func && pParse->db->init.busy==0 ){ + }else if( no_such_func && pParse->db->init.busy==0 +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + && pParse->explain==0 +#endif + ){ sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId); pNC->nErr++; }else if( wrong_num_args ){ @@ -85928,6 +88703,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); + pNC->ncFlags |= NC_VarSelect; } } break; @@ -87135,15 +89911,13 @@ SQLITE_PRIVATE Expr *sqlite3ExprAlloc( pNew->flags |= EP_IntValue; pNew->u.iValue = iValue; }else{ - int c; pNew->u.zToken = (char*)&pNew[1]; assert( pToken->z!=0 || pToken->n==0 ); if( pToken->n ) memcpy(pNew->u.zToken, pToken->z, pToken->n); pNew->u.zToken[pToken->n] = 0; - if( dequote && nExtra>=3 - && ((c = pToken->z[0])=='\'' || c=='"' || c=='[' || c=='`') ){ + if( dequote && sqlite3Isquote(pNew->u.zToken[0]) ){ + if( pNew->u.zToken[0]=='"' ) pNew->flags |= EP_DblQuoted; sqlite3Dequote(pNew->u.zToken); - if( c=='"' ) pNew->flags |= EP_DblQuoted; } } } @@ -87226,6 +90000,22 @@ SQLITE_PRIVATE Expr *sqlite3PExpr( return p; } +/* +** Add pSelect to the Expr.x.pSelect field. Or, if pExpr is NULL (due +** do a memory allocation failure) then delete the pSelect object. +*/ +SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse *pParse, Expr *pExpr, Select *pSelect){ + if( pExpr ){ + pExpr->x.pSelect = pSelect; + ExprSetProperty(pExpr, EP_xIsSelect|EP_Subquery); + sqlite3ExprSetHeightAndFlags(pParse, pExpr); + }else{ + assert( pParse->db->mallocFailed ); + sqlite3SelectDelete(pParse->db, pSelect); + } +} + + /* ** If the expression is always either TRUE or FALSE (respectively), ** then return 1. If one cannot determine the truth value of the @@ -87386,8 +90176,8 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr){ /* ** Recursively delete an expression tree. */ -SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ - if( p==0 ) return; +static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ + assert( p!=0 ); /* Sanity check: Assert that the IntValue is non-negative if it exists */ assert( !ExprHasProperty(p, EP_IntValue) || p->u.iValue>=0 ); if( !ExprHasProperty(p, EP_TokenOnly) ){ @@ -87406,6 +90196,9 @@ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ sqlite3DbFree(db, p); } } +SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ + if( p ) sqlite3ExprDeleteNN(db, p); +} /* ** Return the number of bytes allocated for the expression structure @@ -87457,7 +90250,7 @@ static int dupedExprStructSize(Expr *p, int flags){ assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); - if( 0==(flags&EXPRDUP_REDUCE) ){ + if( 0==flags ){ nSize = EXPR_FULLSIZE; }else{ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); @@ -87519,88 +90312,88 @@ static int dupedExprSize(Expr *p, int flags){ ** if any. Before returning, *pzBuffer is set to the first byte past the ** portion of the buffer copied into by this function. 
*/ -static Expr *exprDup(sqlite3 *db, Expr *p, int flags, u8 **pzBuffer){ - Expr *pNew = 0; /* Value to return */ - assert( flags==0 || flags==EXPRDUP_REDUCE ); +static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){ + Expr *pNew; /* Value to return */ + u8 *zAlloc; /* Memory space from which to build Expr object */ + u32 staticFlag; /* EP_Static if space not obtained from malloc */ + assert( db!=0 ); - if( p ){ - const int isReduced = (flags&EXPRDUP_REDUCE); - u8 *zAlloc; - u32 staticFlag = 0; + assert( p ); + assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE ); + assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE ); - assert( pzBuffer==0 || isReduced ); + /* Figure out where to write the new Expr structure. */ + if( pzBuffer ){ + zAlloc = *pzBuffer; + staticFlag = EP_Static; + }else{ + zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags)); + staticFlag = 0; + } + pNew = (Expr *)zAlloc; - /* Figure out where to write the new Expr structure. */ - if( pzBuffer ){ - zAlloc = *pzBuffer; - staticFlag = EP_Static; + if( pNew ){ + /* Set nNewSize to the size allocated for the structure pointed to + ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or + ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed + ** by the copy of the p->u.zToken string (if any). + */ + const unsigned nStructSize = dupedExprStructSize(p, dupFlags); + const int nNewSize = nStructSize & 0xfff; + int nToken; + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30(p->u.zToken) + 1; }else{ - zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, flags)); + nToken = 0; } - pNew = (Expr *)zAlloc; - - if( pNew ){ - /* Set nNewSize to the size allocated for the structure pointed to - ** by pNew. This is either EXPR_FULLSIZE, EXPR_REDUCEDSIZE or - ** EXPR_TOKENONLYSIZE. nToken is set to the number of bytes consumed - ** by the copy of the p->u.zToken string (if any). - */ - const unsigned nStructSize = dupedExprStructSize(p, flags); - const int nNewSize = nStructSize & 0xfff; - int nToken; - if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ - nToken = sqlite3Strlen30(p->u.zToken) + 1; - }else{ - nToken = 0; - } - if( isReduced ){ - assert( ExprHasProperty(p, EP_Reduced)==0 ); - memcpy(zAlloc, p, nNewSize); - }else{ - u32 nSize = (u32)exprStructSize(p); - memcpy(zAlloc, p, nSize); - if( nSizeflags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken); - pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); - pNew->flags |= staticFlag; - - /* Copy the p->u.zToken string, if any. */ - if( nToken ){ - char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; - memcpy(zToken, p->u.zToken, nToken); - } + /* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */ + pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken); + pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); + pNew->flags |= staticFlag; - if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){ - /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ - if( ExprHasProperty(p, EP_xIsSelect) ){ - pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, isReduced); - }else{ - pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, isReduced); - } - } + /* Copy the p->u.zToken string, if any. */ + if( nToken ){ + char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; + memcpy(zToken, p->u.zToken, nToken); + } - /* Fill in pNew->pLeft and pNew->pRight. 
*/ - if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){ - zAlloc += dupedExprNodeSize(p, flags); - if( ExprHasProperty(pNew, EP_Reduced) ){ - pNew->pLeft = exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc); - pNew->pRight = exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc); - } - if( pzBuffer ){ - *pzBuffer = zAlloc; - } + if( 0==((p->flags|pNew->flags) & EP_TokenOnly) ){ + /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ + if( ExprHasProperty(p, EP_xIsSelect) ){ + pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags); }else{ - if( !ExprHasProperty(p, EP_TokenOnly) ){ - pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); - pNew->pRight = sqlite3ExprDup(db, p->pRight, 0); - } + pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags); } + } + /* Fill in pNew->pLeft and pNew->pRight. */ + if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly) ){ + zAlloc += dupedExprNodeSize(p, dupFlags); + if( ExprHasProperty(pNew, EP_Reduced) ){ + pNew->pLeft = p->pLeft ? + exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0; + pNew->pRight = p->pRight ? + exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0; + } + if( pzBuffer ){ + *pzBuffer = zAlloc; + } + }else{ + if( !ExprHasProperty(p, EP_TokenOnly) ){ + pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); + pNew->pRight = sqlite3ExprDup(db, p->pRight, 0); + } } } return pNew; @@ -87652,7 +90445,7 @@ static With *withDup(sqlite3 *db, With *p){ */ SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3 *db, Expr *p, int flags){ assert( flags==0 || flags==EXPRDUP_REDUCE ); - return exprDup(db, p, flags, 0); + return p ? exprDup(db, p, flags, 0) : 0; } SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags){ ExprList *pNew; @@ -87874,7 +90667,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetName( pItem = &pList->a[pList->nExpr-1]; assert( pItem->zName==0 ); pItem->zName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n); - if( dequote && pItem->zName ) sqlite3Dequote(pItem->zName); + if( dequote ) sqlite3Dequote(pItem->zName); } } @@ -87923,10 +90716,9 @@ SQLITE_PRIVATE void sqlite3ExprListCheckLength( /* ** Delete an entire expression list. 
*/ -SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ +static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){ int i; struct ExprList_item *pItem; - if( pList==0 ) return; assert( pList->a!=0 || pList->nExpr==0 ); for(pItem=pList->a, i=0; inExpr; i++, pItem++){ sqlite3ExprDelete(db, pItem->pExpr); @@ -87936,6 +90728,9 @@ SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ sqlite3DbFree(db, pList->a); sqlite3DbFree(db, pList); } +SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ + if( pList ) exprListDeleteNN(db, pList); +} /* ** Return the bitwise-OR of all Expr.flags fields in the given @@ -88463,6 +91258,11 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int eType = IN_INDEX_INDEX_ASC + pIdx->aSortOrder[0]; if( prRhsHasNull && !pTab->aCol[iCol].notNull ){ +#ifdef SQLITE_ENABLE_COLUMN_USED_MASK + const i64 sOne = 1; + sqlite3VdbeAddOp4Dup8(v, OP_ColumnsUsed, + iTab, 0, 0, (u8*)&sOne, P4_INT64); +#endif *prRhsHasNull = ++pParse->nMem; sqlite3SetHasNullFlag(v, iTab, *prRhsHasNull); } @@ -88475,7 +91275,7 @@ SQLITE_PRIVATE int sqlite3FindInIndex(Parse *pParse, Expr *pX, u32 inFlags, int /* If no preexisting index is available for the IN clause ** and IN_INDEX_NOOP is an allowed reply ** and the RHS of the IN operator is a list, not a subquery - ** and the RHS is not contant or has two or fewer terms, + ** and the RHS is not constant or has two or fewer terms, ** then it is not worth creating an ephemeral table to evaluate ** the IN operator so return IN_INDEX_NOOP. */ @@ -88867,8 +91667,7 @@ static void sqlite3ExprCodeIN( if( eType==IN_INDEX_ROWID ){ /* In this case, the RHS is the ROWID of table b-tree */ - sqlite3VdbeAddOp2(v, OP_MustBeInt, r1, destIfFalse); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NotExists, pExpr->iTable, destIfFalse, r1); + sqlite3VdbeAddOp3(v, OP_SeekRowid, pExpr->iTable, destIfFalse, r1); VdbeCoverage(v); }else{ /* In this case, the RHS is an index b-tree. @@ -88980,6 +91779,19 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){ } } +#if defined(SQLITE_DEBUG) +/* +** Verify the consistency of the column cache +*/ +static int cacheIsValid(Parse *pParse){ + int i, n; + for(i=n=0; iaColCache[i].iReg>0 ) n++; + } + return n==pParse->nColCache; +} +#endif + /* ** Clear a cache entry. */ @@ -88990,6 +91802,9 @@ static void cacheEntryClear(Parse *pParse, struct yColCache *p){ } p->tempReg = 0; } + p->iReg = 0; + pParse->nColCache--; + assert( pParse->db->mallocFailed || cacheIsValid(pParse) ); } @@ -89033,6 +91848,8 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int p->iReg = iReg; p->tempReg = 0; p->lru = pParse->iCacheCnt++; + pParse->nColCache++; + assert( pParse->db->mallocFailed || cacheIsValid(pParse) ); return; } } @@ -89054,6 +91871,7 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int p->iReg = iReg; p->tempReg = 0; p->lru = pParse->iCacheCnt++; + assert( cacheIsValid(pParse) ); return; } } @@ -89063,15 +91881,13 @@ SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse *pParse, int iTab, int iCol, int ** Purge the range of registers from the column cache. 
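The hunks above split sqlite3ExprListDelete() (and, a little earlier, sqlite3ExprDelete()) into a cheap null-test wrapper plus a SQLITE_NOINLINE worker whose "NN" suffix promises a non-NULL argument, so the common p==0 call costs only a test and branch. The shape of that refactor with generic names; NOINLINE here stands in for SQLITE_NOINLINE:

#include <stdlib.h>

#if defined(__GNUC__)
# define NOINLINE __attribute__((noinline))
#else
# define NOINLINE
#endif

typedef struct List { struct List *pNext; } List;

/* The out-of-line worker assumes a non-NULL argument... */
static NOINLINE void listFreeNN(List *p){
  do{
    List *pNext = p->pNext;
    free(p);
    p = pNext;
  }while( p );
}

/* ...so the public entry point reduces to a test-and-branch that the
** compiler can inline at every call site. */
void listFree(List *p){
  if( p ) listFreeNN(p);
}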
*/ SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse *pParse, int iReg, int nReg){ - int i; - int iLast = iReg + nReg - 1; struct yColCache *p; - for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ - int r = p->iReg; - if( r>=iReg && r<=iLast ){ - cacheEntryClear(pParse, p); - p->iReg = 0; - } + if( iReg<=0 || pParse->nColCache==0 ) return; + p = &pParse->aColCache[SQLITE_N_COLCACHE-1]; + while(1){ + if( p->iReg >= iReg && p->iReg < iReg+nReg ) cacheEntryClear(pParse, p); + if( p==pParse->aColCache ) break; + p--; + } } @@ -89107,7 +91923,6 @@ SQLITE_PRIVATE void sqlite3ExprCachePop(Parse *pParse){ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ if( p->iReg && p->iLevel>pParse->iCacheLevel ){ cacheEntryClear(pParse, p); - p->iReg = 0; } } } @@ -89165,7 +91980,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable( }else{ int op = IsVirtual(pTab) ? OP_VColumn : OP_Column; int x = iCol; - if( !HasRowid(pTab) ){ + if( !HasRowid(pTab) && !IsVirtual(pTab) ){ x = sqlite3ColumnOfIndex(sqlite3PrimaryKeyIndex(pTab), iCol); } sqlite3VdbeAddOp3(v, op, iTabCur, x, regOut); @@ -89242,7 +92057,6 @@ SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse *pParse){ for(i=0, p=pParse->aColCache; i<SQLITE_N_COLCACHE; i++, p++){ if( p->iReg ){ cacheEntryClear(pParse, p); - p->iReg = 0; } } } @@ -89284,6 +92098,7 @@ static int usedAsColumnCache(Parse *pParse, int iFrom, int iTo){ } #endif /* SQLITE_DEBUG || SQLITE_COVERAGE_TEST */ + /* ** Convert an expression node to a TK_REGISTER */ @@ -89568,6 +92383,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) assert( !ExprHasProperty(pExpr, EP_IntValue) ); zId = pExpr->u.zToken; pDef = sqlite3FindFunction(db, zId, nFarg, enc, 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pDef==0 && pParse->explain ){ + pDef = sqlite3FindFunction(db, "unknown", nFarg, enc, 0); + } +#endif if( pDef==0 || pDef->xFinalize!=0 ){ sqlite3ErrorMsg(pParse, "unknown function: %s()", zId); break; @@ -90590,6 +93410,61 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){ return 0; } +/* +** An instance of the following structure is used by the tree walker +** to determine if an expression can be evaluated by reference to the +** index only, without having to do a search for the corresponding +** table entry. The IdxCover.pIdx field is the index. IdxCover.iCur +** is the cursor for the table. +*/ +struct IdxCover { + Index *pIdx; /* The index to be tested for coverage */ + int iCur; /* Cursor number for the table corresponding to the index */ +}; + +/* +** Check to see whether references to columns in the table with cursor +** pWalker->u.pIdxCover->iCur can be satisfied using the index +** pWalker->u.pIdxCover->pIdx. If a column is found that the index does +** not cover, set pWalker->eCode and abort the walk. +*/ +static int exprIdxCover(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_COLUMN + && pExpr->iTable==pWalker->u.pIdxCover->iCur + && sqlite3ColumnOfIndex(pWalker->u.pIdxCover->pIdx, pExpr->iColumn)<0 + ){ + pWalker->eCode = 1; + return WRC_Abort; + } + return WRC_Continue; +} + +/* +** Determine whether the index pIdx on the table with cursor iCur will +** cover the expression pExpr. Return true if the index does cover the +** expression and false if the pExpr expression references table columns +** that are not found in the index pIdx. +** +** An index covering an expression means that the expression can be +** evaluated using only the index and without having to look up the +** corresponding table entry.
+*/ +SQLITE_PRIVATE int sqlite3ExprCoveredByIndex( + Expr *pExpr, /* The expression to be tested */ + int iCur, /* The cursor number for the corresponding table */ + Index *pIdx /* The index that might be used for coverage */ +){ + Walker w; + struct IdxCover xcov; + memset(&w, 0, sizeof(w)); + xcov.iCur = iCur; + xcov.pIdx = pIdx; + w.xExprCallback = exprIdxCover; + w.u.pIdxCover = &xcov; + sqlite3WalkExpr(&w, pExpr); + return !w.eCode; +} + + /* ** An instance of the following structure is used by the tree walker ** to count references to table columns in the arguments of an @@ -91544,6 +94419,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ Expr *pDflt; /* Default value for the new column */ sqlite3 *db; /* The database connection; */ Vdbe *v = pParse->pVdbe; /* The prepared statement under construction */ + int r1; /* Temporary registers */ db = pParse->db; if( pParse->nErr || db->mallocFailed ) return; @@ -91638,16 +94514,18 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ db->flags = savedDbFlags; } - /* If the default value of the new column is NULL, then the file - ** format to 2. If the default value of the new column is not NULL, - ** the file format be 3. Back when this feature was first added - ** in 2006, we went to the trouble to upgrade the file format to the - ** minimum support values. But 10-years on, we can assume that all - ** extent versions of SQLite support file-format 4, so we always and - ** unconditionally upgrade to 4. + /* Make sure the schema version is at least 3. But do not upgrade + ** from less than 3 to 4, as that will corrupt any preexisting DESC + ** index. */ - sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, - SQLITE_MAX_FILE_FORMAT); + r1 = sqlite3GetTempReg(pParse); + sqlite3VdbeAddOp3(v, OP_ReadCookie, iDb, r1, BTREE_FILE_FORMAT); + sqlite3VdbeUsesBtree(v, iDb); + sqlite3VdbeAddOp2(v, OP_AddImm, r1, -2); + sqlite3VdbeAddOp2(v, OP_IfPos, r1, sqlite3VdbeCurrentAddr(v)+2); + VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_SetCookie, iDb, BTREE_FILE_FORMAT, 3); + sqlite3ReleaseTempReg(pParse, r1); /* Reload the schema of the modified table.
*/ reloadTableSchema(pParse, pTab, pTab->zName); @@ -93577,7 +96455,7 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ analysisInfo sInfo; HashElem *i; char *zSql; - int rc; + int rc = SQLITE_OK; assert( iDb>=0 && iDbnDb ); assert( db->aDb[iDb].pBt!=0 ); @@ -93586,31 +96464,34 @@ SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3 *db, int iDb){ assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ Index *pIdx = sqliteHashData(i); - sqlite3DefaultRowEst(pIdx); + pIdx->aiRowLogEst[0] = 0; #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 sqlite3DeleteIndexSamples(db, pIdx); pIdx->aSample = 0; #endif } - /* Check to make sure the sqlite_stat1 table exists */ + /* Load new statistics out of the sqlite_stat1 table */ sInfo.db = db; sInfo.zDatabase = db->aDb[iDb].zName; - if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)==0 ){ - return SQLITE_ERROR; + if( sqlite3FindTable(db, "sqlite_stat1", sInfo.zDatabase)!=0 ){ + zSql = sqlite3MPrintf(db, + "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); + if( zSql==0 ){ + rc = SQLITE_NOMEM_BKPT; + }else{ + rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); + sqlite3DbFree(db, zSql); + } } - /* Load new statistics out of the sqlite_stat1 table */ - zSql = sqlite3MPrintf(db, - "SELECT tbl,idx,stat FROM %Q.sqlite_stat1", sInfo.zDatabase); - if( zSql==0 ){ - rc = SQLITE_NOMEM_BKPT; - }else{ - rc = sqlite3_exec(db, zSql, analysisLoader, &sInfo, 0); - sqlite3DbFree(db, zSql); + /* Set appropriate defaults on all indexes not in the sqlite_stat1 table */ + assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); + for(i=sqliteHashFirst(&db->aDb[iDb].pSchema->idxHash);i;i=sqliteHashNext(i)){ + Index *pIdx = sqliteHashData(i); + if( pIdx->aiRowLogEst[0]==0 ) sqlite3DefaultRowEst(pIdx); } - /* Load the statistics from the sqlite_stat4 table. */ #ifdef SQLITE_ENABLE_STAT3_OR_STAT4 if( rc==SQLITE_OK && OptimizationEnabled(db, SQLITE_Stat34) ){ @@ -94333,6 +97214,7 @@ SQLITE_PRIVATE int sqlite3AuthReadCol( char *zDb = db->aDb[iDb].zName; /* Name of attached database */ int rc; /* Auth callback return code */ + if( db->init.busy ) return SQLITE_OK; rc = db->xAuth(db->pAuthArg, SQLITE_READ, zTab,zCol,zDb,pParse->zAuthContext #ifdef SQLITE_USER_AUTHENTICATION ,db->auth.zAuthUser @@ -94823,7 +97705,7 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha */ SQLITE_PRIVATE Table *sqlite3LocateTable( Parse *pParse, /* context in which to report errors */ - int isView, /* True if looking for a VIEW rather than a TABLE */ + u32 flags, /* LOCATE_VIEW or LOCATE_NOERR */ const char *zName, /* Name of the table we are looking for */ const char *zDbase /* Name of the database. Might be NULL */ ){ @@ -94837,7 +97719,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( p = sqlite3FindTable(pParse->db, zName, zDbase); if( p==0 ){ - const char *zMsg = isView ? "no such view" : "no such table"; + const char *zMsg = flags & LOCATE_VIEW ? 
"no such view" : "no such table"; #ifndef SQLITE_OMIT_VIRTUALTABLE if( sqlite3FindDbName(pParse->db, zDbase)<1 ){ /* If zName is the not the name of a table in the schema created using @@ -94849,12 +97731,14 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( } } #endif - if( zDbase ){ - sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); - }else{ - sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); + if( (flags & LOCATE_NOERR)==0 ){ + if( zDbase ){ + sqlite3ErrorMsg(pParse, "%s: %s.%s", zMsg, zDbase, zName); + }else{ + sqlite3ErrorMsg(pParse, "%s: %s", zMsg, zName); + } + pParse->checkSchema = 1; } - pParse->checkSchema = 1; } return p; @@ -94871,7 +97755,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( */ SQLITE_PRIVATE Table *sqlite3LocateTableItem( Parse *pParse, - int isView, + u32 flags, struct SrcList_item *p ){ const char *zDb; @@ -94882,7 +97766,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTableItem( }else{ zDb = p->zDatabase; } - return sqlite3LocateTable(pParse, isView, p->zName, zDb); + return sqlite3LocateTable(pParse, flags, p->zName, zDb); } /* @@ -95077,16 +97961,10 @@ SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){ ** db parameter can be used with db->pnBytesFreed to measure the memory ** used by the Table object. */ -SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ +static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ Index *pIndex, *pNext; TESTONLY( int nLookaside; ) /* Used to verify lookaside not used for schema */ - assert( !pTable || pTable->nRef>0 ); - - /* Do not delete the table until the reference count reaches zero. */ - if( !pTable ) return; - if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return; - /* Record the number of outstanding lookaside allocations in schema Tables ** prior to doing any free() operations. Since schema Tables do not use ** lookaside, this number should not change. */ @@ -95096,8 +97974,9 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ /* Delete all indices associated with this table. */ for(pIndex = pTable->pIndex; pIndex; pIndex=pNext){ pNext = pIndex->pNext; - assert( pIndex->pSchema==pTable->pSchema ); - if( !db || db->pnBytesFreed==0 ){ + assert( pIndex->pSchema==pTable->pSchema + || (IsVirtual(pTable) && pIndex->idxType!=SQLITE_IDXTYPE_APPDEF) ); + if( (db==0 || db->pnBytesFreed==0) && !IsVirtual(pTable) ){ char *zName = pIndex->zName; TESTONLY ( Index *pOld = ) sqlite3HashInsert( &pIndex->pSchema->idxHash, zName, 0 @@ -95126,6 +98005,13 @@ SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ /* Verify that no lookaside memory was used by schema tables */ assert( nLookaside==0 || nLookaside==db->lookaside.nOut ); } +SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ + /* Do not delete the table until the reference count reaches zero. 
*/ + if( !pTable ) return; + if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return; + deleteTable(db, pTable); +} + /* ** Unlink the given table from the hash tables and the delete the @@ -95772,7 +98658,7 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey( Column *pCol = 0; int iCol = -1, i; int nTerm; - if( pTab==0 || IN_DECLARE_VTAB ) goto primary_key_exit; + if( pTab==0 ) goto primary_key_exit; if( pTab->tabFlags & TF_HasPrimaryKey ){ sqlite3ErrorMsg(pParse, "table \"%s\" has more than one primary key", pTab->zName); @@ -95818,12 +98704,8 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey( "INTEGER PRIMARY KEY"); #endif }else{ - Index *p; - p = sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0, - 0, sortOrder, 0); - if( p ){ - p->idxType = SQLITE_IDXTYPE_PRIMARYKEY; - } + sqlite3CreateIndex(pParse, 0, 0, 0, pList, onError, 0, + 0, sortOrder, 0, SQLITE_IDXTYPE_PRIMARYKEY); pList = 0; } @@ -96140,21 +99022,23 @@ static int hasColumn(const i16 *aiCol, int nCol, int x){ ** are appropriate for a WITHOUT ROWID table instead of a rowid table. ** Changes include: ** -** (1) Convert the OP_CreateTable into an OP_CreateIndex. There is +** (1) Set all columns of the PRIMARY KEY schema object to be NOT NULL. +** (2) Convert the OP_CreateTable into an OP_CreateIndex. There is ** no rowid btree for a WITHOUT ROWID. Instead, the canonical ** data storage is a covering index btree. -** (2) Bypass the creation of the sqlite_master table entry +** (3) Bypass the creation of the sqlite_master table entry ** for the PRIMARY KEY as the primary key index is now ** identified by the sqlite_master table entry of the table itself. -** (3) Set the Index.tnum of the PRIMARY KEY Index object in the +** (4) Set the Index.tnum of the PRIMARY KEY Index object in the ** schema to the rootpage from the main table. -** (4) Set all columns of the PRIMARY KEY schema object to be NOT NULL. ** (5) Add all table columns to the PRIMARY KEY Index object ** so that the PRIMARY KEY is a covering index. The surplus ** columns are part of KeyInfo.nXField and are not used for ** sorting or lookup or uniqueness checks. ** (6) Replace the rowid tail on all automatically generated UNIQUE ** indices with the PRIMARY KEY columns. +** +** For virtual tables, only (1) is performed. */ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ Index *pIdx; @@ -96164,6 +99048,20 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ sqlite3 *db = pParse->db; Vdbe *v = pParse->pVdbe; + /* Mark every PRIMARY KEY column as NOT NULL (except for imposter tables) + */ + if( !db->init.imposterTable ){ + for(i=0; inCol; i++){ + if( (pTab->aCol[i].colFlags & COLFLAG_PRIMKEY)!=0 ){ + pTab->aCol[i].notNull = OE_Abort; + } + } + } + + /* The remaining transformations only apply to b-tree tables, not to + ** virtual tables */ + if( IN_DECLARE_VTAB ) return; + /* Convert the OP_CreateTable opcode that would normally create the ** root-page for the table into an OP_CreateIndex opcode. The index ** created will become the PRIMARY KEY index. 
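The conversion steps listed above are visible from the public API: in a WITHOUT ROWID table the PRIMARY KEY b-tree is the table itself, and per step (1) its key columns behave as NOT NULL even when the declaration omits the constraint. A small demonstration program against this amalgamation:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
      "CREATE TABLE kv(k TEXT PRIMARY KEY, v) WITHOUT ROWID;", 0, 0, &zErr);
  /* The PRIMARY KEY b-tree is the table here, and k is implicitly
  ** NOT NULL, so this insert fails even though k was never declared so. */
  rc = sqlite3_exec(db, "INSERT INTO kv VALUES(NULL, 1);", 0, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "expected failure: %s\n", zErr);
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return 0;
}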
@@ -96185,9 +99083,10 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ if( pList==0 ) return; pList->a[0].sortOrder = pParse->iPkSortOrder; assert( pParse->pNewTable==pTab ); - pPk = sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0); - if( pPk==0 ) return; - pPk->idxType = SQLITE_IDXTYPE_PRIMARYKEY; + sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0, + SQLITE_IDXTYPE_PRIMARYKEY); + if( db->mallocFailed ) return; + pPk = sqlite3PrimaryKeyIndex(pTab); pTab->iPKey = -1; }else{ pPk = sqlite3PrimaryKeyIndex(pTab); @@ -96215,19 +99114,11 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){ } pPk->nKeyCol = j; } - pPk->isCovering = 1; assert( pPk!=0 ); + pPk->isCovering = 1; + if( !db->init.imposterTable ) pPk->uniqNotNull = 1; nPk = pPk->nKeyCol; - /* Make sure every column of the PRIMARY KEY is NOT NULL. (Except, - ** do not enforce this for imposter tables.) */ - if( !db->init.imposterTable ){ - for(i=0; iaCol[pPk->aiColumn[i]].notNull = OE_Abort; - } - pPk->uniqNotNull = 1; - } - /* The root page of the PRIMARY KEY is the table root page */ pPk->tnum = pTab->tnum; @@ -96706,7 +99597,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ pTable->nCol = 0; nErr++; } - if( pSelTab ) sqlite3DeleteTable(db, pSelTab); + sqlite3DeleteTable(db, pSelTab); sqlite3SelectDelete(db, pSel); db->lookaside.bDisable--; } else { @@ -96982,6 +99873,7 @@ SQLITE_PRIVATE void sqlite3DropTable(Parse *pParse, SrcList *pName, int isView, assert( pName->nSrc==1 ); if( sqlite3ReadSchema(pParse) ) goto exit_drop_table; if( noErr ) db->suppressErr++; + assert( isView==0 || isView==LOCATE_VIEW ); pTab = sqlite3LocateTableItem(pParse, isView, &pName->a[0]); if( noErr ) db->suppressErr--; @@ -97259,6 +100151,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ tnum = pIndex->tnum; } pKey = sqlite3KeyInfoOfIndex(pParse, pIndex); + assert( pKey!=0 || db->mallocFailed || pParse->nErr ); /* Open the sorter cursor if we are to use one. */ iSorter = pParse->nTab++; @@ -97282,8 +100175,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){ sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR|((memRootPage>=0)?OPFLAG_P2ISREG:0)); addr1 = sqlite3VdbeAddOp2(v, OP_SorterSort, iSorter, 0); VdbeCoverage(v); - assert( pKey!=0 || db->mallocFailed || pParse->nErr ); - if( IsUniqueIndex(pIndex) && pKey!=0 ){ + if( IsUniqueIndex(pIndex) ){ int j2 = sqlite3VdbeCurrentAddr(v) + 3; sqlite3VdbeGoto(v, j2); addr2 = sqlite3VdbeCurrentAddr(v); @@ -97352,12 +100244,8 @@ SQLITE_PRIVATE Index *sqlite3AllocateIndexObject( ** pList is a list of columns to be indexed. pList will be NULL if this ** is a primary key or unique-constraint on the most recent column added ** to the table currently under construction. -** -** If the index is created successfully, return a pointer to the new Index -** structure. This is used by sqlite3AddPrimaryKey() to mark the index -** as the tables primary key (Index.idxType==SQLITE_IDXTYPE_PRIMARYKEY) */ -SQLITE_PRIVATE Index *sqlite3CreateIndex( +SQLITE_PRIVATE void sqlite3CreateIndex( Parse *pParse, /* All information about this parse */ Token *pName1, /* First part of index name. May be NULL */ Token *pName2, /* Second part of index name. 
May be NULL */ @@ -97367,9 +100255,9 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( Token *pStart, /* The CREATE token that begins this statement */ Expr *pPIWhere, /* WHERE clause for partial indices */ int sortOrder, /* Sort order of primary key when pList==NULL */ - int ifNotExist /* Omit error if index already exists */ + int ifNotExist, /* Omit error if index already exists */ + u8 idxType /* The index type */ ){ - Index *pRet = 0; /* Pointer to return */ Table *pTab = 0; /* Table to be indexed */ Index *pIndex = 0; /* The index to be created */ char *zName = 0; /* Name of the index */ @@ -97387,7 +100275,10 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( char *zExtra = 0; /* Extra space after the Index object */ Index *pPk = 0; /* PRIMARY KEY index for WITHOUT ROWID tables */ - if( db->mallocFailed || IN_DECLARE_VTAB || pParse->nErr>0 ){ + if( db->mallocFailed || pParse->nErr>0 ){ + goto exit_create_index; + } + if( IN_DECLARE_VTAB && idxType!=SQLITE_IDXTYPE_PRIMARYKEY ){ goto exit_create_index; } if( SQLITE_OK!=sqlite3ReadSchema(pParse) ){ @@ -97513,6 +100404,13 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( if( zName==0 ){ goto exit_create_index; } + + /* Automatic index names generated from within sqlite3_declare_vtab() + ** must have names that are distinct from normal automatic index names. + ** The following statement converts "sqlite3_autoindex..." into + ** "sqlite3_butoindex..." in order to make the names distinct. + ** The "vtab_err.test" test demonstrates the need of this statement. */ + if( IN_DECLARE_VTAB ) zName[7]++; } /* Check for authorization to create an index. @@ -97576,7 +100474,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( pIndex->pTable = pTab; pIndex->onError = (u8)onError; pIndex->uniqNotNull = onError!=OE_None; - pIndex->idxType = pName ? SQLITE_IDXTYPE_APPDEF : SQLITE_IDXTYPE_UNIQUE; + pIndex->idxType = idxType; pIndex->pSchema = db->aDb[iDb].pSchema; pIndex->nKeyCol = pList->nExpr; if( pPIWhere ){ @@ -97756,7 +100654,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( pIdx->onError = pIndex->onError; } } - pRet = pIdx; + if( idxType==SQLITE_IDXTYPE_PRIMARYKEY ) pIdx->idxType = idxType; goto exit_create_index; } } @@ -97768,6 +100666,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( assert( pParse->nErr==0 ); if( db->init.busy ){ Index *p; + assert( !IN_DECLARE_VTAB ); assert( sqlite3SchemaMutexHeld(db, 0, pIndex->pSchema) ); p = sqlite3HashInsert(&pIndex->pSchema->idxHash, pIndex->zName, pIndex); @@ -97849,7 +100748,7 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( sqlite3ChangeCookie(pParse, iDb); sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "name='%q' AND type='index'", pIndex->zName)); - sqlite3VdbeAddOp1(v, OP_Expire, 0); + sqlite3VdbeAddOp0(v, OP_Expire); } sqlite3VdbeJumpHere(v, pIndex->tnum); @@ -97874,7 +100773,6 @@ SQLITE_PRIVATE Index *sqlite3CreateIndex( pIndex->pNext = pOther->pNext; pOther->pNext = pIndex; } - pRet = pIndex; pIndex = 0; } @@ -97885,7 +100783,6 @@ exit_create_index: sqlite3ExprListDelete(db, pList); sqlite3SrcListDelete(db, pTblName); sqlite3DbFree(db, zName); - return pRet; } /* @@ -97914,10 +100811,11 @@ SQLITE_PRIVATE void sqlite3DefaultRowEst(Index *pIdx){ int i; /* Set the first entry (number of rows in the index) to the estimated - ** number of rows in the table. Or 10, if the estimated number of rows - ** in the table is less than that. */ + ** number of rows in the table, or half the number of rows in the table + ** for a partial index. But do not let the estimate drop below 10. 
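The estimates in the code that follows are in LogEst units, roughly ten times the base-2 logarithm, which is why subtracting 10 halves a row count and 33 is the floor of ten rows. A quick approximation for sanity-checking those constants (sqlite3LogEst proper rounds through a small lookup table, so results can differ by one):

#include <math.h>
#include <stdio.h>

/* Rough stand-in for SQLite's LogEst encoding: 10*log2(x), rounded. */
static int logEst(double x){
  return (int)(10.0*log2(x) + 0.5);
}

int main(void){
  printf("logEst(2)   = %d\n", logEst(2));    /* 10: subtracting it halves */
  printf("logEst(10)  = %d\n", logEst(10));   /* 33: the floor used below */
  printf("logEst(1e6) = %d\n", logEst(1e6));  /* ~199 for a million rows */
  return 0;
}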
*/ a[0] = pIdx->pTable->nRowLogEst; - if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) ); + if( pIdx->pPartIdxWhere!=0 ) a[0] -= 10; assert( 10==sqlite3LogEst(2) ); + if( a[0]<33 ) a[0] = 33; assert( 33==sqlite3LogEst(10) ); /* Estimate that a[1] is 10, a[2] is 9, a[3] is 8, a[4] is 7, a[5] is ** 6 and each subsequent value (if any) is 5. */ @@ -98799,10 +101697,6 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ /* ** Return a KeyInfo structure that is appropriate for the given Index. ** -** The KeyInfo structure for an index is cached in the Index object. -** So there might be multiple references to the returned pointer. The -** caller should not try to modify the KeyInfo object. -** ** The caller should invoke sqlite3KeyInfoUnref() on the returned object ** when it has finished using it. */ @@ -99537,7 +102431,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( */ if( pOrderBy && (pLimit == 0) ) { sqlite3ErrorMsg(pParse, "ORDER BY without LIMIT on %s", zStmtType); - goto limit_where_cleanup_2; + goto limit_where_cleanup; } /* We only need to generate a select expression if there @@ -99559,16 +102453,16 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( */ pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); - if( pSelectRowid == 0 ) goto limit_where_cleanup_2; + if( pSelectRowid == 0 ) goto limit_where_cleanup; pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid); - if( pEList == 0 ) goto limit_where_cleanup_2; + if( pEList == 0 ) goto limit_where_cleanup; /* duplicate the FROM clause as it is needed by both the DELETE/UPDATE tree ** and the SELECT subtree. */ pSelectSrc = sqlite3SrcListDup(pParse->db, pSrc, 0); if( pSelectSrc == 0 ) { sqlite3ExprListDelete(pParse->db, pEList); - goto limit_where_cleanup_2; + goto limit_where_cleanup; } /* generate the SELECT expression tree. */ @@ -99578,21 +102472,11 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */ pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0); - if( pWhereRowid == 0 ) goto limit_where_cleanup_1; - pInClause = sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0); - if( pInClause == 0 ) goto limit_where_cleanup_1; - - pInClause->x.pSelect = pSelect; - pInClause->flags |= EP_xIsSelect; - sqlite3ExprSetHeightAndFlags(pParse, pInClause); + pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0) : 0; + sqlite3PExprAddSelect(pParse, pInClause, pSelect); return pInClause; - /* something went wrong. clean up anything allocated. 
*/ -limit_where_cleanup_1: - sqlite3SelectDelete(pParse->db, pSelect); - return 0; - -limit_where_cleanup_2: +limit_where_cleanup: sqlite3ExprDelete(pParse->db, pWhere); sqlite3ExprListDelete(pParse->db, pOrderBy); sqlite3ExprDelete(pParse->db, pLimit); @@ -99643,11 +102527,12 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( int addrBypass = 0; /* Address of jump over the delete logic */ int addrLoop = 0; /* Top of the delete loop */ int addrEphOpen = 0; /* Instruction to open the Ephemeral table */ + int bComplex; /* True if there are triggers or FKs or + ** subqueries in the WHERE clause */ #ifndef SQLITE_OMIT_TRIGGER int isView; /* True if attempting to delete from a view */ Trigger *pTrigger; /* List of table triggers, if required */ - int bComplex; /* True if there are either triggers or FKs */ #endif memset(&sContext, 0, sizeof(sContext)); @@ -99675,7 +102560,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( #else # define pTrigger 0 # define isView 0 -# define bComplex 0 #endif #ifdef SQLITE_OMIT_VIEW # undef isView @@ -99760,6 +102644,9 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( && pWhere==0 && !bComplex && !IsVirtual(pTab) +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + && db->xPreUpdateCallback==0 +#endif ){ assert( !isView ); sqlite3TableLock(pParse, iDb, pTab->tnum, 1, pTab->zName); @@ -99774,7 +102661,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom( }else #endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */ { - u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK; + u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK|WHERE_SEEK_TABLE; + if( sNC.ncFlags & NC_VarSelect ) bComplex = 1; wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW); if( HasRowid(pTab) ){ /* For a rowid table, initialize the RowSet to an empty set */ @@ -100109,14 +102997,19 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete( /* Delete the index and table entries. Skip this step if pTab is really ** a view (in which case the only effect of the DELETE statement is to - ** fire the INSTEAD OF triggers). */ + ** fire the INSTEAD OF triggers). + ** + ** If variable 'count' is non-zero, then this OP_Delete instruction should + ** invoke the update-hook. The pre-update-hook, on the other hand should + ** be invoked unless table pTab is a system table. The difference is that + ** the update-hook is not invoked for rows removed by REPLACE, but the + ** pre-update-hook is. + */ if( pTab->pSelect==0 ){ u8 p5 = 0; sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek); sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0)); - if( count ){ - sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); - } + sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE); if( eMode!=ONEPASS_OFF ){ sqlite3VdbeChangeP5(v, OPFLAG_AUXDELETE); } @@ -101035,7 +103928,7 @@ static int patternCompare( } c2 = Utf8Read(zString); if( c==c2 ) continue; - if( noCase && c<0x80 && c2<0x80 && sqlite3Tolower(c)==sqlite3Tolower(c2) ){ + if( noCase && sqlite3Tolower(c)==sqlite3Tolower(c2) && c<0x80 && c2<0x80 ){ continue; } if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue; @@ -101611,6 +104504,26 @@ static void trimFunc( } +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +/* +** The "unknown" function is automatically substituted in place of +** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN +** when the SQLITE_ENABLE_UNKNOWN_FUNCTION compile-time option is used. 
+** When the "sqlite3" command-line shell is built using this functionality, +** that allows an EXPLAIN or EXPLAIN QUERY PLAN for complex queries +** involving application-defined functions to be examined in a generic +** sqlite3 shell. +*/ +static void unknownFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + /* no-op */ +} +#endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/ + + /* IMP: R-25361-16150 This function is omitted from SQLite by default. It ** is only available if the SQLITE_SOUNDEX compile-time option is used ** when SQLite is built. @@ -101681,6 +104594,14 @@ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ sqlite3 *db = sqlite3_context_db_handle(context); char *zErrMsg = 0; + /* Disallow the load_extension() SQL function unless the SQLITE_LoadExtFunc + ** flag is set. See the sqlite3_enable_load_extension() API. + */ + if( (db->flags & SQLITE_LoadExtFunc)==0 ){ + sqlite3_result_error(context, "not authorized", -1); + return; + } + if( argc==2 ){ zProc = (const char *)sqlite3_value_text(argv[1]); }else{ @@ -102073,13 +104994,16 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ AGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize), LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), - #ifdef SQLITE_CASE_SENSITIVE_LIKE +#ifdef SQLITE_CASE_SENSITIVE_LIKE LIKEFUNC(like, 2, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), LIKEFUNC(like, 3, &likeInfoAlt, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), - #else +#else LIKEFUNC(like, 2, &likeInfoNorm, SQLITE_FUNC_LIKE), LIKEFUNC(like, 3, &likeInfoNorm, SQLITE_FUNC_LIKE), - #endif +#endif +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + FUNCTION(unknown, -1, 0, 0, unknownFunc ), +#endif FUNCTION(coalesce, 1, 0, 0, 0 ), FUNCTION(coalesce, 0, 0, 0, 0 ), FUNCTION2(coalesce, -1, 0, 0, noopFunc, SQLITE_FUNC_COALESCE), @@ -103279,7 +106203,6 @@ static Trigger *fkActionTrigger( if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){ return 0; } - pTrigger = pFKey->apTrigger[iAction]; if( action!=OE_None && !pTrigger ){ @@ -103487,7 +106410,8 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ FKey *pFKey; /* Iterator variable */ FKey *pNext; /* Copy of pFKey->pNextFrom */ - assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); + assert( db==0 || IsVirtual(pTab) + || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); for(pFKey=pTab->pFKey; pFKey; pFKey=pNext){ /* Remove the FK from the fkeyHash hash table. */ @@ -104956,9 +107880,18 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( if( pTrigger || sqlite3FkRequired(pParse, pTab, 0, 0) ){ sqlite3MultiWrite(pParse); sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur, - regNewData, 1, 0, OE_Replace, - ONEPASS_SINGLE, -1); + regNewData, 1, 0, OE_Replace, 1, -1); }else{ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( HasRowid(pTab) ){ + /* This OP_Delete opcode fires the pre-update-hook only. It does + ** not modify the b-tree. It is more efficient to let the coming + ** OP_Insert replace the existing entry than it is to delete the + ** existing entry and then insert a new one. 
*/ + sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, OPFLAG_ISNOOP); + sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE); + } +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ if( pTab->pIndex ){ sqlite3MultiWrite(pParse); sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,-1); @@ -105228,7 +108161,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion( } sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData); if( !pParse->nested ){ - sqlite3VdbeChangeP4(v, -1, pTab->zName, P4_TRANSIENT); + sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE); } sqlite3VdbeChangeP5(v, pik_flags); } @@ -105628,7 +108561,7 @@ static int xferOptimization( } sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData); sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid, - pDest->zName, 0); + (char*)pDest, P4_TABLE); sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND); sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v); sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0); @@ -105888,12 +108821,10 @@ exec_out: ** as extensions by SQLite should #include this file instead of ** sqlite3.h. */ -#ifndef _SQLITE3EXT_H_ -#define _SQLITE3EXT_H_ +#ifndef SQLITE3EXT_H +#define SQLITE3EXT_H /* #include "sqlite3.h" */ -typedef struct sqlite3_api_routines sqlite3_api_routines; - /* ** The following structure holds pointers to all of the SQLite API ** routines. @@ -106154,8 +109085,21 @@ struct sqlite3_api_routines { int (*db_cacheflush)(sqlite3*); /* Version 3.12.0 and later */ int (*system_errno)(sqlite3*); + /* Version 3.14.0 and later */ + int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*); + char *(*expanded_sql)(sqlite3_stmt*); }; +/* +** This is the function signature used for all extension entry points. It +** is also defined in the file "loadext.c". +*/ +typedef int (*sqlite3_loadext_entry)( + sqlite3 *db, /* Handle to the database. */ + char **pzErrMsg, /* Used to set error string on failure. */ + const sqlite3_api_routines *pThunk /* Extension API function pointers. */ +); + /* ** The following macros redefine the API routines so that they are ** redirected through the global sqlite3_api structure. @@ -106399,6 +109343,9 @@ struct sqlite3_api_routines { #define sqlite3_db_cacheflush sqlite3_api->db_cacheflush /* Version 3.12.0 and later */ #define sqlite3_system_errno sqlite3_api->system_errno +/* Version 3.14.0 and later */ +#define sqlite3_trace_v2 sqlite3_api->trace_v2 +#define sqlite3_expanded_sql sqlite3_api->expanded_sql #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -106416,7 +109363,7 @@ struct sqlite3_api_routines { # define SQLITE_EXTENSION_INIT3 /*no-op*/ #endif -#endif /* _SQLITE3EXT_H_ */ +#endif /* SQLITE3EXT_H */ /************** End of sqlite3ext.h ******************************************/ /************** Continuing where we left off in loadext.c ********************/ @@ -106424,7 +109371,6 @@ struct sqlite3_api_routines { /* #include */ #ifndef SQLITE_OMIT_LOAD_EXTENSION - /* ** Some API routines are omitted when various features are ** excluded from a build of SQLite. 
Substitute a NULL pointer @@ -106494,7 +109440,7 @@ struct sqlite3_api_routines { # define sqlite3_enable_shared_cache 0 #endif -#ifdef SQLITE_OMIT_TRACE +#if defined(SQLITE_OMIT_TRACE) || defined(SQLITE_OMIT_DEPRECATED) # define sqlite3_profile 0 # define sqlite3_trace 0 #endif @@ -106514,6 +109460,10 @@ struct sqlite3_api_routines { #define sqlite3_blob_reopen 0 #endif +#if defined(SQLITE_OMIT_TRACE) +# define sqlite3_trace_v2 0 +#endif + /* ** The following structure contains pointers to all SQLite API routines. ** A pointer to this structure is passed into extensions when they are @@ -106819,7 +109769,10 @@ static const sqlite3_api_routines sqlite3Apis = { sqlite3_strlike, sqlite3_db_cacheflush, /* Version 3.12.0 and later */ - sqlite3_system_errno + sqlite3_system_errno, + /* Version 3.14.0 and later */ + sqlite3_trace_v2, + sqlite3_expanded_sql }; /* @@ -106842,13 +109795,14 @@ static int sqlite3LoadExtension( ){ sqlite3_vfs *pVfs = db->pVfs; void *handle; - int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); + sqlite3_loadext_entry xInit; char *zErrmsg = 0; const char *zEntry; char *zAltEntry = 0; void **aHandle; u64 nMsg = 300 + sqlite3Strlen30(zFile); int ii; + int rc; /* Shared library endings to try if zFile cannot be loaded as written */ static const char *azEndings[] = { @@ -106867,8 +109821,9 @@ static int sqlite3LoadExtension( /* Ticket #1863. To avoid a creating security problems for older ** applications that relink against newer versions of SQLite, the ** ability to run load_extension is turned off by default. One - ** must call sqlite3_enable_load_extension() to turn on extension - ** loading. Otherwise you get the following error. + ** must call either sqlite3_enable_load_extension(db) or + ** sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, 0) + ** to turn on extension loading. 
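As the rewritten comment above spells out, enabling is now two-level: sqlite3_enable_load_extension() turns on both the C API and the SQL load_extension() function (both flags are set together in a hunk further down), while the sqlite3_db_config() form can enable the C API alone, leaving the SQL function rejected with "not authorized". For example:

#include "sqlite3.h"

/* Allow sqlite3_load_extension() from C while leaving the SQL-level
** load_extension() function disabled for any untrusted queries. */
int enableCOnlyExtensionLoading(sqlite3 *db){
  return sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, (int*)0);
}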
*/ if( (db->flags & SQLITE_LoadExtension)==0 ){ if( pzErrMsg ){ @@ -106899,8 +109854,7 @@ static int sqlite3LoadExtension( } return SQLITE_ERROR; } - xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - sqlite3OsDlSym(pVfs, handle, zEntry); + xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry); /* If no entry point was specified and the default legacy ** entry point name "sqlite3_extension_init" was not found, then @@ -106932,8 +109886,7 @@ static int sqlite3LoadExtension( } memcpy(zAltEntry+iEntry, "_init", 6); zEntry = zAltEntry; - xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - sqlite3OsDlSym(pVfs, handle, zEntry); + xInit = (sqlite3_loadext_entry)sqlite3OsDlSym(pVfs, handle, zEntry); } if( xInit==0 ){ if( pzErrMsg ){ @@ -106950,7 +109903,9 @@ static int sqlite3LoadExtension( return SQLITE_ERROR; } sqlite3_free(zAltEntry); - if( xInit(db, &zErrmsg, &sqlite3Apis) ){ + rc = xInit(db, &zErrmsg, &sqlite3Apis); + if( rc ){ + if( rc==SQLITE_OK_LOAD_PERMANENTLY ) return SQLITE_OK; if( pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("error during initialization: %s", zErrmsg); } @@ -107007,9 +109962,9 @@ SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3 *db){ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff){ sqlite3_mutex_enter(db->mutex); if( onoff ){ - db->flags |= SQLITE_LoadExtension; + db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc; }else{ - db->flags &= ~SQLITE_LoadExtension; + db->flags &= ~(SQLITE_LoadExtension|SQLITE_LoadExtFunc); } sqlite3_mutex_leave(db->mutex); return SQLITE_OK; @@ -107061,7 +110016,9 @@ static SQLITE_WSD struct sqlite3AutoExtList { ** Register a statically linked extension that is automatically ** loaded by every new database connection. */ -SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){ +SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension( + void (*xInit)(void) +){ int rc = SQLITE_OK; #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); @@ -107106,7 +110063,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xInit)(void)){ ** Return 1 if xInit was found on the list and removed. Return 0 if xInit ** was not on the list. */ -SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xInit)(void)){ +SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension( + void (*xInit)(void) +){ #if SQLITE_THREADSAFE sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); #endif @@ -107155,7 +110114,7 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ u32 i; int go = 1; int rc; - int (*xInit)(sqlite3*,char**,const sqlite3_api_routines*); + sqlite3_loadext_entry xInit; wsdAutoextInit; if( wsdAutoext.nExt==0 ){ @@ -107172,8 +110131,7 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){ xInit = 0; go = 0; }else{ - xInit = (int(*)(sqlite3*,char**,const sqlite3_api_routines*)) - wsdAutoext.aExt[i]; + xInit = (sqlite3_loadext_entry)wsdAutoext.aExt[i]; } sqlite3_mutex_leave(mutex); zErrmsg = 0; @@ -108688,7 +111646,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** compiler (eg. count_changes). So add an opcode to expire all ** compiled SQL statements after modifying a pragma value. 
*/ - sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); + sqlite3VdbeAddOp0(v, OP_Expire); setAllPagerFlags(db); } break; @@ -108710,7 +111668,7 @@ SQLITE_PRIVATE void sqlite3Pragma( */ case PragTyp_TABLE_INFO: if( zRight ){ Table *pTab; - pTab = sqlite3FindTable(db, zRight, zDb); + pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb); if( pTab ){ static const char *azCol[] = { "cid", "name", "type", "notnull", "dflt_value", "pk" @@ -108992,12 +111950,10 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeAddOp3(v, OP_Column, 0, iKey, regRow); sqlite3ColumnDefault(v, pTab, iKey, regRow); sqlite3VdbeAddOp2(v, OP_IsNull, regRow, addrOk); VdbeCoverage(v); - sqlite3VdbeAddOp2(v, OP_MustBeInt, regRow, - sqlite3VdbeCurrentAddr(v)+3); VdbeCoverage(v); }else{ sqlite3VdbeAddOp2(v, OP_Rowid, 0, regRow); } - sqlite3VdbeAddOp3(v, OP_NotExists, i, 0, regRow); VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_SeekRowid, i, 0, regRow); VdbeCoverage(v); sqlite3VdbeGoto(v, addrOk); sqlite3VdbeJumpHere(v, sqlite3VdbeCurrentAddr(v)-2); }else{ @@ -110568,6 +113524,7 @@ struct SortCtx { int addrSortIndex; /* Address of the OP_SorterOpen or OP_OpenEphemeral */ int labelDone; /* Jump here when done, ex: LIMIT reached */ u8 sortFlags; /* Zero or more SORTFLAG_* bits */ + u8 bOrderedInnerLoop; /* ORDER BY correctly sorts the inner loop */ }; #define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */ @@ -110586,7 +113543,7 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){ sqlite3ExprListDelete(db, p->pOrderBy); sqlite3ExprDelete(db, p->pLimit); sqlite3ExprDelete(db, p->pOffset); - sqlite3WithDelete(db, p->pWith); + if( p->pWith ) sqlite3WithDelete(db, p->pWith); if( bFree ) sqlite3DbFree(db, p); p = pPrior; bFree = 1; @@ -110681,7 +113638,7 @@ SQLITE_PRIVATE void sqlite3SelectSetName(Select *p, const char *zName){ ** Delete the given Select structure and all of its substructures. */ SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ - clearSelect(db, p, 1); + if( p ) clearSelect(db, p, 1); } /* @@ -111101,9 +114058,30 @@ static void pushOntoSorter( sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord); if( iLimit ){ int addr; + int r1 = 0; + /* Fill the sorter until it contains LIMIT+OFFSET entries. (The iLimit + ** register is initialized with value of LIMIT+OFFSET.) After the sorter + ** fills up, delete the least entry in the sorter after each insert. + ** Thus we never hold more than the LIMIT+OFFSET rows in memory at once */ addr = sqlite3VdbeAddOp3(v, OP_IfNotZero, iLimit, 0, 1); VdbeCoverage(v); sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor); + if( pSort->bOrderedInnerLoop ){ + r1 = ++pParse->nMem; + sqlite3VdbeAddOp3(v, OP_Column, pSort->iECursor, nExpr, r1); + VdbeComment((v, "seq")); + } sqlite3VdbeAddOp1(v, OP_Delete, pSort->iECursor); + if( pSort->bOrderedInnerLoop ){ + /* If the inner loop is driven by an index such that values from + ** the same iteration of the inner loop are in sorted order, then + ** immediately jump to the next iteration of an inner loop if the + ** entry from the current iteration does not fit into the top + ** LIMIT+OFFSET entries of the sorter. 
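The pushOntoSorter() comment above describes a capped sorter: fill to LIMIT+OFFSET entries, then pair every further insert with deleting whichever entry currently sorts last, so memory stays bounded; the bOrderedInnerLoop branch adds an early exit once a rejected entry proves the rest of its inner loop cannot qualify either. The same keep-only-the-best-K discipline on a toy fixed array (TopK is a hypothetical name, and the linear eviction scan stands in for the sorter's own ordering):

/* Keep only the K smallest keys seen so far, K fixed at 8 here. */
typedef struct TopK { int aKey[8]; int nUsed; } TopK;

static void topKInsert(TopK *p, int key){
  int i, iWorst = 0;
  if( p->nUsed < (int)(sizeof(p->aKey)/sizeof(p->aKey[0])) ){
    p->aKey[p->nUsed++] = key;   /* filling phase: accept everything */
    return;
  }
  for(i=1; i<p->nUsed; i++){
    if( p->aKey[i] > p->aKey[iWorst] ) iWorst = i;  /* find current worst */
  }
  if( key < p->aKey[iWorst] ) p->aKey[iWorst] = key;  /* evict the largest */
}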
*/ + int iBrk = sqlite3VdbeCurrentAddr(v) + 2; + sqlite3VdbeAddOp3(v, OP_Eq, regBase+nExpr, iBrk, r1); + sqlite3VdbeChangeP5(v, SQLITE_NULLEQ); + VdbeCoverage(v); + } sqlite3VdbeJumpHere(v, addr); } } @@ -111518,7 +114496,7 @@ static void selectInnerLoop( */ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ int nExtra = (N+X)*(sizeof(CollSeq*)+1); - KeyInfo *p = sqlite3Malloc(sizeof(KeyInfo) + nExtra); + KeyInfo *p = sqlite3DbMallocRaw(db, sizeof(KeyInfo) + nExtra); if( p ){ p->aSortOrder = (u8*)&p->aColl[N+X]; p->nField = (u16)N; @@ -111540,7 +114518,7 @@ SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){ if( p ){ assert( p->nRef>0 ); p->nRef--; - if( p->nRef==0 ) sqlite3DbFree(0, p); + if( p->nRef==0 ) sqlite3DbFree(p->db, p); } } @@ -112301,20 +115279,20 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){ ** Get a VDBE for the given parser context. Create a new one if necessary. ** If an error occurs, return NULL and leave a message in pParse. */ -SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){ - Vdbe *v = pParse->pVdbe; - if( v==0 ){ - v = pParse->pVdbe = sqlite3VdbeCreate(pParse); - if( v ) sqlite3VdbeAddOp0(v, OP_Init); - if( pParse->pToplevel==0 - && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst) - ){ - pParse->okConstFactor = 1; - } - +static SQLITE_NOINLINE Vdbe *allocVdbe(Parse *pParse){ + Vdbe *v = pParse->pVdbe = sqlite3VdbeCreate(pParse); + if( v ) sqlite3VdbeAddOp0(v, OP_Init); + if( pParse->pToplevel==0 + && OptimizationEnabled(pParse->db,SQLITE_FactorOutConst) + ){ + pParse->okConstFactor = 1; } return v; } +SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse *pParse){ + Vdbe *v = pParse->pVdbe; + return v ? v : allocVdbe(pParse); +} /* @@ -114297,12 +117275,18 @@ static int pushDownWhereTerms( ){ Expr *pNew; int nChng = 0; + Select *pX; /* For looping over compound SELECTs in pSubq */ if( pWhere==0 ) return 0; - if( (pSubq->selFlags & (SF_Aggregate|SF_Recursive))!=0 ){ - return 0; /* restrictions (1) and (2) */ + for(pX=pSubq; pX; pX=pX->pPrior){ + if( (pX->selFlags & (SF_Aggregate|SF_Recursive))!=0 ){ + testcase( pX->selFlags & SF_Aggregate ); + testcase( pX->selFlags & SF_Recursive ); + testcase( pX!=pSubq ); + return 0; /* restrictions (1) and (2) */ + } } if( pSubq->pLimit!=0 ){ - return 0; /* restriction (3) */ + return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ nChng += pushDownWhereTerms(db, pSubq, pWhere->pRight, iCursor); @@ -115604,6 +118588,13 @@ SQLITE_PRIVATE int sqlite3Select( ** the sDistinct.isTnct is still set. 
Hence, isTnct represents the ** original setting of the SF_Distinct flag, not the current setting */ assert( sDistinct.isTnct ); + +#if SELECTTRACE_ENABLED + if( sqlite3SelectTrace & 0x400 ){ + SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); + sqlite3TreeViewSelect(0, p, 0); + } +#endif } /* If there is an ORDER BY clause, then create an ephemeral index to @@ -115675,6 +118666,7 @@ SQLITE_PRIVATE int sqlite3Select( } if( sSort.pOrderBy ){ sSort.nOBSat = sqlite3WhereIsOrdered(pWInfo); + sSort.bOrderedInnerLoop = sqlite3WhereOrderedInnerLoop(pWInfo); if( sSort.nOBSat==sSort.pOrderBy->nExpr ){ sSort.pOrderBy = 0; } @@ -117842,7 +120834,8 @@ SQLITE_PRIVATE void sqlite3Update( if( HasRowid(pTab) ){ sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid); pWInfo = sqlite3WhereBegin( - pParse, pTabList, pWhere, 0, 0, WHERE_ONEPASS_DESIRED, iIdxCur + pParse, pTabList, pWhere, 0, 0, + WHERE_ONEPASS_DESIRED | WHERE_SEEK_TABLE, iIdxCur ); if( pWInfo==0 ) goto update_cleanup; okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass); @@ -118080,11 +121073,30 @@ SQLITE_PRIVATE void sqlite3Update( VdbeCoverageNeverTaken(v); } sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur, aRegIdx, -1); - - /* If changing the record number, delete the old record. */ + + /* If changing the rowid value, or if there are foreign key constraints + ** to process, delete the old record. Otherwise, add a noop OP_Delete + ** to invoke the pre-update hook. + ** + ** That (regNew==regnewRowid+1) is true is also important for the + ** pre-update hook. If the caller invokes preupdate_new(), the returned + ** value is copied from memory cell (regNewRowid+1+iCol), where iCol + ** is the column index supplied by the user. + */ + assert( regNew==regNewRowid+1 ); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + sqlite3VdbeAddOp3(v, OP_Delete, iDataCur, + OPFLAG_ISUPDATE | ((hasFK || chngKey || pPk!=0) ? 
0 : OPFLAG_ISNOOP), + regNewRowid + ); + if( !pParse->nested ){ + sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE); + } +#else if( hasFK || chngKey || pPk!=0 ){ sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0); } +#endif if( bReplace || chngKey ){ sqlite3VdbeJumpHere(v, addr1); } @@ -118424,7 +121436,7 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ int saved_flags; /* Saved value of the db->flags */ int saved_nChange; /* Saved value of db->nChange */ int saved_nTotalChange; /* Saved value of db->nTotalChange */ - void (*saved_xTrace)(void*,const char*); /* Saved db->xTrace */ + u8 saved_mTrace; /* Saved trace settings */ Db *pDb = 0; /* Database to detach at end of vacuum */ int isMemDb; /* True if vacuuming a :memory: database */ int nRes; /* Bytes of reserved space at the end of each page */ @@ -118445,10 +121457,10 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ saved_flags = db->flags; saved_nChange = db->nChange; saved_nTotalChange = db->nTotalChange; - saved_xTrace = db->xTrace; + saved_mTrace = db->mTrace; db->flags |= SQLITE_WriteSchema | SQLITE_IgnoreChecks | SQLITE_PreferBuiltin; db->flags &= ~(SQLITE_ForeignKeys | SQLITE_ReverseOrder); - db->xTrace = 0; + db->mTrace = 0; pMain = db->aDb[0].pBt; isMemDb = sqlite3PagerIsMemdb(sqlite3BtreePager(pMain)); @@ -118500,6 +121512,8 @@ SQLITE_PRIVATE int sqlite3RunVacuum(char **pzErrMsg, sqlite3 *db){ } #endif + sqlite3BtreeSetCacheSize(pTemp, db->aDb[0].pSchema->cache_size); + sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0)); rc = execSql(db, pzErrMsg, "PRAGMA vacuum_db.synchronous=OFF"); if( rc!=SQLITE_OK ) goto end_of_vacuum; @@ -118648,7 +121662,7 @@ end_of_vacuum: db->flags = saved_flags; db->nChange = saved_nChange; db->nTotalChange = saved_nTotalChange; - db->xTrace = saved_xTrace; + db->mTrace = saved_mTrace; sqlite3BtreeSetPageSize(pMain, -1, -1, 1); /* Currently there is an SQL level transaction open on the vacuum @@ -119097,7 +122111,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ v = sqlite3GetVdbe(pParse); sqlite3ChangeCookie(pParse, iDb); - sqlite3VdbeAddOp2(v, OP_Expire, 0, 0); + sqlite3VdbeAddOp0(v, OP_Expire); zWhere = sqlite3MPrintf(db, "name='%q' AND type='table'", pTab->zName); sqlite3VdbeAddParseSchemaOp(v, iDb, zWhere); @@ -119433,10 +122447,24 @@ SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3 *db, const char *zCre && (pParse->pNewTable->tabFlags & TF_Virtual)==0 ){ if( !pTab->aCol ){ - pTab->aCol = pParse->pNewTable->aCol; - pTab->nCol = pParse->pNewTable->nCol; - pParse->pNewTable->nCol = 0; - pParse->pNewTable->aCol = 0; + Table *pNew = pParse->pNewTable; + Index *pIdx; + pTab->aCol = pNew->aCol; + pTab->nCol = pNew->nCol; + pTab->tabFlags |= pNew->tabFlags & (TF_WithoutRowid|TF_NoVisibleRowid); + pNew->nCol = 0; + pNew->aCol = 0; + assert( pTab->pIndex==0 ); + if( !HasRowid(pNew) && pCtx->pVTable->pMod->pModule->xUpdate!=0 ){ + rc = SQLITE_ERROR; + } + pIdx = pNew->pIndex; + if( pIdx ){ + assert( pIdx->pNext==0 ); + pTab->pIndex = pIdx; + pNew->pIndex = 0; + pIdx->pTable = pTab; + } } pCtx->bDeclared = 1; }else{ @@ -119472,7 +122500,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3 *db, int iDb, const char *zTab Table *pTab; pTab = sqlite3FindTable(db, zTab, db->aDb[iDb].zName); - if( ALWAYS(pTab!=0 && pTab->pVTable!=0) ){ + if( pTab!=0 && ALWAYS(pTab->pVTable!=0) ){ VTable *p; int (*xDestroy)(sqlite3_vtab *); for(p=pTab->pVTable; p; p=p->pNext){ @@ -119612,7 +122640,10 @@ SQLITE_PRIVATE int 
sqlite3VtabBegin(sqlite3 *db, VTable *pVTab){ if( rc==SQLITE_OK ){ int iSvpt = db->nStatement + db->nSavepoint; addToVTrans(db, pVTab); - if( iSvpt ) rc = sqlite3VtabSavepoint(db, SAVEPOINT_BEGIN, iSvpt-1); + if( iSvpt && pModule->xSavepoint ){ + pVTab->iSavepoint = iSvpt; + rc = pModule->xSavepoint(pVTab->pVtab, iSvpt-1); + } } } @@ -119766,7 +122797,7 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ } /* -** Check to see if virtual tale module pMod can be have an eponymous +** Check to see if virtual table module pMod can have an eponymous ** virtual table instance. If it can, create one if one does not already ** exist. Return non-zero if the eponymous virtual table instance exists ** when this routine returns, and return zero if it does not exist. @@ -119783,17 +122814,18 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ const sqlite3_module *pModule = pMod->pModule; Table *pTab; char *zErr = 0; - int nName; int rc; sqlite3 *db = pParse->db; if( pMod->pEpoTab ) return 1; if( pModule->xCreate!=0 && pModule->xCreate!=pModule->xConnect ) return 0; - nName = sqlite3Strlen30(pMod->zName) + 1; - pTab = sqlite3DbMallocZero(db, sizeof(Table) + nName); + pTab = sqlite3DbMallocZero(db, sizeof(Table)); if( pTab==0 ) return 0; + pTab->zName = sqlite3DbStrDup(db, pMod->zName); + if( pTab->zName==0 ){ + sqlite3DbFree(db, pTab); + return 0; + } pMod->pEpoTab = pTab; - pTab->zName = (char*)&pTab[1]; - memcpy(pTab->zName, pMod->zName, nName); pTab->nRef = 1; pTab->pSchema = db->aDb[0].pSchema; pTab->tabFlags |= TF_Virtual; @@ -119819,9 +122851,11 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){ SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3 *db, Module *pMod){ Table *pTab = pMod->pEpoTab; if( pTab!=0 ){ - sqlite3DeleteColumnNames(db, pTab); - sqlite3VtabClear(db, pTab); - sqlite3DbFree(db, pTab); + /* Mark the table as Ephemeral prior to deleting it, so that the + ** sqlite3DeleteTable() routine will know that it is not stored in + ** the schema. */ + pTab->tabFlags |= TF_Ephemeral; + sqlite3DeleteTable(db, pTab); pMod->pEpoTab = 0; } } @@ -119980,7 +123014,7 @@ struct WhereLevel { int addrFirst; /* First instruction of interior of the loop */ int addrBody; /* Beginning of the body of this loop */ #ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS - int iLikeRepCntr; /* LIKE range processing counter register */ + u32 iLikeRepCntr; /* LIKE range processing counter register (times 2) */ int addrLikeRep; /* LIKE range processing address */ #endif u8 iFrom; /* Which entry in the FROM clause */ @@ -120318,7 +123352,7 @@ struct WhereInfo { Parse *pParse; /* Parsing and code generating context */ SrcList *pTabList; /* List of tables in the join */ ExprList *pOrderBy; /* The ORDER BY clause or NULL */ - ExprList *pResultSet; /* Result set.
DISTINCT operates on these */ + ExprList *pDistinctSet; /* DISTINCT over all these values */ WhereLoop *pLoops; /* List of all WhereLoop objects */ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ LogEst nRowOut; /* Estimated number of output rows */ @@ -120328,8 +123362,9 @@ struct WhereInfo { u8 sorted; /* True if really sorted (not just grouped) */ u8 eOnePass; /* ONEPASS_OFF, or _SINGLE, or _MULTI */ u8 untestedTerms; /* Not all WHERE terms resolved by outer loop */ - u8 eDistinct; /* One of the WHERE_DISTINCT_* values below */ + u8 eDistinct; /* One of the WHERE_DISTINCT_* values */ u8 nLevel; /* Number of nested loops */ + u8 bOrderedInnerLoop; /* True if only the inner-most loop is ordered */ int iTop; /* The very beginning of the WHERE loop */ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ @@ -120346,6 +123381,9 @@ ** where.c: */ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int); +#ifdef WHERETRACE_ENABLED +SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC); +#endif SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ int iCur, /* Cursor number of LHS */ @@ -120402,6 +123440,14 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, struct SrcList_item*, WhereC ** operators that are of interest to the query planner. An ** OR-ed combination of these values can be used when searching for ** particular WhereTerms within a WhereClause. +** +** Value constraints: +** WO_EQ == SQLITE_INDEX_CONSTRAINT_EQ +** WO_LT == SQLITE_INDEX_CONSTRAINT_LT +** WO_LE == SQLITE_INDEX_CONSTRAINT_LE +** WO_GT == SQLITE_INDEX_CONSTRAINT_GT +** WO_GE == SQLITE_INDEX_CONSTRAINT_GE +** WO_MATCH == SQLITE_INDEX_CONSTRAINT_MATCH */ #define WO_IN 0x0001 #define WO_EQ 0x0002 @@ -120554,7 +123600,7 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( pLoop = pLevel->pWLoop; flags = pLoop->wsFlags; - if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_ONETABLE_ONLY) ) return 0; + if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0; isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0 || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0)) @@ -120988,15 +124034,16 @@ static int codeAllEqualityTerms( #ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS /* -** If the most recently coded instruction is a constant range contraint -** that originated from the LIKE optimization, then change the P3 to be -** pLoop->iLikeRepCntr and set P5. +** If the most recently coded instruction is a constant range constraint +** (a string literal) that originated from the LIKE optimization, then +** set P3 and P5 on the OP_String opcode so that the string will be cast +** to a BLOB at appropriate times. ** ** The LIKE optimization tries to evaluate "x LIKE 'abc%'" as a range ** expression: "x>='ABC' AND x<'abd'". But this requires that the range ** scan loop run twice, once for strings and a second time for BLOBs. ** The OP_String opcodes on the second pass convert the upper and lower -** bound string contants to blobs. This routine makes the necessary changes +** bound string constants to blobs. This routine makes the necessary changes ** to the OP_String opcodes for that to happen.
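** As a worked example (editorial illustration, not part of the upstream change): for "x LIKE 'abc%'" the two bounds are planted as OP_String8 opcodes holding 'abc' and 'abd'; the first pass of the range scan compares them as TEXT, and the P3/P5 values set by this routine make the second pass recast those same bounds as BLOBs, so that blob values x with 'abc' <= x < 'abd' are visited as well.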
** ** Except, of course, if SQLITE_LIKE_DOESNT_MATCH_BLOBS is defined, then @@ -121015,8 +124062,8 @@ static void whereLikeOptimizationStringFixup( assert( pOp!=0 ); assert( pOp->opcode==OP_String8 || pTerm->pWC->pWInfo->pParse->db->mallocFailed ); - pOp->p3 = pLevel->iLikeRepCntr; - pOp->p5 = 1; + pOp->p3 = (int)(pLevel->iLikeRepCntr>>1); /* Register holding counter */ + pOp->p5 = (u8)(pLevel->iLikeRepCntr&1); /* ASC or DESC */ } } #else @@ -121053,6 +124100,38 @@ static int codeCursorHintCheckExpr(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } +/* +** Test whether or not expression pExpr, which was part of a WHERE clause, +** should be included in the cursor-hint for a table that is on the rhs +** of a LEFT JOIN. Set Walker.eCode to non-zero before returning if the +** expression is not suitable. +** +** An expression is unsuitable if it might evaluate to non NULL even if +** a TK_COLUMN node that does affect the value of the expression is set +** to NULL. For example: +** +** col IS NULL +** col IS NOT NULL +** coalesce(col, 1) +** CASE WHEN col THEN 0 ELSE 1 END +*/ +static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_IS + || pExpr->op==TK_ISNULL || pExpr->op==TK_ISNOT + || pExpr->op==TK_NOTNULL || pExpr->op==TK_CASE + ){ + pWalker->eCode = 1; + }else if( pExpr->op==TK_FUNCTION ){ + int d1; + char d2[3]; + if( 0==sqlite3IsLikeFunction(pWalker->pParse->db, pExpr, &d1, d2) ){ + pWalker->eCode = 1; + } + } + + return WRC_Continue; +} + /* ** This function is called on every node of an expression tree used as an @@ -121105,6 +124184,7 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ ** Insert an OP_CursorHint instruction if it is appropriate to do so. */ static void codeCursorHint( + struct SrcList_item *pTabItem, /* FROM clause item */ WhereInfo *pWInfo, /* The where clause */ WhereLevel *pLevel, /* Which loop to provide hints for */ WhereTerm *pEndRange /* Hint this end-of-scan boundary term if not NULL */ @@ -121135,7 +124215,42 @@ static void codeCursorHint( pTerm = &pWC->a[i]; if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( pTerm->prereqAll & pLevel->notReady ) continue; - if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue; + + /* Any terms specified as part of the ON(...) clause for any LEFT + ** JOIN for which the current table is not the rhs are omitted + ** from the cursor-hint. + ** + ** If this table is the rhs of a LEFT JOIN, "IS" or "IS NULL" terms + ** that were specified as part of the WHERE clause must be excluded. + ** This is to address the following: + ** + ** SELECT ... t1 LEFT JOIN t2 ON (t1.a=t2.b) WHERE t2.c IS NULL; + ** + ** Say there is a single row in t2 that matches (t1.a=t2.b), but its + ** t2.c value is not NULL. If the (t2.c IS NULL) constraint is + ** pushed down to the cursor, this row is filtered out, causing + ** SQLite to synthesize a row of NULL values. That row does match the + ** WHERE clause, and so the query returns a row, which is incorrect. + ** + ** For the same reason, WHERE terms such as: + ** + ** WHERE 1 = (t2.c IS NULL) + ** + ** are also excluded. See codeCursorHintIsOrFunction() for details.
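+ ** A concrete walk-through (illustrative data, hypothetical): let t1 hold the row a=1 and t2 hold the single row (b=1, c=7). The join finds the matching t2 row, (t2.c IS NULL) is false for it, and no row should be output. If the hint filtered that t2 row inside the cursor, the LEFT JOIN would see no match, synthesize t2.c=NULL, and the WHERE clause would then accept the synthesized row.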
+ */ + if( pTabItem->fg.jointype & JT_LEFT ){ + Expr *pExpr = pTerm->pExpr; + if( !ExprHasProperty(pExpr, EP_FromJoin) + || pExpr->iRightJoinTable!=pTabItem->iCursor + ){ + sWalker.eCode = 0; + sWalker.xExprCallback = codeCursorHintIsOrFunction; + sqlite3WalkExpr(&sWalker, pTerm->pExpr); + if( sWalker.eCode ) continue; + } + }else{ + if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue; + } /* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize ** the cursor. These terms are not needed as hints for a pure range @@ -121169,7 +124284,7 @@ static void codeCursorHint( } } #else -# define codeCursorHint(A,B,C) /* No-op */ +# define codeCursorHint(A,B,C,D) /* No-op */ #endif /* SQLITE_ENABLE_CURSOR_HINTS */ /* @@ -121203,7 +124318,7 @@ static void codeDeferredSeek( assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 ); sqlite3VdbeAddOp3(v, OP_Seek, iIdxCur, 0, iCur); - if( (pWInfo->wctrlFlags & WHERE_FORCE_TABLE) + if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE) && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask) ){ int i; @@ -121258,7 +124373,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0 - && (pWInfo->wctrlFlags & WHERE_FORCE_TABLE)==0; + && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0; VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); /* Create labels for the "break" and "continue" instructions @@ -121398,8 +124513,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg); if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); addrNxt = pLevel->addrNxt; - sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); VdbeCoverage(v); - sqlite3VdbeAddOp3(v, OP_NotExists, iCur, addrNxt, iRowidReg); + sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg); VdbeCoverage(v); sqlite3ExprCacheAffinityChange(pParse, iRowidReg, 1); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); @@ -121426,7 +124540,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pStart = pEnd; pEnd = pTerm; } - codeCursorHint(pWInfo, pLevel, pEnd); + codeCursorHint(pTabItem, pWInfo, pLevel, pEnd); if( pStart ){ Expr *pX; /* The expression that defines the start bound */ int r1, rTemp; /* Registers for holding the start boundary */ @@ -121603,14 +124717,17 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){ assert( pRangeStart!=0 ); /* LIKE opt constraints */ assert( pRangeStart->wtFlags & TERM_LIKEOPT ); /* occur in pairs */ - pLevel->iLikeRepCntr = ++pParse->nMem; - testcase( bRev ); - testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC ); - sqlite3VdbeAddOp2(v, OP_Integer, - bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC), - pLevel->iLikeRepCntr); + pLevel->iLikeRepCntr = (u32)++pParse->nMem; + sqlite3VdbeAddOp2(v, OP_Integer, 1, (int)pLevel->iLikeRepCntr); VdbeComment((v, "LIKE loop counter")); pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v); + /* iLikeRepCntr actually stores 2x the counter register number. The + ** bottom bit indicates whether the search order is ASC or DESC. 
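+ ** Equivalently (editorial illustration): the value is encoded as (reg<<1)|bDesc and decoded as reg = iLikeRepCntr>>1 and bDesc = iLikeRepCntr&1, which is how whereLikeOptimizationStringFixup() and sqlite3WhereEnd() read it back.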
*/ + testcase( bRev ); + testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC ); + assert( (bRev & ~1)==0 ); + pLevel->iLikeRepCntr <<=1; + pLevel->iLikeRepCntr |= bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC); } #endif if( pRangeStart==0 @@ -121637,7 +124754,7 @@ ** and store the values of those terms in an array of registers ** starting at regBase. */ - codeCursorHint(pWInfo, pLevel, pRangeEnd); + codeCursorHint(pTabItem, pWInfo, pLevel, pRangeEnd); regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff); assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq ); if( zStartAff ) cEndAff = zStartAff[nEq]; @@ -121676,6 +124793,7 @@ } nConstraint++; testcase( pRangeStart->wtFlags & TERM_VIRTUAL ); + bSeekPastNull = 0; }else if( bSeekPastNull ){ sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq); nConstraint++; @@ -121748,7 +124866,7 @@ if( omitTable ){ /* pIdx is a covering index. No need to access the main table. */ }else if( HasRowid(pIdx->pTable) ){ - if( pWInfo->eOnePass!=ONEPASS_OFF ){ + if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE)!=0 ){ iRowidReg = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg); sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg); @@ -121941,10 +125059,7 @@ ** eliminating duplicates from other WHERE clauses, the action for each ** sub-WHERE clause is to invoke the main loop body as a subroutine. */ - wctrlFlags = WHERE_OMIT_OPEN_CLOSE - | WHERE_FORCE_TABLE - | WHERE_ONETABLE_ONLY - | WHERE_NO_AUTOINDEX; + wctrlFlags = WHERE_OR_SUBCLAUSE | (pWInfo->wctrlFlags & WHERE_SEEK_TABLE); for(ii=0; ii<pOrWc->nTerm; ii++){ WhereTerm *pOrTerm = &pOrWc->a[ii]; if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){ @@ -122052,7 +125167,6 @@ ){ assert( pSubWInfo->a[0].iIdxCur==iCovCur ); pCov = pSubLoop->u.btree.pIndex; - wctrlFlags |= WHERE_REOPEN_IDX; }else{ pCov = 0; } @@ -122089,7 +125203,7 @@ ** a pseudo-cursor. No need to Rewind or Next such cursors. */ pLevel->op = OP_Noop; }else{ - codeCursorHint(pWInfo, pLevel, 0); + codeCursorHint(pTabItem, pWInfo, pLevel, 0); pLevel->op = aStep[bRev]; pLevel->p1 = iCur; pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrBrk); @@ -122114,7 +125228,7 @@ if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue; if( (pTerm->prereqAll & pLevel->notReady)!=0 ){ testcase( pWInfo->untestedTerms==0 && (pWInfo->wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ); pWInfo->untestedTerms = 1; continue; } @@ -122124,11 +125238,17 @@ continue; } if( pTerm->wtFlags & TERM_LIKECOND ){ + /* If the TERM_LIKECOND flag is set, that means that the range search + ** is sufficient to guarantee that the LIKE operator is true, so we + ** can skip the call to the like(A,B) function. But this only works + ** for strings. So do not skip the call to the function on the pass + ** that compares BLOBs. */ #ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS continue; #else - assert( pLevel->iLikeRepCntr>0 ); - skipLikeAddr = sqlite3VdbeAddOp1(v, OP_IfNot, pLevel->iLikeRepCntr); + u32 x = pLevel->iLikeRepCntr; + assert( x>0 ); + skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)?
OP_IfNot : OP_If, (int)(x>>1)); VdbeCoverage(v); #endif } @@ -122770,7 +125890,9 @@ static void exprAnalyzeOrTerm( if( !db->mallocFailed ){ for(j=0, pAndTerm=pAndWC->a; j<pAndWC->nTerm; j++, pAndTerm++){ assert( pAndTerm->pExpr ); - if( allowedOp(pAndTerm->pExpr->op) ){ + if( allowedOp(pAndTerm->pExpr->op) + || pAndTerm->eOperator==WO_MATCH + ){ b |= sqlite3WhereGetMask(&pWInfo->sMaskSet, pAndTerm->leftCursor); } } @@ -122985,12 +126107,10 @@ static int termIsEquivalence(Parse *pParse, Expr *pExpr){ pColl = sqlite3BinaryCompareCollSeq(pParse, pExpr->pLeft, pExpr->pRight); if( pColl==0 || sqlite3StrICmp(pColl->zName, "BINARY")==0 ) return 1; pColl = sqlite3ExprCollSeq(pParse, pExpr->pLeft); - /* Since pLeft and pRight are both a column references, their collating - ** sequence should always be defined. */ - zColl1 = ALWAYS(pColl) ? pColl->zName : 0; + zColl1 = pColl ? pColl->zName : 0; pColl = sqlite3ExprCollSeq(pParse, pExpr->pRight); - zColl2 = ALWAYS(pColl) ? pColl->zName : 0; - return sqlite3StrICmp(zColl1, zColl2)==0; + zColl2 = pColl ? pColl->zName : 0; + return sqlite3_stricmp(zColl1, zColl2)==0; } /* @@ -123324,7 +126444,7 @@ static void exprAnalyze( ** virtual tables. The native query optimizer does not attempt ** to do anything with MATCH functions. */ - if( isMatchOfColumn(pExpr, &eOp2) ){ + if( pWC->op==TK_AND && isMatchOfColumn(pExpr, &eOp2) ){ int idxNew; Expr *pRight, *pLeft; WhereTerm *pNewTerm; @@ -123484,10 +126604,10 @@ SQLITE_PRIVATE Bitmask sqlite3WhereExprUsage(WhereMaskSet *pMaskSet, Expr *p){ return mask; } mask = sqlite3WhereExprUsage(pMaskSet, p->pRight); - mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft); + if( p->pLeft ) mask |= sqlite3WhereExprUsage(pMaskSet, p->pLeft); if( ExprHasProperty(p, EP_xIsSelect) ){ mask |= exprSelectUsage(pMaskSet, p->x.pSelect); }else if( p->x.pList ){ mask |= sqlite3WhereExprListUsage(pMaskSet, p->x.pList); } return mask; } @@ -123617,6 +126737,18 @@ SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ return pWInfo->nOBSat; } +/* +** Return TRUE if the innermost loop of the WHERE clause implementation +** returns rows in ORDER BY order for a complete run of the inner loop. +** +** Across multiple iterations of outer loops, the output rows need not be +** sorted. As long as rows are sorted for just the innermost loop, this +** routine can return TRUE. +*/ +SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo *pWInfo){ + return pWInfo->bOrderedInnerLoop; +} + /* ** Return the VDBE address or label to jump to in order to continue ** immediately with the next row of a WHERE clause. @@ -123827,7 +126959,10 @@ static WhereTerm *whereScanNext(WhereScan *pScan){ ** ** The scanner will be searching the WHERE clause pWC. It will look ** for terms of the form "X <op> <expr>" where X is column iColumn of table -** iCur. The <op> must be one of the operators described by opMask. +** iCur. Or if pIdx!=0 then X is column iColumn of index pIdx. pIdx +** must be one of the indexes of table iCur. +** +** The <op> must be one of the operators described by opMask. ** ** If the search is for X and the WHERE clause contains terms of the ** form X=Y then this routine might also return terms of the form @@ -123875,11 +127010,12 @@ /* ** Search for a term in the WHERE clause that is of the form "X <op> <expr>" -** where X is a reference to the iColumn of table iCur and <op> is one of -** the WO_xx operator codes specified by the op parameter. -** Return a pointer to the term. Return 0 if not found.
+** where X is a reference to the iColumn of table iCur or of index pIdx +** if pIdx!=0 and <op> is one of the WO_xx operator codes specified by +** the op parameter. Return a pointer to the term. Return 0 if not found. ** -** If pIdx!=0 then search for terms matching the iColumn-th column of pIdx +** If pIdx!=0 then it must be one of the indexes of table iCur. +** Search for terms matching the iColumn-th column of pIdx ** rather than the iColumn-th column of table iCur. ** ** The term returned might be Y=<expr> if there is another constraint in @@ -125198,30 +128334,53 @@ static void whereTermPrint(WhereTerm *pTerm, int iTerm){ sqlite3DebugPrintf("TERM-%-3d NULL\n", iTerm); }else{ char zType[4]; + char zLeft[50]; memcpy(zType, "...", 4); if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V'; if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E'; if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L'; + if( pTerm->eOperator & WO_SINGLE ){ + sqlite3_snprintf(sizeof(zLeft),zLeft,"left={%d:%d}", + pTerm->leftCursor, pTerm->u.leftColumn); + }else if( (pTerm->eOperator & WO_OR)!=0 && pTerm->u.pOrInfo!=0 ){ + sqlite3_snprintf(sizeof(zLeft),zLeft,"indexable=0x%lld", + pTerm->u.pOrInfo->indexable); + }else{ + sqlite3_snprintf(sizeof(zLeft),zLeft,"left=%d", pTerm->leftCursor); + } sqlite3DebugPrintf( - "TERM-%-3d %p %s cursor=%-3d prob=%-3d op=0x%03x wtFlags=0x%04x\n", - iTerm, pTerm, zType, pTerm->leftCursor, pTerm->truthProb, + "TERM-%-3d %p %s %-12s prob=%-3d op=0x%03x wtFlags=0x%04x\n", + iTerm, pTerm, zType, zLeft, pTerm->truthProb, pTerm->eOperator, pTerm->wtFlags); sqlite3TreeViewExpr(0, pTerm->pExpr, 0); } } #endif +#ifdef WHERETRACE_ENABLED +/* +** Show the complete content of a WhereClause +*/ +SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ + int i; + for(i=0; i<pWC->nTerm; i++){ + whereTermPrint(&pWC->a[i], i); + } +} +#endif + #ifdef WHERETRACE_ENABLED /* ** Print a WhereLoop object for debugging purposes */ static void whereLoopPrint(WhereLoop *p, WhereClause *pWC){ WhereInfo *pWInfo = pWC->pWInfo; - int nb = 1+(pWInfo->pTabList->nSrc+7)/8; + int nb = 1+(pWInfo->pTabList->nSrc+3)/4; struct SrcList_item *pItem = pWInfo->pTabList->a + p->iTab; Table *pTab = pItem->pTab; + Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, - p->iTab, nb, p->maskSelf, nb, p->prereq); + p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); sqlite3DebugPrintf(" %12s", pItem->zAlias ? pItem->zAlias : pTab->zName); if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ @@ -126196,7 +129355,7 @@ static int whereLoopAddBtree( #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* Automatic indexes */ if( !pBuilder->pOrSet /* Not part of an OR optimization */ - && (pWInfo->wctrlFlags & WHERE_NO_AUTOINDEX)==0 + && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 && (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0 && pSrc->pIBIndex==0 /* Has no INDEXED BY clause */ && !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */ @@ -126228,6 +129387,7 @@ static int whereLoopAddBtree( pNew->rSetup += 24; } ApplyCostMultiplier(pNew->rSetup, pTab->costMult); + if( pNew->rSetup<0 ) pNew->rSetup = 0; /* TUNING: Each index lookup yields 20 rows in the table. This ** is more than the usual guess of 10 rows, since we have no way ** of knowing how selective the index will ultimately be.
It would @@ -126288,6 +129448,7 @@ /* Full scan via index */ if( b || !HasRowid(pTab) + || pProbe->pPartIdxWhere!=0 || ( m==0 && pProbe->bUnordered==0 && (pProbe->szIdxRow<pTab->szTabRow) @@ -126300,11 +129461,34 @@ /* The cost of visiting the index rows is N*K, where K is ** between 1.1 and 3.0, depending on the relative sizes of the - ** index and table rows. If this is a non-covering index scan, - ** also add the cost of visiting table rows (N*3.0). */ + ** index and table rows. */ pNew->rRun = rSize + 1 + (15*pProbe->szIdxRow)/pTab->szTabRow; if( m!=0 ){ - pNew->rRun = sqlite3LogEstAdd(pNew->rRun, rSize+16); + /* If this is a non-covering index scan, add in the cost of + ** doing table lookups. The cost will be 3x the number of + ** lookups. Take into account WHERE clause terms that can be + ** satisfied using just the index, and that do not require a + ** table lookup. */ + LogEst nLookup = rSize + 16; /* Base cost: N*3 */ + int ii; + int iCur = pSrc->iCursor; + WhereClause *pWC2 = &pWInfo->sWC; + for(ii=0; ii<pWC2->nTerm; ii++){ + WhereTerm *pTerm = &pWC2->a[ii]; + if( !sqlite3ExprCoveredByIndex(pTerm->pExpr, iCur, pProbe) ){ + break; + } + /* pTerm can be evaluated using just the index. So reduce + ** the expected number of table lookups accordingly */ + if( pTerm->truthProb<=0 ){ + nLookup += pTerm->truthProb; + }else{ + nLookup--; + if( pTerm->eOperator & (WO_EQ|WO_IS) ) nLookup -= 19; + } + } + + pNew->rRun = sqlite3LogEstAdd(pNew->rRun, nLookup); } ApplyCostMultiplier(pNew->rRun, pTab->costMult); whereLoopOutputAdjust(pWC, pNew, rSize); @@ -126673,9 +129857,7 @@ static int whereLoopAddOr( WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n", (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm)); if( sqlite3WhereTrace & 0x400 ){ - for(i=0; i<sSubBuild.pWC->nTerm; i++){ - whereTermPrint(&sSubBuild.pWC->a[i], i); - } + sqlite3WhereClausePrint(sSubBuild.pWC); } #endif #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -126768,6 +129950,7 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ mPrereq = mPrior; } priorJointype = pItem->fg.jointype; +#ifndef SQLITE_OMIT_VIRTUALTABLE if( IsVirtual(pItem->pTab) ){ struct SrcList_item *p; for(p=&pItem[1]; p<pEnd; p++){ u16 nLoop, /* Number of entries in pPath->aLoop[] */ WhereLoop *pLast, /* Add this WhereLoop to the end of pPath->aLoop[] */ Bitmask *pRevMask /* OUT: Mask of WhereLoops to run in reverse order */ @@ -126822,6 +130007,7 @@ u8 isOrderDistinct; /* All prior WhereLoops are order-distinct */ u8 distinctColumns; /* True if the loop has UNIQUE NOT NULL columns */ u8 isMatch; /* iColumn matches a term of the ORDER BY clause */ + u16 eqOpMask; /* Allowed equality operators */ u16 nKeyCol; /* Number of key columns in pIndex */ u16 nColumn; /* Total number of ordered columns in the index */ u16 nOrderBy; /* Number terms in the ORDER BY clause */ @@ -126872,9 +130058,16 @@ obDone = MASKBIT(nOrderBy)-1; orderDistinctMask = 0; ready = 0; + eqOpMask = WO_EQ | WO_IS | WO_ISNULL; + if( wctrlFlags & WHERE_ORDERBY_LIMIT ) eqOpMask |= WO_IN; for(iLoop=0; isOrderDistinct && obSat<obDone && iLoop<=nLoop; iLoop++){ if( iLoop>0 ) ready |= pLoop->maskSelf; - pLoop = iLoop<nLoop ? pPath->aLoop[iLoop] : pLast; + if( iLoop<nLoop ){ + pLoop = pPath->aLoop[iLoop]; + if( wctrlFlags & WHERE_ORDERBY_LIMIT ) continue; + }else{ + pLoop = pLast; + } if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){ if( pLoop->u.vtab.isOrdered ) obSat = obDone; break; @@ -126892,7 +130085,7 @@ if( pOBExpr->op!=TK_COLUMN ) continue; if( pOBExpr->iTable!=iCur ) continue; pTerm = 
sqlite3WhereFindTerm(&pWInfo->sWC, iCur, pOBExpr->iColumn, - ~ready, WO_EQ|WO_ISNULL|WO_IS, 0); + ~ready, eqOpMask, 0); if( pTerm==0 ) continue; if( (pTerm->eOperator&(WO_EQ|WO_IS))!=0 && pOBExpr->iColumn>=0 ){ const char *z1, *z2; @@ -126932,10 +130125,12 @@ for(j=0; j<nColumn; j++){ u8 bOnce; /* True to run the ORDER BY search loop */ /* Skip over == and IS NULL terms */ if( j<pLoop->u.btree.nEq && pLoop->nSkip==0 - && ((i = pLoop->aLTerm[j]->eOperator) & (WO_EQ|WO_ISNULL|WO_IS))!=0 + && ((i = pLoop->aLTerm[j]->eOperator) & eqOpMask)!=0 ){ if( i & WO_ISNULL ){ testcase( isOrderDistinct ); @@ -127446,9 +130641,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ && nRowEst ){ Bitmask notUsed; - int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pResultSet, pFrom, + int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pDistinctSet, pFrom, WHERE_DISTINCTBY, nLoop-1, pFrom->aLoop[nLoop-1], &notUsed); - if( rc==pWInfo->pResultSet->nExpr ){ + if( rc==pWInfo->pDistinctSet->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } } @@ -127459,8 +130654,19 @@ } }else{ pWInfo->nOBSat = pFrom->isOrdered; - if( pWInfo->nOBSat<0 ) pWInfo->nOBSat = 0; pWInfo->revMask = pFrom->revLoop; + if( pWInfo->nOBSat<=0 ){ + pWInfo->nOBSat = 0; + if( nLoop>0 && (pFrom->aLoop[nLoop-1]->wsFlags & WHERE_ONEROW)==0 ){ + Bitmask m = 0; + int rc = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom, + WHERE_ORDERBY_LIMIT, nLoop-1, pFrom->aLoop[nLoop-1], &m); + if( rc==pWInfo->pOrderBy->nExpr ){ + pWInfo->bOrderedInnerLoop = 1; + pWInfo->revMask = m; + } + } + } } if( (pWInfo->wctrlFlags & WHERE_SORTBYGROUP) && pWInfo->nOBSat==pWInfo->pOrderBy->nExpr && nLoop>0 @@ -127508,7 +130714,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ Index *pIdx; pWInfo = pBuilder->pWInfo; - if( pWInfo->wctrlFlags & WHERE_FORCE_TABLE ) return 0; + if( pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE ) return 0; assert( pWInfo->pTabList->nSrc>=1 ); pItem = pWInfo->pTabList->a; pTab = pItem->pTab; @@ -127655,7 +130861,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ ** is called from an UPDATE or DELETE statement, then pOrderBy is NULL. ** ** The iIdxCur parameter is the cursor number of an index. If -** WHERE_ONETABLE_ONLY is set, iIdxCur is the cursor number of an index +** WHERE_OR_SUBCLAUSE is set, iIdxCur is the cursor number of an index ** to use for OR clause processing. The WHERE clause should use this ** specific cursor. If WHERE_ONEPASS_DESIRED is set, then iIdxCur is ** the first cursor in an array of cursors for all indices. iIdxCur should @@ -127663,14 +130869,14 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ ** used. 
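** ** A minimal sketch of the expected calling pattern (hypothetical caller code, for illustration only; the argument values are assumptions): ** ** pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, 0, 0); ** if( pWInfo==0 ) goto error_out; ** ... generate code for the loop body ... ** sqlite3WhereEnd(pWInfo); ** ** This is the shape used by the SELECT, UPDATE, and DELETE code generators.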
*/ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( - Parse *pParse, /* The parser context */ - SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ - Expr *pWhere, /* The WHERE clause */ - ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ - ExprList *pResultSet, /* Result set of the query */ - u16 wctrlFlags, /* One of the WHERE_* flags defined in sqliteInt.h */ - int iAuxArg /* If WHERE_ONETABLE_ONLY is set, index cursor number, - ** If WHERE_USE_LIMIT, then the limit amount */ + Parse *pParse, /* The parser context */ + SrcList *pTabList, /* FROM clause: A list of all tables to be scanned */ + Expr *pWhere, /* The WHERE clause */ + ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ + ExprList *pDistinctSet, /* Try not to output two rows that duplicate these */ + u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */ + int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number ** If WHERE_USE_LIMIT, then the limit amount */ ){ int nByteWInfo; /* Num. bytes allocated for WhereInfo struct */ int nTabList; /* Number of elements in pTabList */ @@ -127688,11 +130894,11 @@ assert( (wctrlFlags & WHERE_ONEPASS_MULTIROW)==0 || ( (wctrlFlags & WHERE_ONEPASS_DESIRED)!=0 - && (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 )); - /* Only one of WHERE_ONETABLE_ONLY or WHERE_USE_LIMIT */ - assert( (wctrlFlags & WHERE_ONETABLE_ONLY)==0 + /* Only one of WHERE_OR_SUBCLAUSE or WHERE_USE_LIMIT */ + assert( (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 || (wctrlFlags & WHERE_USE_LIMIT)==0 ); /* Variable initialization */ @@ -127720,11 +130926,11 @@ } /* This function normally generates a nested loop for all tables in - ** pTabList. But if the WHERE_ONETABLE_ONLY flag is set, then we should + ** pTabList. But if the WHERE_OR_SUBCLAUSE flag is set, then we should ** only generate code for the first table in pTabList and assume that ** any cursors associated with subsequent tables are uninitialized. */ - nTabList = (wctrlFlags & WHERE_ONETABLE_ONLY) ? 1 : pTabList->nSrc; + nTabList = (wctrlFlags & WHERE_OR_SUBCLAUSE) ? 1 : pTabList->nSrc; /* Allocate and initialize the WhereInfo structure that will become the ** return value. A single allocation is used to store the WhereInfo @@ -127745,7 +130951,7 @@ pWInfo->pParse = pParse; pWInfo->pTabList = pTabList; pWInfo->pOrderBy = pOrderBy; - pWInfo->pResultSet = pResultSet; + pWInfo->pDistinctSet = pDistinctSet; pWInfo->iBreak = pWInfo->iContinue = sqlite3VdbeMakeLabel(v); pWInfo->wctrlFlags = wctrlFlags; pWInfo->iLimit = iAuxArg; @@ -127800,7 +131006,7 @@ ** Note that bitmasks are created for all pTabList->nSrc tables in ** pTabList, not just the first nTabList tables. nTabList is normally ** equal to pTabList->nSrc but might be shortened to 1 if the - ** WHERE_ONETABLE_ONLY flag is set. + ** WHERE_OR_SUBCLAUSE flag is set. */ for(ii=0; ii<pTabList->nSrc; ii++){ createMask(pMaskSet, pTabList->a[ii].iCursor); } @@ -127818,13 +131024,13 @@ if( db->mallocFailed ) goto whereBeginError; if( wctrlFlags & WHERE_WANT_DISTINCT ){ - if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pResultSet) ){ + if( isDistinctRedundant(pParse, pTabList, &pWInfo->sWC, pDistinctSet) ){ /* The DISTINCT marking is pointless. Ignore it. 
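** For example (illustrative query, an assumption for exposition): in "SELECT DISTINCT x FROM t", a UNIQUE NOT NULL index on x already guarantees that every output row is distinct, so the DISTINCT processing can be dropped.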
*/ pWInfo->eDistinct = WHERE_DISTINCT_UNIQUE; }else if( pOrderBy==0 ){ /* Try to ORDER BY the result set to make distinct processing easier */ pWInfo->wctrlFlags |= WHERE_DISTINCTBY; - pWInfo->pOrderBy = pResultSet; + pWInfo->pOrderBy = pDistinctSet; } } @@ -127838,10 +131044,7 @@ sqlite3DebugPrintf(")\n"); } if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ - int i; - for(i=0; i<sWLB.pWC->nTerm; i++){ - whereTermPrint(&sWLB.pWC->a[i], i); - } + sqlite3WhereClausePrint(sWLB.pWC); } #endif @@ -127903,10 +131106,10 @@ #endif /* Attempt to omit tables from the join that do not affect the result */ if( pWInfo->nLevel>=2 - && pResultSet!=0 + && pDistinctSet!=0 && OptimizationEnabled(db, SQLITE_OmitNoopJoin) ){ - Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pResultSet); + Bitmask tabUsed = sqlite3WhereExprListUsage(pMaskSet, pDistinctSet); if( sWLB.pOrderBy ){ tabUsed |= sqlite3WhereExprListUsage(pMaskSet, sWLB.pOrderBy); } @@ -127983,7 +131186,7 @@ }else #endif if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 - && (wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 ){ + && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){ int op = OP_OpenRead; if( pWInfo->eOnePass!=ONEPASS_OFF ){ op = OP_OpenWrite; @@ -128022,7 +131225,7 @@ /* iAuxArg is always set to a positive value if ONEPASS is possible */ assert( iAuxArg!=0 || (pWInfo->wctrlFlags & WHERE_ONEPASS_DESIRED)==0 ); if( !HasRowid(pTab) && IsPrimaryKeyIndex(pIx) - && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 + && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ){ /* This is one term of an OR-optimization using the PRIMARY KEY of a ** WITHOUT ROWID table. No need for a separate index */ @@ -128038,9 +131241,9 @@ } op = OP_OpenWrite; pWInfo->aiCurOnePass[1] = iIndexCur; - }else if( iAuxArg && (wctrlFlags & WHERE_ONETABLE_ONLY)!=0 ){ + }else if( iAuxArg && (wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 ){ iIndexCur = iAuxArg; - if( wctrlFlags & WHERE_REOPEN_IDX ) op = OP_ReopenIdx; + op = OP_ReopenIdx; }else{ iIndexCur = pParse->nTab++; } @@ -128102,7 +131305,7 @@ pLevel->addrBody = sqlite3VdbeCurrentAddr(v); notReady = sqlite3WhereCodeOneLoopStart(pWInfo, ii, notReady); pWInfo->iContinue = pLevel->addrCont; - if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_ONETABLE_ONLY)==0 ){ + if( (wsFlags&WHERE_MULTI_OR)==0 && (wctrlFlags&WHERE_OR_SUBCLAUSE)==0 ){ sqlite3WhereAddScanStatus(v, pTabList, pLevel, addrExplain); } } @@ -128172,13 +131375,8 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ } #ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS if( pLevel->addrLikeRep ){ - int op; - if( sqlite3VdbeGetOp(v, pLevel->addrLikeRep-1)->p1 ){ - op = OP_DecrJumpZero; - }else{ - op = OP_JumpZeroIncr; - } - sqlite3VdbeAddOp2(v, op, pLevel->iLikeRepCntr, pLevel->addrLikeRep); + sqlite3VdbeAddOp2(v, OP_DecrJumpZero, (int)(pLevel->iLikeRepCntr>>1), + pLevel->addrLikeRep); VdbeCoverage(v); } #endif @@ -128230,12 +131428,12 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ /* Close all of the cursors that were opened by sqlite3WhereBegin. ** Except, do not close cursors that will be reused by the OR optimization - ** (WHERE_OMIT_OPEN_CLOSE). And do not close the OP_OpenWrite cursors + ** (WHERE_OR_SUBCLAUSE). And do not close the OP_OpenWrite cursors ** created for the ONEPASS optimization. 
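** For instance (illustration): a one-pass UPDATE writes through its OP_OpenWrite cursors from inside the loop body, so those cursors must remain open when this cleanup runs.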
*/ if( (pTab->tabFlags & TF_Ephemeral)==0 && pTab->pSelect==0 - && (pWInfo->wctrlFlags & WHERE_OMIT_OPEN_CLOSE)==0 + && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){ int ws = pLoop->wsFlags; if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){ @@ -128582,26 +131780,26 @@ static void disableLookaside(Parse *pParse){ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned char -#define YYNOCODE 251 +#define YYNOCODE 252 #define YYACTIONTYPE unsigned short int -#define YYWILDCARD 70 +#define YYWILDCARD 96 #define sqlite3ParserTOKENTYPE Token typedef union { int yyinit; sqlite3ParserTOKENTYPE yy0; - struct LimitVal yy64; - Expr* yy122; - Select* yy159; - IdList* yy180; - struct {int value; int mask;} yy207; - struct LikeOp yy318; - TriggerStep* yy327; - With* yy331; - ExprSpan yy342; - SrcList* yy347; - int yy392; - struct TrigEvent yy410; - ExprList* yy442; + Expr* yy72; + TriggerStep* yy145; + ExprList* yy148; + SrcList* yy185; + ExprSpan yy190; + int yy194; + Select* yy243; + IdList* yy254; + With* yy285; + struct TrigEvent yy332; + struct LimitVal yy354; + struct LikeOp yy392; + struct {int value; int mask;} yy497; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -128611,16 +131809,16 @@ typedef union { #define sqlite3ParserARG_FETCH Parse *pParse = yypParser->pParse #define sqlite3ParserARG_STORE yypParser->pParse = pParse #define YYFALLBACK 1 -#define YYNSTATE 440 -#define YYNRULE 326 -#define YY_MAX_SHIFT 439 -#define YY_MIN_SHIFTREDUCE 649 -#define YY_MAX_SHIFTREDUCE 974 -#define YY_MIN_REDUCE 975 -#define YY_MAX_REDUCE 1300 -#define YY_ERROR_ACTION 1301 -#define YY_ACCEPT_ACTION 1302 -#define YY_NO_ACTION 1303 +#define YYNSTATE 443 +#define YYNRULE 328 +#define YY_MAX_SHIFT 442 +#define YY_MIN_SHIFTREDUCE 653 +#define YY_MAX_SHIFTREDUCE 980 +#define YY_MIN_REDUCE 981 +#define YY_MAX_REDUCE 1308 +#define YY_ERROR_ACTION 1309 +#define YY_ACCEPT_ACTION 1310 +#define YY_NO_ACTION 1311 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -128688,444 +131886,448 @@ typedef union { ** yy_default[] Default action for each state. 
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (1499) +#define YY_ACTTAB_COUNT (1507) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 315, 1302, 146, 921, 2, 194, 922, 342, 952, 91, - /* 10 */ 91, 91, 91, 84, 89, 89, 89, 89, 88, 88, - /* 20 */ 87, 87, 87, 86, 339, 87, 87, 87, 86, 339, - /* 30 */ 331, 819, 819, 91, 91, 91, 91, 339, 89, 89, - /* 40 */ 89, 89, 88, 88, 87, 87, 87, 86, 339, 319, - /* 50 */ 933, 933, 92, 93, 83, 831, 834, 823, 823, 90, - /* 60 */ 90, 91, 91, 91, 91, 123, 89, 89, 89, 89, - /* 70 */ 88, 88, 87, 87, 87, 86, 339, 315, 952, 89, - /* 80 */ 89, 89, 89, 88, 88, 87, 87, 87, 86, 339, - /* 90 */ 365, 772, 360, 24, 933, 933, 947, 694, 933, 933, - /* 100 */ 773, 937, 933, 933, 434, 715, 328, 434, 819, 819, - /* 110 */ 203, 160, 278, 391, 273, 390, 190, 933, 933, 370, - /* 120 */ 934, 935, 367, 271, 953, 48, 679, 953, 48, 92, - /* 130 */ 93, 83, 831, 834, 823, 823, 90, 90, 91, 91, - /* 140 */ 91, 91, 123, 89, 89, 89, 89, 88, 88, 87, - /* 150 */ 87, 87, 86, 339, 315, 682, 337, 336, 218, 412, - /* 160 */ 398, 68, 412, 403, 934, 935, 743, 959, 934, 935, - /* 170 */ 810, 937, 934, 935, 957, 221, 958, 88, 88, 87, - /* 180 */ 87, 87, 86, 339, 291, 819, 819, 934, 935, 185, - /* 190 */ 94, 792, 388, 385, 384, 1240, 1240, 792, 804, 960, - /* 200 */ 960, 290, 798, 383, 123, 315, 92, 93, 83, 831, - /* 210 */ 834, 823, 823, 90, 90, 91, 91, 91, 91, 326, - /* 220 */ 89, 89, 89, 89, 88, 88, 87, 87, 87, 86, - /* 230 */ 339, 681, 741, 803, 803, 803, 819, 819, 944, 56, - /* 240 */ 253, 353, 242, 85, 82, 168, 253, 358, 252, 110, - /* 250 */ 96, 233, 397, 698, 677, 683, 683, 92, 93, 83, - /* 260 */ 831, 834, 823, 823, 90, 90, 91, 91, 91, 91, - /* 270 */ 433, 89, 89, 89, 89, 88, 88, 87, 87, 87, - /* 280 */ 86, 339, 315, 434, 439, 651, 396, 57, 733, 733, - /* 290 */ 234, 291, 107, 287, 395, 86, 339, 810, 427, 728, - /* 300 */ 933, 933, 185, 953, 30, 388, 385, 384, 215, 949, - /* 310 */ 434, 933, 933, 819, 819, 697, 383, 162, 161, 407, - /* 320 */ 400, 85, 82, 168, 677, 804, 335, 113, 771, 798, - /* 330 */ 953, 48, 22, 351, 92, 93, 83, 831, 834, 823, - /* 340 */ 823, 90, 90, 91, 91, 91, 91, 870, 89, 89, - /* 350 */ 89, 89, 88, 88, 87, 87, 87, 86, 339, 315, - /* 360 */ 803, 803, 803, 268, 123, 412, 394, 1, 933, 933, - /* 370 */ 934, 935, 933, 933, 85, 82, 168, 232, 5, 343, - /* 380 */ 194, 934, 935, 952, 85, 82, 168, 54, 956, 434, - /* 390 */ 819, 819, 431, 938, 939, 792, 67, 759, 350, 144, - /* 400 */ 166, 770, 123, 896, 889, 955, 348, 288, 758, 953, - /* 410 */ 47, 92, 93, 83, 831, 834, 823, 823, 90, 90, - /* 420 */ 91, 91, 91, 91, 892, 89, 89, 89, 89, 88, - /* 430 */ 88, 87, 87, 87, 86, 339, 315, 113, 934, 935, - /* 440 */ 687, 893, 934, 935, 253, 358, 252, 85, 82, 168, - /* 450 */ 820, 820, 956, 952, 338, 938, 939, 894, 701, 721, - /* 460 */ 359, 289, 233, 397, 434, 349, 434, 819, 819, 955, - /* 470 */ 866, 722, 23, 389, 832, 835, 692, 357, 904, 667, - /* 480 */ 194, 702, 402, 952, 953, 48, 953, 48, 92, 93, - /* 490 */ 83, 831, 834, 823, 823, 90, 90, 91, 91, 91, - /* 500 */ 91, 824, 89, 89, 89, 89, 88, 88, 87, 87, - /* 510 */ 87, 86, 339, 315, 434, 113, 434, 680, 434, 332, - /* 520 */ 434, 408, 889, 356, 380, 940, 401, 720, 948, 864, - /* 530 */ 191, 165, 329, 689, 953, 9, 953, 9, 953, 9, - /* 540 */ 953, 9, 718, 948, 819, 819, 953, 8, 325, 111, - /* 550 */ 327, 153, 224, 952, 410, 113, 189, 337, 336, 913, - /* 560 */ 1295, 852, 75, 1295, 73, 92, 93, 83, 831, 834, - /* 570 */ 823, 823, 90, 90, 91, 91, 91, 91, 359, 89, - /* 580 */ 
89, 89, 89, 88, 88, 87, 87, 87, 86, 339, - /* 590 */ 315, 730, 148, 236, 797, 366, 789, 892, 1179, 434, - /* 600 */ 960, 960, 400, 148, 314, 212, 873, 911, 757, 404, - /* 610 */ 872, 300, 320, 434, 893, 311, 237, 271, 405, 953, - /* 620 */ 34, 819, 819, 225, 371, 945, 360, 913, 1296, 113, - /* 630 */ 894, 1296, 417, 953, 35, 1245, 922, 342, 259, 247, - /* 640 */ 290, 315, 92, 93, 83, 831, 834, 823, 823, 90, - /* 650 */ 90, 91, 91, 91, 91, 148, 89, 89, 89, 89, - /* 660 */ 88, 88, 87, 87, 87, 86, 339, 310, 434, 796, - /* 670 */ 434, 240, 819, 819, 266, 911, 876, 876, 373, 346, - /* 680 */ 167, 654, 655, 656, 259, 244, 19, 246, 953, 11, - /* 690 */ 953, 26, 222, 92, 93, 83, 831, 834, 823, 823, - /* 700 */ 90, 90, 91, 91, 91, 91, 757, 89, 89, 89, - /* 710 */ 89, 88, 88, 87, 87, 87, 86, 339, 315, 434, - /* 720 */ 261, 434, 264, 696, 434, 241, 434, 344, 971, 308, - /* 730 */ 757, 434, 796, 434, 324, 434, 393, 423, 434, 953, - /* 740 */ 36, 953, 37, 20, 953, 38, 953, 27, 434, 819, - /* 750 */ 819, 953, 28, 953, 39, 953, 40, 738, 953, 41, - /* 760 */ 71, 738, 737, 245, 307, 973, 737, 259, 953, 10, - /* 770 */ 92, 93, 83, 831, 834, 823, 823, 90, 90, 91, - /* 780 */ 91, 91, 91, 434, 89, 89, 89, 89, 88, 88, - /* 790 */ 87, 87, 87, 86, 339, 315, 434, 372, 434, 259, - /* 800 */ 149, 434, 167, 953, 42, 188, 187, 186, 219, 434, - /* 810 */ 748, 434, 974, 434, 796, 434, 953, 98, 953, 43, - /* 820 */ 862, 953, 44, 434, 920, 2, 819, 819, 757, 953, - /* 830 */ 31, 953, 45, 953, 46, 953, 32, 74, 307, 912, - /* 840 */ 220, 259, 259, 953, 115, 909, 315, 92, 93, 83, - /* 850 */ 831, 834, 823, 823, 90, 90, 91, 91, 91, 91, - /* 860 */ 434, 89, 89, 89, 89, 88, 88, 87, 87, 87, - /* 870 */ 86, 339, 434, 248, 434, 215, 949, 819, 819, 333, - /* 880 */ 953, 116, 895, 860, 176, 259, 974, 400, 361, 259, - /* 890 */ 951, 887, 953, 117, 953, 52, 884, 315, 92, 93, - /* 900 */ 83, 831, 834, 823, 823, 90, 90, 91, 91, 91, - /* 910 */ 91, 434, 89, 89, 89, 89, 88, 88, 87, 87, - /* 920 */ 87, 86, 339, 434, 113, 434, 258, 883, 819, 819, - /* 930 */ 727, 953, 33, 363, 259, 673, 321, 189, 430, 321, - /* 940 */ 368, 365, 364, 953, 99, 953, 49, 365, 315, 92, - /* 950 */ 81, 83, 831, 834, 823, 823, 90, 90, 91, 91, - /* 960 */ 91, 91, 434, 89, 89, 89, 89, 88, 88, 87, - /* 970 */ 87, 87, 86, 339, 434, 723, 434, 214, 165, 819, - /* 980 */ 819, 772, 953, 100, 322, 124, 1269, 158, 65, 710, - /* 990 */ 773, 700, 699, 320, 953, 101, 953, 97, 255, 315, - /* 1000 */ 216, 93, 83, 831, 834, 823, 823, 90, 90, 91, - /* 1010 */ 91, 91, 91, 434, 89, 89, 89, 89, 88, 88, - /* 1020 */ 87, 87, 87, 86, 339, 434, 251, 434, 707, 708, - /* 1030 */ 819, 819, 223, 953, 114, 908, 794, 254, 309, 193, - /* 1040 */ 67, 381, 869, 869, 199, 953, 112, 953, 105, 269, - /* 1050 */ 726, 260, 67, 83, 831, 834, 823, 823, 90, 90, - /* 1060 */ 91, 91, 91, 91, 263, 89, 89, 89, 89, 88, - /* 1070 */ 88, 87, 87, 87, 86, 339, 79, 429, 690, 3, - /* 1080 */ 1174, 228, 434, 113, 340, 340, 868, 868, 265, 79, - /* 1090 */ 429, 735, 3, 859, 70, 432, 434, 340, 340, 434, - /* 1100 */ 1259, 434, 953, 104, 434, 670, 416, 766, 432, 434, - /* 1110 */ 193, 434, 413, 434, 418, 806, 953, 102, 420, 953, - /* 1120 */ 103, 953, 48, 123, 953, 51, 810, 418, 424, 953, - /* 1130 */ 53, 953, 50, 953, 25, 267, 123, 711, 113, 810, - /* 1140 */ 428, 277, 695, 272, 764, 113, 76, 77, 690, 434, - /* 1150 */ 795, 113, 276, 78, 436, 435, 412, 414, 798, 76, - /* 1160 */ 77, 113, 855, 859, 376, 199, 78, 436, 435, 953, - /* 1170 */ 29, 798, 744, 113, 755, 79, 429, 675, 3, 415, - /* 1180 */ 109, 292, 293, 340, 
340, 806, 802, 678, 672, 803, - /* 1190 */ 803, 803, 805, 18, 432, 661, 660, 662, 927, 209, - /* 1200 */ 150, 352, 803, 803, 803, 805, 18, 6, 306, 280, - /* 1210 */ 282, 284, 786, 418, 250, 386, 243, 886, 694, 362, - /* 1220 */ 286, 163, 275, 79, 429, 810, 3, 857, 856, 159, - /* 1230 */ 419, 340, 340, 298, 930, 968, 126, 196, 965, 903, - /* 1240 */ 901, 323, 432, 136, 55, 76, 77, 742, 147, 58, - /* 1250 */ 121, 129, 78, 436, 435, 65, 783, 798, 354, 131, - /* 1260 */ 355, 418, 379, 132, 133, 134, 175, 139, 151, 369, - /* 1270 */ 888, 180, 791, 810, 61, 851, 871, 69, 429, 375, - /* 1280 */ 3, 756, 210, 257, 181, 340, 340, 145, 803, 803, - /* 1290 */ 803, 805, 18, 76, 77, 377, 432, 262, 182, 183, - /* 1300 */ 78, 436, 435, 663, 312, 798, 392, 714, 713, 712, - /* 1310 */ 330, 705, 692, 313, 704, 418, 686, 406, 752, 685, - /* 1320 */ 274, 684, 942, 64, 279, 195, 281, 810, 753, 839, - /* 1330 */ 751, 283, 72, 750, 285, 422, 803, 803, 803, 805, - /* 1340 */ 18, 334, 426, 95, 411, 229, 409, 76, 77, 230, - /* 1350 */ 734, 66, 231, 294, 78, 436, 435, 204, 295, 798, - /* 1360 */ 217, 296, 297, 669, 21, 305, 304, 303, 206, 301, - /* 1370 */ 437, 928, 664, 205, 208, 207, 438, 658, 657, 652, - /* 1380 */ 118, 108, 119, 226, 650, 341, 157, 170, 169, 239, - /* 1390 */ 803, 803, 803, 805, 18, 125, 120, 235, 238, 317, - /* 1400 */ 318, 345, 106, 790, 867, 127, 865, 128, 130, 724, - /* 1410 */ 249, 172, 174, 882, 135, 137, 59, 138, 173, 60, - /* 1420 */ 885, 123, 171, 177, 178, 881, 7, 12, 179, 256, - /* 1430 */ 874, 140, 193, 962, 374, 141, 666, 152, 378, 276, - /* 1440 */ 184, 382, 142, 122, 62, 13, 387, 703, 270, 14, - /* 1450 */ 63, 227, 809, 808, 837, 732, 15, 841, 736, 4, - /* 1460 */ 765, 211, 399, 164, 213, 143, 760, 201, 70, 316, - /* 1470 */ 67, 838, 836, 891, 198, 192, 16, 197, 890, 917, - /* 1480 */ 154, 17, 202, 421, 918, 155, 200, 156, 425, 840, - /* 1490 */ 807, 1261, 676, 80, 302, 299, 347, 1260, 923, + /* 0 */ 317, 814, 341, 808, 5, 195, 195, 802, 93, 94, + /* 10 */ 84, 823, 823, 835, 838, 827, 827, 91, 91, 92, + /* 20 */ 92, 92, 92, 293, 90, 90, 90, 90, 89, 89, + /* 30 */ 88, 88, 88, 87, 341, 317, 958, 958, 807, 807, + /* 40 */ 807, 928, 344, 93, 94, 84, 823, 823, 835, 838, + /* 50 */ 827, 827, 91, 91, 92, 92, 92, 92, 328, 90, + /* 60 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 341, + /* 70 */ 89, 89, 88, 88, 88, 87, 341, 776, 958, 958, + /* 80 */ 317, 88, 88, 88, 87, 341, 777, 69, 93, 94, + /* 90 */ 84, 823, 823, 835, 838, 827, 827, 91, 91, 92, + /* 100 */ 92, 92, 92, 437, 90, 90, 90, 90, 89, 89, + /* 110 */ 88, 88, 88, 87, 341, 1310, 147, 147, 2, 317, + /* 120 */ 76, 25, 74, 49, 49, 87, 341, 93, 94, 84, + /* 130 */ 823, 823, 835, 838, 827, 827, 91, 91, 92, 92, + /* 140 */ 92, 92, 95, 90, 90, 90, 90, 89, 89, 88, + /* 150 */ 88, 88, 87, 341, 939, 939, 317, 260, 415, 400, + /* 160 */ 398, 58, 737, 737, 93, 94, 84, 823, 823, 835, + /* 170 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 57, + /* 180 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87, + /* 190 */ 341, 317, 1253, 928, 344, 269, 940, 941, 242, 93, + /* 200 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91, + /* 210 */ 92, 92, 92, 92, 293, 90, 90, 90, 90, 89, + /* 220 */ 89, 88, 88, 88, 87, 341, 317, 919, 1303, 793, + /* 230 */ 691, 1303, 724, 724, 93, 94, 84, 823, 823, 835, + /* 240 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 337, + /* 250 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87, + /* 260 */ 341, 317, 114, 919, 1304, 684, 395, 1304, 124, 93, + /* 270 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91, + /* 280 */ 92, 92, 92, 92, 683, 90, 90, 90, 90, 89, 
+ /* 290 */ 89, 88, 88, 88, 87, 341, 317, 86, 83, 169, + /* 300 */ 801, 917, 234, 399, 93, 94, 84, 823, 823, 835, + /* 310 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 686, + /* 320 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87, + /* 330 */ 341, 317, 436, 742, 86, 83, 169, 917, 741, 93, + /* 340 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91, + /* 350 */ 92, 92, 92, 92, 902, 90, 90, 90, 90, 89, + /* 360 */ 89, 88, 88, 88, 87, 341, 317, 321, 434, 434, + /* 370 */ 434, 1, 722, 722, 93, 94, 84, 823, 823, 835, + /* 380 */ 838, 827, 827, 91, 91, 92, 92, 92, 92, 190, + /* 390 */ 90, 90, 90, 90, 89, 89, 88, 88, 88, 87, + /* 400 */ 341, 317, 685, 292, 939, 939, 150, 977, 310, 93, + /* 410 */ 94, 84, 823, 823, 835, 838, 827, 827, 91, 91, + /* 420 */ 92, 92, 92, 92, 437, 90, 90, 90, 90, 89, + /* 430 */ 89, 88, 88, 88, 87, 341, 926, 2, 372, 719, + /* 440 */ 698, 369, 950, 317, 49, 49, 940, 941, 719, 177, + /* 450 */ 72, 93, 94, 84, 823, 823, 835, 838, 827, 827, + /* 460 */ 91, 91, 92, 92, 92, 92, 322, 90, 90, 90, + /* 470 */ 90, 89, 89, 88, 88, 88, 87, 341, 317, 415, + /* 480 */ 405, 824, 824, 836, 839, 75, 93, 82, 84, 823, + /* 490 */ 823, 835, 838, 827, 827, 91, 91, 92, 92, 92, + /* 500 */ 92, 430, 90, 90, 90, 90, 89, 89, 88, 88, + /* 510 */ 88, 87, 341, 317, 340, 340, 340, 658, 659, 660, + /* 520 */ 333, 288, 94, 84, 823, 823, 835, 838, 827, 827, + /* 530 */ 91, 91, 92, 92, 92, 92, 437, 90, 90, 90, + /* 540 */ 90, 89, 89, 88, 88, 88, 87, 341, 317, 882, + /* 550 */ 882, 375, 828, 66, 330, 409, 49, 49, 84, 823, + /* 560 */ 823, 835, 838, 827, 827, 91, 91, 92, 92, 92, + /* 570 */ 92, 351, 90, 90, 90, 90, 89, 89, 88, 88, + /* 580 */ 88, 87, 341, 80, 432, 742, 3, 1180, 351, 350, + /* 590 */ 741, 334, 796, 939, 939, 761, 80, 432, 278, 3, + /* 600 */ 204, 161, 279, 393, 274, 392, 191, 362, 437, 277, + /* 610 */ 745, 77, 78, 272, 800, 254, 355, 243, 79, 342, + /* 620 */ 342, 86, 83, 169, 77, 78, 234, 399, 49, 49, + /* 630 */ 435, 79, 342, 342, 437, 940, 941, 186, 442, 655, + /* 640 */ 390, 387, 386, 435, 235, 213, 108, 421, 761, 351, + /* 650 */ 437, 385, 167, 732, 10, 10, 124, 124, 671, 814, + /* 660 */ 421, 439, 438, 415, 414, 802, 362, 168, 327, 124, + /* 670 */ 49, 49, 814, 219, 439, 438, 800, 186, 802, 326, + /* 680 */ 390, 387, 386, 437, 1248, 1248, 23, 939, 939, 80, + /* 690 */ 432, 385, 3, 761, 416, 876, 807, 807, 807, 809, + /* 700 */ 19, 290, 149, 49, 49, 415, 396, 260, 910, 807, + /* 710 */ 807, 807, 809, 19, 312, 237, 145, 77, 78, 746, + /* 720 */ 168, 702, 437, 149, 79, 342, 342, 114, 358, 940, + /* 730 */ 941, 302, 223, 397, 345, 313, 435, 260, 415, 417, + /* 740 */ 858, 374, 31, 31, 80, 432, 761, 3, 348, 92, + /* 750 */ 92, 92, 92, 421, 90, 90, 90, 90, 89, 89, + /* 760 */ 88, 88, 88, 87, 341, 814, 114, 439, 438, 796, + /* 770 */ 367, 802, 77, 78, 701, 796, 124, 1187, 220, 79, + /* 780 */ 342, 342, 124, 747, 734, 939, 939, 775, 404, 939, + /* 790 */ 939, 435, 254, 360, 253, 402, 895, 346, 254, 360, + /* 800 */ 253, 774, 807, 807, 807, 809, 19, 800, 421, 90, + /* 810 */ 90, 90, 90, 89, 89, 88, 88, 88, 87, 341, + /* 820 */ 814, 114, 439, 438, 939, 939, 802, 940, 941, 114, + /* 830 */ 437, 940, 941, 86, 83, 169, 192, 166, 309, 979, + /* 840 */ 70, 432, 700, 3, 382, 870, 238, 86, 83, 169, + /* 850 */ 10, 10, 361, 406, 763, 190, 222, 807, 807, 807, + /* 860 */ 809, 19, 870, 872, 329, 24, 940, 941, 77, 78, + /* 870 */ 359, 437, 335, 260, 218, 79, 342, 342, 437, 307, + /* 880 */ 306, 305, 207, 303, 339, 338, 668, 435, 339, 338, + /* 890 */ 407, 10, 10, 762, 216, 216, 939, 939, 49, 49, + /* 900 */ 437, 
260, 97, 241, 421, 225, 402, 189, 188, 187, + /* 910 */ 309, 918, 980, 149, 221, 898, 814, 868, 439, 438, + /* 920 */ 10, 10, 802, 870, 915, 316, 898, 163, 162, 171, + /* 930 */ 249, 240, 322, 410, 412, 687, 687, 272, 940, 941, + /* 940 */ 239, 965, 901, 437, 226, 403, 226, 437, 963, 367, + /* 950 */ 964, 173, 248, 807, 807, 807, 809, 19, 174, 367, + /* 960 */ 899, 124, 172, 48, 48, 9, 9, 35, 35, 966, + /* 970 */ 966, 899, 363, 966, 966, 814, 900, 808, 725, 939, + /* 980 */ 939, 802, 895, 318, 980, 324, 125, 900, 726, 420, + /* 990 */ 92, 92, 92, 92, 85, 90, 90, 90, 90, 89, + /* 1000 */ 89, 88, 88, 88, 87, 341, 216, 216, 437, 946, + /* 1010 */ 349, 292, 807, 807, 807, 114, 291, 693, 402, 705, + /* 1020 */ 890, 940, 941, 437, 245, 889, 247, 437, 36, 36, + /* 1030 */ 437, 353, 391, 437, 260, 252, 260, 437, 361, 437, + /* 1040 */ 706, 437, 370, 12, 12, 224, 437, 27, 27, 437, + /* 1050 */ 37, 37, 437, 38, 38, 752, 368, 39, 39, 28, + /* 1060 */ 28, 29, 29, 215, 166, 331, 40, 40, 437, 41, + /* 1070 */ 41, 437, 42, 42, 437, 866, 246, 731, 437, 879, + /* 1080 */ 437, 256, 437, 878, 437, 267, 437, 261, 11, 11, + /* 1090 */ 437, 43, 43, 437, 99, 99, 437, 373, 44, 44, + /* 1100 */ 45, 45, 32, 32, 46, 46, 47, 47, 437, 426, + /* 1110 */ 33, 33, 776, 116, 116, 437, 117, 117, 437, 124, + /* 1120 */ 437, 777, 437, 260, 437, 957, 437, 352, 118, 118, + /* 1130 */ 437, 195, 437, 111, 437, 53, 53, 264, 34, 34, + /* 1140 */ 100, 100, 50, 50, 101, 101, 102, 102, 437, 260, + /* 1150 */ 98, 98, 115, 115, 113, 113, 437, 262, 437, 265, + /* 1160 */ 437, 943, 958, 437, 727, 437, 681, 437, 106, 106, + /* 1170 */ 68, 437, 893, 730, 437, 365, 105, 105, 103, 103, + /* 1180 */ 104, 104, 217, 52, 52, 54, 54, 51, 51, 694, + /* 1190 */ 259, 26, 26, 266, 30, 30, 677, 323, 433, 323, + /* 1200 */ 674, 423, 427, 943, 958, 114, 114, 431, 681, 865, + /* 1210 */ 1277, 233, 366, 714, 112, 20, 154, 704, 703, 810, + /* 1220 */ 914, 55, 159, 311, 798, 255, 383, 194, 68, 200, + /* 1230 */ 21, 694, 268, 114, 114, 114, 270, 711, 712, 68, + /* 1240 */ 114, 739, 770, 715, 71, 194, 861, 875, 875, 200, + /* 1250 */ 696, 865, 874, 874, 679, 699, 273, 110, 229, 419, + /* 1260 */ 768, 810, 799, 378, 748, 759, 418, 210, 294, 281, + /* 1270 */ 295, 806, 283, 682, 676, 665, 664, 666, 933, 151, + /* 1280 */ 285, 7, 1267, 308, 251, 790, 354, 244, 892, 364, + /* 1290 */ 287, 422, 300, 164, 160, 936, 974, 127, 197, 137, + /* 1300 */ 909, 907, 971, 388, 276, 863, 862, 56, 698, 325, + /* 1310 */ 148, 59, 122, 66, 356, 381, 357, 176, 152, 62, + /* 1320 */ 371, 130, 877, 181, 377, 760, 211, 182, 132, 133, + /* 1330 */ 134, 135, 258, 146, 140, 795, 787, 263, 183, 379, + /* 1340 */ 667, 394, 184, 332, 894, 314, 718, 717, 857, 716, + /* 1350 */ 696, 315, 709, 690, 65, 196, 6, 408, 289, 708, + /* 1360 */ 275, 689, 688, 948, 756, 757, 280, 282, 425, 755, + /* 1370 */ 284, 336, 73, 67, 754, 429, 411, 96, 286, 413, + /* 1380 */ 205, 934, 673, 22, 209, 440, 119, 120, 109, 206, + /* 1390 */ 208, 441, 662, 661, 656, 843, 654, 343, 158, 236, + /* 1400 */ 170, 347, 107, 227, 121, 738, 873, 298, 296, 297, + /* 1410 */ 299, 871, 794, 128, 129, 728, 230, 131, 175, 250, + /* 1420 */ 888, 136, 138, 231, 232, 139, 60, 61, 891, 178, + /* 1430 */ 179, 887, 8, 13, 180, 257, 880, 968, 194, 141, + /* 1440 */ 142, 376, 153, 670, 380, 185, 143, 277, 63, 384, + /* 1450 */ 14, 707, 271, 15, 389, 64, 319, 320, 126, 228, + /* 1460 */ 813, 812, 841, 736, 123, 16, 401, 740, 4, 769, + /* 1470 */ 165, 212, 214, 193, 144, 764, 71, 68, 17, 18, + /* 1480 */ 856, 842, 840, 897, 845, 896, 
199, 198, 923, 155, + /* 1490 */ 424, 929, 924, 156, 201, 202, 428, 844, 157, 203, + /* 1500 */ 811, 680, 81, 1269, 1268, 301, 304, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 19, 144, 145, 146, 147, 24, 1, 2, 27, 80, - /* 10 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, - /* 20 */ 91, 92, 93, 94, 95, 91, 92, 93, 94, 95, - /* 30 */ 19, 50, 51, 80, 81, 82, 83, 95, 85, 86, - /* 40 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 157, - /* 50 */ 27, 28, 71, 72, 73, 74, 75, 76, 77, 78, - /* 60 */ 79, 80, 81, 82, 83, 66, 85, 86, 87, 88, - /* 70 */ 89, 90, 91, 92, 93, 94, 95, 19, 97, 85, - /* 80 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 90 */ 152, 33, 152, 22, 27, 28, 179, 180, 27, 28, - /* 100 */ 42, 27, 27, 28, 152, 188, 95, 152, 50, 51, - /* 110 */ 99, 100, 101, 102, 103, 104, 105, 27, 28, 227, - /* 120 */ 97, 98, 230, 112, 172, 173, 172, 172, 173, 71, - /* 130 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - /* 140 */ 82, 83, 66, 85, 86, 87, 88, 89, 90, 91, - /* 150 */ 92, 93, 94, 95, 19, 172, 89, 90, 218, 207, - /* 160 */ 208, 26, 207, 208, 97, 98, 91, 100, 97, 98, - /* 170 */ 69, 97, 97, 98, 107, 237, 109, 89, 90, 91, - /* 180 */ 92, 93, 94, 95, 152, 50, 51, 97, 98, 99, - /* 190 */ 55, 59, 102, 103, 104, 119, 120, 59, 97, 132, - /* 200 */ 133, 152, 101, 113, 66, 19, 71, 72, 73, 74, - /* 210 */ 75, 76, 77, 78, 79, 80, 81, 82, 83, 187, - /* 220 */ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, - /* 230 */ 95, 172, 210, 132, 133, 134, 50, 51, 185, 53, - /* 240 */ 108, 109, 110, 221, 222, 223, 108, 109, 110, 22, - /* 250 */ 22, 119, 120, 181, 27, 27, 28, 71, 72, 73, - /* 260 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - /* 270 */ 152, 85, 86, 87, 88, 89, 90, 91, 92, 93, - /* 280 */ 94, 95, 19, 152, 148, 149, 115, 24, 117, 118, - /* 290 */ 154, 152, 156, 152, 163, 94, 95, 69, 249, 163, - /* 300 */ 27, 28, 99, 172, 173, 102, 103, 104, 194, 195, - /* 310 */ 152, 27, 28, 50, 51, 181, 113, 89, 90, 152, - /* 320 */ 206, 221, 222, 223, 97, 97, 187, 196, 175, 101, - /* 330 */ 172, 173, 196, 219, 71, 72, 73, 74, 75, 76, - /* 340 */ 77, 78, 79, 80, 81, 82, 83, 11, 85, 86, - /* 350 */ 87, 88, 89, 90, 91, 92, 93, 94, 95, 19, - /* 360 */ 132, 133, 134, 23, 66, 207, 208, 22, 27, 28, - /* 370 */ 97, 98, 27, 28, 221, 222, 223, 199, 22, 243, - /* 380 */ 24, 97, 98, 27, 221, 222, 223, 209, 152, 152, - /* 390 */ 50, 51, 168, 169, 170, 59, 26, 124, 100, 58, - /* 400 */ 152, 175, 66, 240, 163, 169, 170, 152, 124, 172, - /* 410 */ 173, 71, 72, 73, 74, 75, 76, 77, 78, 79, - /* 420 */ 80, 81, 82, 83, 12, 85, 86, 87, 88, 89, - /* 430 */ 90, 91, 92, 93, 94, 95, 19, 196, 97, 98, - /* 440 */ 23, 29, 97, 98, 108, 109, 110, 221, 222, 223, - /* 450 */ 50, 51, 152, 97, 168, 169, 170, 45, 37, 47, - /* 460 */ 219, 224, 119, 120, 152, 229, 152, 50, 51, 169, - /* 470 */ 170, 59, 231, 52, 74, 75, 106, 236, 152, 21, - /* 480 */ 24, 60, 163, 27, 172, 173, 172, 173, 71, 72, - /* 490 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, - /* 500 */ 83, 101, 85, 86, 87, 88, 89, 90, 91, 92, - /* 510 */ 93, 94, 95, 19, 152, 196, 152, 23, 152, 207, - /* 520 */ 152, 207, 163, 65, 19, 171, 152, 190, 191, 229, - /* 530 */ 211, 212, 111, 179, 172, 173, 172, 173, 172, 173, - /* 540 */ 172, 173, 190, 191, 50, 51, 172, 173, 186, 22, - /* 550 */ 186, 24, 186, 97, 186, 196, 51, 89, 90, 22, - /* 560 */ 23, 103, 137, 26, 139, 71, 72, 73, 74, 75, - /* 570 */ 76, 77, 78, 79, 80, 81, 82, 83, 219, 85, - /* 580 */ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, - /* 590 */ 19, 195, 152, 152, 23, 236, 163, 12, 140, 152, - /* 600 */ 132, 133, 206, 152, 164, 23, 31, 70, 26, 19, - 
/* 610 */ 35, 160, 107, 152, 29, 164, 152, 112, 28, 172, - /* 620 */ 173, 50, 51, 183, 49, 185, 152, 22, 23, 196, - /* 630 */ 45, 26, 47, 172, 173, 0, 1, 2, 152, 16, - /* 640 */ 152, 19, 71, 72, 73, 74, 75, 76, 77, 78, - /* 650 */ 79, 80, 81, 82, 83, 152, 85, 86, 87, 88, - /* 660 */ 89, 90, 91, 92, 93, 94, 95, 164, 152, 152, - /* 670 */ 152, 152, 50, 51, 16, 70, 108, 109, 110, 193, - /* 680 */ 98, 7, 8, 9, 152, 62, 22, 64, 172, 173, - /* 690 */ 172, 173, 218, 71, 72, 73, 74, 75, 76, 77, - /* 700 */ 78, 79, 80, 81, 82, 83, 124, 85, 86, 87, - /* 710 */ 88, 89, 90, 91, 92, 93, 94, 95, 19, 152, - /* 720 */ 62, 152, 64, 181, 152, 193, 152, 241, 246, 247, - /* 730 */ 26, 152, 152, 152, 217, 152, 91, 249, 152, 172, - /* 740 */ 173, 172, 173, 79, 172, 173, 172, 173, 152, 50, - /* 750 */ 51, 172, 173, 172, 173, 172, 173, 116, 172, 173, - /* 760 */ 138, 116, 121, 140, 22, 23, 121, 152, 172, 173, - /* 770 */ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, - /* 780 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90, - /* 790 */ 91, 92, 93, 94, 95, 19, 152, 217, 152, 152, - /* 800 */ 24, 152, 98, 172, 173, 108, 109, 110, 193, 152, - /* 810 */ 213, 152, 70, 152, 152, 152, 172, 173, 172, 173, - /* 820 */ 152, 172, 173, 152, 146, 147, 50, 51, 124, 172, - /* 830 */ 173, 172, 173, 172, 173, 172, 173, 138, 22, 23, - /* 840 */ 193, 152, 152, 172, 173, 152, 19, 71, 72, 73, - /* 850 */ 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, - /* 860 */ 152, 85, 86, 87, 88, 89, 90, 91, 92, 93, - /* 870 */ 94, 95, 152, 152, 152, 194, 195, 50, 51, 217, - /* 880 */ 172, 173, 193, 193, 26, 152, 70, 206, 152, 152, - /* 890 */ 26, 163, 172, 173, 172, 173, 152, 19, 71, 72, - /* 900 */ 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, - /* 910 */ 83, 152, 85, 86, 87, 88, 89, 90, 91, 92, - /* 920 */ 93, 94, 95, 152, 196, 152, 193, 152, 50, 51, - /* 930 */ 193, 172, 173, 19, 152, 166, 167, 51, 166, 167, - /* 940 */ 152, 152, 28, 172, 173, 172, 173, 152, 19, 71, - /* 950 */ 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - /* 960 */ 82, 83, 152, 85, 86, 87, 88, 89, 90, 91, - /* 970 */ 92, 93, 94, 95, 152, 193, 152, 211, 212, 50, - /* 980 */ 51, 33, 172, 173, 244, 245, 23, 123, 130, 26, - /* 990 */ 42, 100, 101, 107, 172, 173, 172, 173, 152, 19, - /* 1000 */ 22, 72, 73, 74, 75, 76, 77, 78, 79, 80, - /* 1010 */ 81, 82, 83, 152, 85, 86, 87, 88, 89, 90, - /* 1020 */ 91, 92, 93, 94, 95, 152, 237, 152, 7, 8, - /* 1030 */ 50, 51, 237, 172, 173, 23, 23, 23, 26, 26, - /* 1040 */ 26, 23, 132, 133, 26, 172, 173, 172, 173, 23, - /* 1050 */ 163, 152, 26, 73, 74, 75, 76, 77, 78, 79, - /* 1060 */ 80, 81, 82, 83, 152, 85, 86, 87, 88, 89, - /* 1070 */ 90, 91, 92, 93, 94, 95, 19, 20, 27, 22, - /* 1080 */ 23, 210, 152, 196, 27, 28, 132, 133, 152, 19, - /* 1090 */ 20, 23, 22, 27, 26, 38, 152, 27, 28, 152, - /* 1100 */ 122, 152, 172, 173, 152, 163, 191, 23, 38, 152, - /* 1110 */ 26, 152, 163, 152, 57, 27, 172, 173, 163, 172, - /* 1120 */ 173, 172, 173, 66, 172, 173, 69, 57, 163, 172, - /* 1130 */ 173, 172, 173, 172, 173, 152, 66, 152, 196, 69, - /* 1140 */ 163, 101, 152, 152, 152, 196, 89, 90, 97, 152, - /* 1150 */ 152, 196, 112, 96, 97, 98, 207, 208, 101, 89, - /* 1160 */ 90, 196, 23, 97, 233, 26, 96, 97, 98, 172, - /* 1170 */ 173, 101, 152, 196, 152, 19, 20, 23, 22, 152, - /* 1180 */ 26, 152, 152, 27, 28, 97, 152, 152, 152, 132, - /* 1190 */ 133, 134, 135, 136, 38, 152, 152, 152, 152, 232, - /* 1200 */ 197, 214, 132, 133, 134, 135, 136, 198, 150, 210, - /* 1210 */ 210, 210, 201, 57, 238, 176, 214, 201, 180, 238, - /* 1220 */ 214, 184, 175, 19, 20, 69, 22, 175, 175, 198, - /* 1230 */ 226, 
27, 28, 200, 155, 39, 242, 122, 41, 159, - /* 1240 */ 159, 159, 38, 22, 239, 89, 90, 91, 220, 239, - /* 1250 */ 71, 189, 96, 97, 98, 130, 201, 101, 18, 192, - /* 1260 */ 159, 57, 18, 192, 192, 192, 158, 189, 220, 159, - /* 1270 */ 201, 158, 189, 69, 137, 201, 235, 19, 20, 46, - /* 1280 */ 22, 159, 159, 234, 158, 27, 28, 22, 132, 133, - /* 1290 */ 134, 135, 136, 89, 90, 177, 38, 159, 158, 158, - /* 1300 */ 96, 97, 98, 159, 177, 101, 107, 174, 174, 174, - /* 1310 */ 48, 182, 106, 177, 182, 57, 174, 125, 216, 176, - /* 1320 */ 174, 174, 174, 107, 215, 159, 215, 69, 216, 159, - /* 1330 */ 216, 215, 137, 216, 215, 177, 132, 133, 134, 135, - /* 1340 */ 136, 95, 177, 129, 126, 225, 127, 89, 90, 228, - /* 1350 */ 205, 128, 228, 204, 96, 97, 98, 25, 203, 101, - /* 1360 */ 5, 202, 201, 162, 26, 10, 11, 12, 13, 14, - /* 1370 */ 161, 13, 17, 153, 6, 153, 151, 151, 151, 151, - /* 1380 */ 165, 178, 165, 178, 4, 3, 22, 32, 15, 34, - /* 1390 */ 132, 133, 134, 135, 136, 245, 165, 142, 43, 248, - /* 1400 */ 248, 68, 16, 120, 23, 131, 23, 111, 123, 20, - /* 1410 */ 16, 56, 125, 1, 123, 131, 79, 111, 63, 79, - /* 1420 */ 28, 66, 67, 36, 122, 1, 5, 22, 107, 140, - /* 1430 */ 54, 54, 26, 61, 44, 107, 20, 24, 19, 112, - /* 1440 */ 105, 53, 22, 40, 22, 22, 53, 30, 23, 22, - /* 1450 */ 22, 53, 23, 23, 23, 116, 22, 11, 23, 22, - /* 1460 */ 28, 23, 26, 122, 23, 22, 124, 122, 26, 114, - /* 1470 */ 26, 23, 23, 23, 22, 36, 36, 26, 23, 23, - /* 1480 */ 22, 36, 122, 24, 23, 22, 26, 22, 24, 23, - /* 1490 */ 23, 122, 23, 22, 15, 23, 141, 122, 1, + /* 0 */ 19, 95, 53, 97, 22, 24, 24, 101, 27, 28, + /* 10 */ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + /* 20 */ 39, 40, 41, 152, 43, 44, 45, 46, 47, 48, + /* 30 */ 49, 50, 51, 52, 53, 19, 55, 55, 132, 133, + /* 40 */ 134, 1, 2, 27, 28, 29, 30, 31, 32, 33, + /* 50 */ 34, 35, 36, 37, 38, 39, 40, 41, 187, 43, + /* 60 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 70 */ 47, 48, 49, 50, 51, 52, 53, 61, 97, 97, + /* 80 */ 19, 49, 50, 51, 52, 53, 70, 26, 27, 28, + /* 90 */ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + /* 100 */ 39, 40, 41, 152, 43, 44, 45, 46, 47, 48, + /* 110 */ 49, 50, 51, 52, 53, 144, 145, 146, 147, 19, + /* 120 */ 137, 22, 139, 172, 173, 52, 53, 27, 28, 29, + /* 130 */ 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + /* 140 */ 40, 41, 81, 43, 44, 45, 46, 47, 48, 49, + /* 150 */ 50, 51, 52, 53, 55, 56, 19, 152, 207, 208, + /* 160 */ 115, 24, 117, 118, 27, 28, 29, 30, 31, 32, + /* 170 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 79, + /* 180 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 190 */ 53, 19, 0, 1, 2, 23, 97, 98, 193, 27, + /* 200 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 210 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47, + /* 220 */ 48, 49, 50, 51, 52, 53, 19, 22, 23, 163, + /* 230 */ 23, 26, 190, 191, 27, 28, 29, 30, 31, 32, + /* 240 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 187, + /* 250 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 260 */ 53, 19, 196, 22, 23, 23, 49, 26, 92, 27, + /* 270 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 280 */ 38, 39, 40, 41, 172, 43, 44, 45, 46, 47, + /* 290 */ 48, 49, 50, 51, 52, 53, 19, 221, 222, 223, + /* 300 */ 23, 96, 119, 120, 27, 28, 29, 30, 31, 32, + /* 310 */ 33, 34, 35, 36, 37, 38, 39, 40, 41, 172, + /* 320 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 330 */ 53, 19, 152, 116, 221, 222, 223, 96, 121, 27, + /* 340 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 350 */ 38, 39, 40, 41, 241, 43, 44, 45, 46, 47, + /* 360 */ 48, 49, 50, 51, 52, 53, 19, 157, 168, 169, + /* 370 */ 170, 22, 190, 191, 27, 28, 29, 30, 31, 32, + /* 380 
*/ 33, 34, 35, 36, 37, 38, 39, 40, 41, 30, + /* 390 */ 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + /* 400 */ 53, 19, 172, 152, 55, 56, 24, 247, 248, 27, + /* 410 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + /* 420 */ 38, 39, 40, 41, 152, 43, 44, 45, 46, 47, + /* 430 */ 48, 49, 50, 51, 52, 53, 146, 147, 228, 179, + /* 440 */ 180, 231, 185, 19, 172, 173, 97, 98, 188, 26, + /* 450 */ 138, 27, 28, 29, 30, 31, 32, 33, 34, 35, + /* 460 */ 36, 37, 38, 39, 40, 41, 107, 43, 44, 45, + /* 470 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 207, + /* 480 */ 208, 30, 31, 32, 33, 138, 27, 28, 29, 30, + /* 490 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + /* 500 */ 41, 250, 43, 44, 45, 46, 47, 48, 49, 50, + /* 510 */ 51, 52, 53, 19, 168, 169, 170, 7, 8, 9, + /* 520 */ 19, 152, 28, 29, 30, 31, 32, 33, 34, 35, + /* 530 */ 36, 37, 38, 39, 40, 41, 152, 43, 44, 45, + /* 540 */ 46, 47, 48, 49, 50, 51, 52, 53, 19, 108, + /* 550 */ 109, 110, 101, 130, 53, 152, 172, 173, 29, 30, + /* 560 */ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + /* 570 */ 41, 152, 43, 44, 45, 46, 47, 48, 49, 50, + /* 580 */ 51, 52, 53, 19, 20, 116, 22, 23, 169, 170, + /* 590 */ 121, 207, 85, 55, 56, 26, 19, 20, 101, 22, + /* 600 */ 99, 100, 101, 102, 103, 104, 105, 152, 152, 112, + /* 610 */ 210, 47, 48, 112, 152, 108, 109, 110, 54, 55, + /* 620 */ 56, 221, 222, 223, 47, 48, 119, 120, 172, 173, + /* 630 */ 66, 54, 55, 56, 152, 97, 98, 99, 148, 149, + /* 640 */ 102, 103, 104, 66, 154, 23, 156, 83, 26, 230, + /* 650 */ 152, 113, 152, 163, 172, 173, 92, 92, 21, 95, + /* 660 */ 83, 97, 98, 207, 208, 101, 152, 98, 186, 92, + /* 670 */ 172, 173, 95, 218, 97, 98, 152, 99, 101, 217, + /* 680 */ 102, 103, 104, 152, 119, 120, 196, 55, 56, 19, + /* 690 */ 20, 113, 22, 124, 163, 11, 132, 133, 134, 135, + /* 700 */ 136, 152, 152, 172, 173, 207, 208, 152, 152, 132, + /* 710 */ 133, 134, 135, 136, 164, 152, 84, 47, 48, 49, + /* 720 */ 98, 181, 152, 152, 54, 55, 56, 196, 91, 97, + /* 730 */ 98, 160, 218, 163, 244, 164, 66, 152, 207, 208, + /* 740 */ 103, 217, 172, 173, 19, 20, 124, 22, 193, 38, + /* 750 */ 39, 40, 41, 83, 43, 44, 45, 46, 47, 48, + /* 760 */ 49, 50, 51, 52, 53, 95, 196, 97, 98, 85, + /* 770 */ 152, 101, 47, 48, 181, 85, 92, 140, 193, 54, + /* 780 */ 55, 56, 92, 49, 195, 55, 56, 175, 163, 55, + /* 790 */ 56, 66, 108, 109, 110, 206, 163, 242, 108, 109, + /* 800 */ 110, 175, 132, 133, 134, 135, 136, 152, 83, 43, + /* 810 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + /* 820 */ 95, 196, 97, 98, 55, 56, 101, 97, 98, 196, + /* 830 */ 152, 97, 98, 221, 222, 223, 211, 212, 22, 23, + /* 840 */ 19, 20, 181, 22, 19, 152, 152, 221, 222, 223, + /* 850 */ 172, 173, 219, 19, 124, 30, 238, 132, 133, 134, + /* 860 */ 135, 136, 169, 170, 186, 232, 97, 98, 47, 48, + /* 870 */ 237, 152, 217, 152, 5, 54, 55, 56, 152, 10, + /* 880 */ 11, 12, 13, 14, 47, 48, 17, 66, 47, 48, + /* 890 */ 56, 172, 173, 124, 194, 195, 55, 56, 172, 173, + /* 900 */ 152, 152, 22, 152, 83, 186, 206, 108, 109, 110, + /* 910 */ 22, 23, 96, 152, 193, 12, 95, 152, 97, 98, + /* 920 */ 172, 173, 101, 230, 152, 164, 12, 47, 48, 60, + /* 930 */ 152, 62, 107, 207, 186, 55, 56, 112, 97, 98, + /* 940 */ 71, 100, 193, 152, 183, 152, 185, 152, 107, 152, + /* 950 */ 109, 82, 16, 132, 133, 134, 135, 136, 89, 152, + /* 960 */ 57, 92, 93, 172, 173, 172, 173, 172, 173, 132, + /* 970 */ 133, 57, 152, 132, 133, 95, 73, 97, 75, 55, + /* 980 */ 56, 101, 163, 114, 96, 245, 246, 73, 85, 75, + /* 990 */ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + /* 1000 */ 48, 49, 50, 51, 52, 53, 194, 195, 152, 171, + /* 1010 */ 141, 152, 132, 
133, 134, 196, 225, 179, 206, 65, + /* 1020 */ 152, 97, 98, 152, 88, 152, 90, 152, 172, 173, + /* 1030 */ 152, 219, 78, 152, 152, 238, 152, 152, 219, 152, + /* 1040 */ 86, 152, 152, 172, 173, 238, 152, 172, 173, 152, + /* 1050 */ 172, 173, 152, 172, 173, 213, 237, 172, 173, 172, + /* 1060 */ 173, 172, 173, 211, 212, 111, 172, 173, 152, 172, + /* 1070 */ 173, 152, 172, 173, 152, 193, 140, 193, 152, 59, + /* 1080 */ 152, 152, 152, 63, 152, 16, 152, 152, 172, 173, + /* 1090 */ 152, 172, 173, 152, 172, 173, 152, 77, 172, 173, + /* 1100 */ 172, 173, 172, 173, 172, 173, 172, 173, 152, 250, + /* 1110 */ 172, 173, 61, 172, 173, 152, 172, 173, 152, 92, + /* 1120 */ 152, 70, 152, 152, 152, 26, 152, 100, 172, 173, + /* 1130 */ 152, 24, 152, 22, 152, 172, 173, 152, 172, 173, + /* 1140 */ 172, 173, 172, 173, 172, 173, 172, 173, 152, 152, + /* 1150 */ 172, 173, 172, 173, 172, 173, 152, 88, 152, 90, + /* 1160 */ 152, 55, 55, 152, 193, 152, 55, 152, 172, 173, + /* 1170 */ 26, 152, 163, 163, 152, 19, 172, 173, 172, 173, + /* 1180 */ 172, 173, 22, 172, 173, 172, 173, 172, 173, 55, + /* 1190 */ 193, 172, 173, 152, 172, 173, 166, 167, 166, 167, + /* 1200 */ 163, 163, 163, 97, 97, 196, 196, 163, 97, 55, + /* 1210 */ 23, 199, 56, 26, 22, 22, 24, 100, 101, 55, + /* 1220 */ 23, 209, 123, 26, 23, 23, 23, 26, 26, 26, + /* 1230 */ 37, 97, 152, 196, 196, 196, 23, 7, 8, 26, + /* 1240 */ 196, 23, 23, 152, 26, 26, 23, 132, 133, 26, + /* 1250 */ 106, 97, 132, 133, 23, 152, 152, 26, 210, 191, + /* 1260 */ 152, 97, 152, 234, 152, 152, 152, 233, 152, 210, + /* 1270 */ 152, 152, 210, 152, 152, 152, 152, 152, 152, 197, + /* 1280 */ 210, 198, 122, 150, 239, 201, 214, 214, 201, 239, + /* 1290 */ 214, 227, 200, 184, 198, 155, 67, 243, 122, 22, + /* 1300 */ 159, 159, 69, 176, 175, 175, 175, 240, 180, 159, + /* 1310 */ 220, 240, 27, 130, 18, 18, 159, 158, 220, 137, + /* 1320 */ 159, 189, 236, 158, 74, 159, 159, 158, 192, 192, + /* 1330 */ 192, 192, 235, 22, 189, 189, 201, 159, 158, 177, + /* 1340 */ 159, 107, 158, 76, 201, 177, 174, 174, 201, 174, + /* 1350 */ 106, 177, 182, 174, 107, 159, 22, 125, 159, 182, + /* 1360 */ 174, 176, 174, 174, 216, 216, 215, 215, 177, 216, + /* 1370 */ 215, 53, 137, 128, 216, 177, 127, 129, 215, 126, + /* 1380 */ 25, 13, 162, 26, 6, 161, 165, 165, 178, 153, + /* 1390 */ 153, 151, 151, 151, 151, 224, 4, 3, 22, 142, + /* 1400 */ 15, 94, 16, 178, 165, 205, 23, 202, 204, 203, + /* 1410 */ 201, 23, 120, 131, 111, 20, 226, 123, 125, 16, + /* 1420 */ 1, 123, 131, 229, 229, 111, 37, 37, 56, 64, + /* 1430 */ 122, 1, 5, 22, 107, 140, 80, 87, 26, 80, + /* 1440 */ 107, 72, 24, 20, 19, 105, 22, 112, 22, 79, + /* 1450 */ 22, 58, 23, 22, 79, 22, 249, 249, 246, 79, + /* 1460 */ 23, 23, 23, 116, 68, 22, 26, 23, 22, 56, + /* 1470 */ 122, 23, 23, 64, 22, 124, 26, 26, 64, 64, + /* 1480 */ 23, 23, 23, 23, 11, 23, 22, 26, 23, 22, + /* 1490 */ 24, 1, 23, 22, 26, 122, 24, 23, 22, 122, + /* 1500 */ 23, 23, 22, 122, 122, 23, 15, }; -#define YY_SHIFT_USE_DFLT (-72) -#define YY_SHIFT_COUNT (439) -#define YY_SHIFT_MIN (-71) -#define YY_SHIFT_MAX (1497) +#define YY_SHIFT_USE_DFLT (-95) +#define YY_SHIFT_COUNT (442) +#define YY_SHIFT_MIN (-94) +#define YY_SHIFT_MAX (1491) static const short yy_shift_ofst[] = { - /* 0 */ 5, 1057, 1355, 1070, 1204, 1204, 1204, 138, -19, 58, - /* 10 */ 58, 186, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 67, - /* 20 */ 67, 90, 132, 336, 76, 135, 263, 340, 417, 494, - /* 30 */ 571, 622, 699, 776, 827, 827, 827, 827, 827, 827, - /* 40 */ 827, 827, 827, 827, 827, 827, 827, 827, 827, 878, - /* 50 */ 827, 
929, 980, 980, 1156, 1204, 1204, 1204, 1204, 1204, - /* 60 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 70 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, 1204, - /* 80 */ 1204, 1204, 1204, 1258, 1204, 1204, 1204, 1204, 1204, 1204, - /* 90 */ 1204, 1204, 1204, 1204, 1204, 1204, 1204, -71, -47, -47, - /* 100 */ -47, -47, -47, -6, 88, -66, 23, 458, 505, 468, - /* 110 */ 468, 23, 201, 343, -58, -72, -72, -72, 11, 11, - /* 120 */ 11, 412, 412, 341, 537, 605, 23, 23, 23, 23, - /* 130 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - /* 140 */ 23, 23, 23, 23, 23, 23, 635, 298, 74, 74, - /* 150 */ 343, -1, -1, -1, -1, -1, -1, -72, -72, -72, - /* 160 */ 228, 101, 101, 203, 75, 71, 273, 284, 345, 23, - /* 170 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - /* 180 */ 23, 23, 23, 23, 23, 23, 421, 421, 421, 23, - /* 190 */ 23, 582, 23, 23, 23, 356, 23, 23, 585, 23, - /* 200 */ 23, 23, 23, 23, 23, 23, 23, 23, 23, 568, - /* 210 */ 575, 456, 456, 456, 704, 171, 645, 674, 858, 590, - /* 220 */ 590, 914, 858, 914, 370, 963, 886, 948, 590, 425, - /* 230 */ 948, 948, 864, 641, 527, 1196, 1115, 1115, 1197, 1197, - /* 240 */ 1115, 1221, 1179, 1125, 1240, 1240, 1240, 1240, 1115, 1244, - /* 250 */ 1125, 1221, 1179, 1179, 1125, 1115, 1244, 1137, 1233, 1115, - /* 260 */ 1115, 1244, 1265, 1115, 1244, 1115, 1244, 1265, 1199, 1199, - /* 270 */ 1199, 1262, 1265, 1199, 1206, 1199, 1262, 1199, 1199, 1192, - /* 280 */ 1216, 1192, 1216, 1192, 1216, 1192, 1216, 1115, 1115, 1195, - /* 290 */ 1265, 1246, 1246, 1265, 1214, 1218, 1223, 1219, 1125, 1332, - /* 300 */ 1338, 1358, 1358, 1368, 1368, 1368, 1368, -72, -72, -72, - /* 310 */ -72, -72, -72, -72, -72, 400, 623, 742, 816, 658, - /* 320 */ 697, 227, 1012, 664, 1013, 1014, 1018, 1026, 1051, 891, - /* 330 */ 1021, 1040, 1068, 1084, 1066, 1139, 910, 954, 1154, 1088, - /* 340 */ 978, 1380, 1382, 1364, 1255, 1373, 1333, 1386, 1381, 1383, - /* 350 */ 1283, 1274, 1296, 1285, 1389, 1287, 1394, 1412, 1291, 1284, - /* 360 */ 1337, 1340, 1306, 1392, 1387, 1302, 1424, 1421, 1405, 1321, - /* 370 */ 1289, 1376, 1406, 1377, 1372, 1390, 1328, 1413, 1416, 1419, - /* 380 */ 1327, 1335, 1420, 1388, 1422, 1423, 1425, 1427, 1393, 1417, - /* 390 */ 1428, 1398, 1403, 1429, 1430, 1431, 1339, 1434, 1435, 1437, - /* 400 */ 1436, 1341, 1438, 1441, 1432, 1439, 1443, 1342, 1442, 1440, - /* 410 */ 1444, 1445, 1442, 1448, 1449, 1450, 1451, 1455, 1452, 1446, - /* 420 */ 1456, 1458, 1459, 1460, 1461, 1463, 1464, 1460, 1466, 1465, - /* 430 */ 1467, 1469, 1471, 1345, 1360, 1369, 1375, 1472, 1479, 1497, + /* 0 */ 40, 564, 869, 577, 725, 725, 725, 725, 690, -19, + /* 10 */ 16, 16, 100, 725, 725, 725, 725, 725, 725, 725, + /* 20 */ 841, 841, 538, 507, 684, 565, 61, 137, 172, 207, + /* 30 */ 242, 277, 312, 347, 382, 424, 424, 424, 424, 424, + /* 40 */ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424, + /* 50 */ 459, 424, 494, 529, 529, 670, 725, 725, 725, 725, + /* 60 */ 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, + /* 70 */ 725, 725, 725, 725, 725, 725, 725, 725, 725, 725, + /* 80 */ 725, 725, 725, 725, 821, 725, 725, 725, 725, 725, + /* 90 */ 725, 725, 725, 725, 725, 725, 725, 725, 952, 711, + /* 100 */ 711, 711, 711, 711, 766, 23, 32, 924, 637, 825, + /* 110 */ 837, 837, 924, 73, 183, -51, -95, -95, -95, 501, + /* 120 */ 501, 501, 903, 903, 632, 205, 241, 924, 924, 924, + /* 130 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924, + /* 140 */ 924, 924, 924, 924, 924, 924, 924, 192, 1027, 1106, + /* 150 */ 1106, 183, 176, 176, 176, 176, 176, 176, -95, -95, + /* 160 */ -95, 880, 
-94, -94, 578, 734, 99, 730, 769, 349, + /* 170 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924, + /* 180 */ 924, 924, 924, 924, 924, 924, 924, 954, 954, 954, + /* 190 */ 924, 924, 622, 924, 924, 924, -18, 924, 924, 914, + /* 200 */ 924, 924, 924, 924, 924, 924, 924, 924, 924, 924, + /* 210 */ 441, 1020, 1107, 1107, 1107, 569, 45, 217, 510, 423, + /* 220 */ 834, 834, 1156, 423, 1156, 1144, 1187, 359, 1051, 834, + /* 230 */ -17, 1051, 1051, 1099, 469, 1192, 1229, 1176, 1176, 1233, + /* 240 */ 1233, 1176, 1277, 1285, 1183, 1296, 1296, 1296, 1296, 1176, + /* 250 */ 1297, 1183, 1277, 1285, 1285, 1183, 1176, 1297, 1182, 1250, + /* 260 */ 1176, 1176, 1297, 1311, 1176, 1297, 1176, 1297, 1311, 1234, + /* 270 */ 1234, 1234, 1267, 1311, 1234, 1244, 1234, 1267, 1234, 1234, + /* 280 */ 1232, 1247, 1232, 1247, 1232, 1247, 1232, 1247, 1176, 1334, + /* 290 */ 1176, 1235, 1311, 1318, 1318, 1311, 1248, 1253, 1245, 1249, + /* 300 */ 1183, 1355, 1357, 1368, 1368, 1378, 1378, 1378, 1378, -95, + /* 310 */ -95, -95, -95, -95, -95, -95, -95, 451, 936, 816, + /* 320 */ 888, 1069, 799, 1111, 1197, 1193, 1201, 1202, 1203, 1213, + /* 330 */ 1134, 1117, 1230, 497, 1218, 1219, 1154, 1223, 1115, 1120, + /* 340 */ 1231, 1164, 1160, 1392, 1394, 1376, 1257, 1385, 1307, 1386, + /* 350 */ 1383, 1388, 1292, 1282, 1303, 1294, 1395, 1293, 1403, 1419, + /* 360 */ 1298, 1291, 1389, 1390, 1314, 1372, 1365, 1308, 1430, 1427, + /* 370 */ 1411, 1327, 1295, 1356, 1412, 1359, 1350, 1369, 1333, 1418, + /* 380 */ 1423, 1425, 1335, 1340, 1424, 1370, 1426, 1428, 1429, 1431, + /* 390 */ 1375, 1393, 1433, 1380, 1396, 1437, 1438, 1439, 1347, 1443, + /* 400 */ 1444, 1446, 1440, 1348, 1448, 1449, 1413, 1409, 1452, 1351, + /* 410 */ 1450, 1414, 1451, 1415, 1457, 1450, 1458, 1459, 1460, 1461, + /* 420 */ 1462, 1464, 1473, 1465, 1467, 1466, 1468, 1469, 1471, 1472, + /* 430 */ 1468, 1474, 1476, 1477, 1478, 1480, 1373, 1377, 1381, 1382, + /* 440 */ 1482, 1491, 1490, }; -#define YY_REDUCE_USE_DFLT (-144) -#define YY_REDUCE_COUNT (314) -#define YY_REDUCE_MIN (-143) -#define YY_REDUCE_MAX (1231) +#define YY_REDUCE_USE_DFLT (-130) +#define YY_REDUCE_COUNT (316) +#define YY_REDUCE_MIN (-129) +#define YY_REDUCE_MAX (1243) static const short yy_reduce_ofst[] = { - /* 0 */ -143, 949, 136, 131, -48, -45, 158, 241, 22, 153, - /* 10 */ 226, 163, 362, 364, 366, 312, 314, 368, 237, 236, - /* 20 */ 300, 440, 114, 359, 319, 100, 100, 100, 100, 100, - /* 30 */ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - /* 40 */ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, - /* 50 */ 100, 100, 100, 100, 374, 447, 461, 516, 518, 567, - /* 60 */ 569, 572, 574, 579, 581, 583, 586, 596, 631, 644, - /* 70 */ 646, 649, 657, 659, 661, 663, 671, 708, 720, 722, - /* 80 */ 759, 771, 773, 810, 822, 824, 861, 873, 875, 930, - /* 90 */ 944, 947, 952, 957, 959, 961, 997, 100, 100, 100, - /* 100 */ 100, 100, 100, 100, 100, 100, 486, -108, -83, 224, - /* 110 */ 286, 451, 100, 681, 100, 100, 100, 100, 354, 354, - /* 120 */ 354, 337, 352, 49, 482, 482, 503, 532, -60, 615, - /* 130 */ 647, 689, 690, 737, 782, -62, 517, 789, 474, 795, - /* 140 */ 580, 733, 32, 662, 488, 139, 678, 433, 769, 772, - /* 150 */ 396, 728, 887, 942, 955, 965, 977, 740, 766, 178, - /* 160 */ -46, -17, 59, 53, 118, 141, 167, 248, 255, 326, - /* 170 */ 441, 464, 519, 668, 693, 721, 736, 744, 775, 788, - /* 180 */ 846, 899, 912, 936, 983, 985, 72, 134, 542, 990, - /* 190 */ 991, 597, 992, 998, 1020, 871, 1022, 1027, 915, 1029, - /* 200 */ 1030, 1034, 118, 1035, 1036, 1043, 1044, 1045, 1046, 931, - /* 210 */ 
967, 999, 1000, 1001, 597, 1003, 1009, 1058, 1011, 987, - /* 220 */ 1002, 976, 1016, 981, 1039, 1037, 1038, 1047, 1006, 1004, - /* 230 */ 1052, 1053, 1033, 1031, 1079, 994, 1080, 1081, 1005, 1010, - /* 240 */ 1082, 1028, 1062, 1055, 1067, 1071, 1072, 1073, 1101, 1108, - /* 250 */ 1069, 1048, 1078, 1083, 1074, 1110, 1113, 1041, 1049, 1122, - /* 260 */ 1123, 1126, 1118, 1138, 1140, 1144, 1141, 1127, 1133, 1134, - /* 270 */ 1135, 1129, 1136, 1142, 1143, 1146, 1132, 1147, 1148, 1102, - /* 280 */ 1109, 1112, 1111, 1114, 1116, 1117, 1119, 1166, 1170, 1120, - /* 290 */ 1158, 1121, 1124, 1165, 1145, 1149, 1155, 1159, 1161, 1201, - /* 300 */ 1209, 1220, 1222, 1225, 1226, 1227, 1228, 1151, 1152, 1150, - /* 310 */ 1215, 1217, 1203, 1205, 1231, + /* 0 */ -29, 531, 490, 570, -49, 272, 456, 498, 633, 400, + /* 10 */ 612, 626, 113, 482, 678, 719, 384, 726, 748, 791, + /* 20 */ 419, 693, 761, 812, 819, 625, 76, 76, 76, 76, + /* 30 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, + /* 40 */ 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, + /* 50 */ 76, 76, 76, 76, 76, 793, 795, 856, 871, 875, + /* 60 */ 878, 881, 885, 887, 889, 894, 897, 900, 916, 919, + /* 70 */ 922, 926, 928, 930, 932, 934, 938, 941, 944, 956, + /* 80 */ 963, 966, 968, 970, 972, 974, 978, 980, 982, 996, + /* 90 */ 1004, 1006, 1008, 1011, 1013, 1015, 1019, 1022, 76, 76, + /* 100 */ 76, 76, 76, 76, 76, 76, 76, 555, 210, 260, + /* 110 */ 200, 346, 571, 76, 700, 76, 76, 76, 76, 838, + /* 120 */ 838, 838, 42, 182, 251, 160, 160, 550, 5, 455, + /* 130 */ 585, 721, 749, 882, 884, 971, 618, 462, 797, 514, + /* 140 */ 807, 524, 997, -129, 655, 859, 62, 290, 66, 1030, + /* 150 */ 1032, 589, 1009, 1010, 1037, 1038, 1039, 1044, 740, 852, + /* 160 */ 1012, 112, 147, 230, 257, 180, 369, 403, 500, 549, + /* 170 */ 556, 563, 694, 751, 765, 772, 778, 820, 868, 873, + /* 180 */ 890, 929, 935, 985, 1041, 1080, 1091, 540, 593, 661, + /* 190 */ 1103, 1104, 842, 1108, 1110, 1112, 1048, 1113, 1114, 1068, + /* 200 */ 1116, 1118, 1119, 180, 1121, 1122, 1123, 1124, 1125, 1126, + /* 210 */ 1029, 1034, 1059, 1062, 1070, 842, 1082, 1083, 1133, 1084, + /* 220 */ 1072, 1073, 1045, 1087, 1050, 1127, 1109, 1128, 1129, 1076, + /* 230 */ 1064, 1130, 1131, 1092, 1096, 1140, 1054, 1141, 1142, 1067, + /* 240 */ 1071, 1150, 1090, 1132, 1135, 1136, 1137, 1138, 1139, 1157, + /* 250 */ 1159, 1143, 1098, 1145, 1146, 1147, 1161, 1165, 1086, 1097, + /* 260 */ 1166, 1167, 1169, 1162, 1178, 1180, 1181, 1184, 1168, 1172, + /* 270 */ 1173, 1175, 1170, 1174, 1179, 1185, 1186, 1177, 1188, 1189, + /* 280 */ 1148, 1151, 1149, 1152, 1153, 1155, 1158, 1163, 1196, 1171, + /* 290 */ 1199, 1190, 1191, 1194, 1195, 1198, 1200, 1204, 1206, 1205, + /* 300 */ 1209, 1220, 1224, 1236, 1237, 1240, 1241, 1242, 1243, 1207, + /* 310 */ 1208, 1212, 1221, 1222, 1210, 1225, 1239, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1250, 1240, 1240, 1240, 1174, 1174, 1174, 1240, 1071, 1100, - /* 10 */ 1100, 1224, 1301, 1301, 1301, 1301, 1301, 1301, 1173, 1301, - /* 20 */ 1301, 1301, 1301, 1240, 1075, 1106, 1301, 1301, 1301, 1301, - /* 30 */ 1301, 1301, 1301, 1301, 1223, 1225, 1114, 1113, 1206, 1087, - /* 40 */ 1111, 1104, 1108, 1175, 1169, 1170, 1168, 1172, 1176, 1301, - /* 50 */ 1107, 1138, 1153, 1137, 1301, 1301, 1301, 1301, 1301, 1301, - /* 60 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 70 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 80 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 90 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1147, 1152, 1159, - 
/* 100 */ 1151, 1148, 1140, 1139, 1141, 1142, 1301, 994, 1042, 1301, - /* 110 */ 1301, 1301, 1143, 1301, 1144, 1156, 1155, 1154, 1231, 1258, - /* 120 */ 1257, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 130 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 140 */ 1301, 1301, 1301, 1301, 1301, 1301, 1250, 1240, 1000, 1000, - /* 150 */ 1301, 1240, 1240, 1240, 1240, 1240, 1240, 1236, 1075, 1066, - /* 160 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 170 */ 1228, 1226, 1301, 1187, 1301, 1301, 1301, 1301, 1301, 1301, - /* 180 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 190 */ 1301, 1301, 1301, 1301, 1301, 1071, 1301, 1301, 1301, 1301, - /* 200 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1252, 1301, - /* 210 */ 1201, 1071, 1071, 1071, 1073, 1055, 1065, 979, 1110, 1089, - /* 220 */ 1089, 1290, 1110, 1290, 1017, 1272, 1014, 1100, 1089, 1171, - /* 230 */ 1100, 1100, 1072, 1065, 1301, 1293, 1080, 1080, 1292, 1292, - /* 240 */ 1080, 1119, 1045, 1110, 1051, 1051, 1051, 1051, 1080, 991, - /* 250 */ 1110, 1119, 1045, 1045, 1110, 1080, 991, 1205, 1287, 1080, - /* 260 */ 1080, 991, 1180, 1080, 991, 1080, 991, 1180, 1043, 1043, - /* 270 */ 1043, 1032, 1180, 1043, 1017, 1043, 1032, 1043, 1043, 1093, - /* 280 */ 1088, 1093, 1088, 1093, 1088, 1093, 1088, 1080, 1080, 1301, - /* 290 */ 1180, 1184, 1184, 1180, 1105, 1094, 1103, 1101, 1110, 997, - /* 300 */ 1035, 1255, 1255, 1251, 1251, 1251, 1251, 1298, 1298, 1236, - /* 310 */ 1267, 1267, 1019, 1019, 1267, 1301, 1301, 1301, 1301, 1301, - /* 320 */ 1301, 1262, 1301, 1189, 1301, 1301, 1301, 1301, 1301, 1301, - /* 330 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 340 */ 1125, 1301, 975, 1233, 1301, 1301, 1232, 1301, 1301, 1301, - /* 350 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 360 */ 1301, 1301, 1301, 1301, 1301, 1289, 1301, 1301, 1301, 1301, - /* 370 */ 1301, 1301, 1204, 1203, 1301, 1301, 1301, 1301, 1301, 1301, - /* 380 */ 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 390 */ 1301, 1301, 1301, 1301, 1301, 1301, 1057, 1301, 1301, 1301, - /* 400 */ 1276, 1301, 1301, 1301, 1301, 1301, 1301, 1301, 1102, 1301, - /* 410 */ 1095, 1301, 1280, 1301, 1301, 1301, 1301, 1301, 1301, 1301, - /* 420 */ 1301, 1301, 1301, 1242, 1301, 1301, 1301, 1241, 1301, 1301, - /* 430 */ 1301, 1301, 1301, 1127, 1301, 1126, 1130, 1301, 985, 1301, + /* 0 */ 1258, 1248, 1248, 1248, 1180, 1180, 1180, 1180, 1248, 1077, + /* 10 */ 1106, 1106, 1232, 1309, 1309, 1309, 1309, 1309, 1309, 1179, + /* 20 */ 1309, 1309, 1309, 1309, 1248, 1081, 1112, 1309, 1309, 1309, + /* 30 */ 1309, 1309, 1309, 1309, 1309, 1231, 1233, 1120, 1119, 1214, + /* 40 */ 1093, 1117, 1110, 1114, 1181, 1175, 1176, 1174, 1178, 1182, + /* 50 */ 1309, 1113, 1144, 1159, 1143, 1309, 1309, 1309, 1309, 1309, + /* 60 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 70 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 80 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 90 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1153, 1158, + /* 100 */ 1165, 1157, 1154, 1146, 1145, 1147, 1148, 1309, 1000, 1048, + /* 110 */ 1309, 1309, 1309, 1149, 1309, 1150, 1162, 1161, 1160, 1239, + /* 120 */ 1266, 1265, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 130 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 140 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1258, 1248, 1006, + /* 150 */ 1006, 1309, 1248, 1248, 1248, 1248, 
1248, 1248, 1244, 1081, + /* 160 */ 1072, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 170 */ 1309, 1236, 1234, 1309, 1195, 1309, 1309, 1309, 1309, 1309, + /* 180 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 190 */ 1309, 1309, 1309, 1309, 1309, 1309, 1077, 1309, 1309, 1309, + /* 200 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1260, + /* 210 */ 1309, 1209, 1077, 1077, 1077, 1079, 1061, 1071, 985, 1116, + /* 220 */ 1095, 1095, 1298, 1116, 1298, 1023, 1280, 1020, 1106, 1095, + /* 230 */ 1177, 1106, 1106, 1078, 1071, 1309, 1301, 1086, 1086, 1300, + /* 240 */ 1300, 1086, 1125, 1051, 1116, 1057, 1057, 1057, 1057, 1086, + /* 250 */ 997, 1116, 1125, 1051, 1051, 1116, 1086, 997, 1213, 1295, + /* 260 */ 1086, 1086, 997, 1188, 1086, 997, 1086, 997, 1188, 1049, + /* 270 */ 1049, 1049, 1038, 1188, 1049, 1023, 1049, 1038, 1049, 1049, + /* 280 */ 1099, 1094, 1099, 1094, 1099, 1094, 1099, 1094, 1086, 1183, + /* 290 */ 1086, 1309, 1188, 1192, 1192, 1188, 1111, 1100, 1109, 1107, + /* 300 */ 1116, 1003, 1041, 1263, 1263, 1259, 1259, 1259, 1259, 1306, + /* 310 */ 1306, 1244, 1275, 1275, 1025, 1025, 1275, 1309, 1309, 1309, + /* 320 */ 1309, 1309, 1309, 1270, 1309, 1197, 1309, 1309, 1309, 1309, + /* 330 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 340 */ 1309, 1309, 1131, 1309, 981, 1241, 1309, 1309, 1240, 1309, + /* 350 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 360 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1297, 1309, 1309, + /* 370 */ 1309, 1309, 1309, 1309, 1212, 1211, 1309, 1309, 1309, 1309, + /* 380 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 390 */ 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1309, 1063, 1309, + /* 400 */ 1309, 1309, 1284, 1309, 1309, 1309, 1309, 1309, 1309, 1309, + /* 410 */ 1108, 1309, 1101, 1309, 1309, 1288, 1309, 1309, 1309, 1309, + /* 420 */ 1309, 1309, 1309, 1309, 1309, 1309, 1250, 1309, 1309, 1309, + /* 430 */ 1249, 1309, 1309, 1309, 1309, 1309, 1133, 1309, 1132, 1136, + /* 440 */ 1309, 991, 1309, }; /********** End of lemon-generated parsing tables *****************************/ @@ -129147,74 +132349,100 @@ static const YYACTIONTYPE yy_default[] = { static const YYCODETYPE yyFallback[] = { 0, /* $ => nothing */ 0, /* SEMI => nothing */ - 27, /* EXPLAIN => ID */ - 27, /* QUERY => ID */ - 27, /* PLAN => ID */ - 27, /* BEGIN => ID */ + 55, /* EXPLAIN => ID */ + 55, /* QUERY => ID */ + 55, /* PLAN => ID */ + 55, /* BEGIN => ID */ 0, /* TRANSACTION => nothing */ - 27, /* DEFERRED => ID */ - 27, /* IMMEDIATE => ID */ - 27, /* EXCLUSIVE => ID */ + 55, /* DEFERRED => ID */ + 55, /* IMMEDIATE => ID */ + 55, /* EXCLUSIVE => ID */ 0, /* COMMIT => nothing */ - 27, /* END => ID */ - 27, /* ROLLBACK => ID */ - 27, /* SAVEPOINT => ID */ - 27, /* RELEASE => ID */ + 55, /* END => ID */ + 55, /* ROLLBACK => ID */ + 55, /* SAVEPOINT => ID */ + 55, /* RELEASE => ID */ 0, /* TO => nothing */ 0, /* TABLE => nothing */ 0, /* CREATE => nothing */ - 27, /* IF => ID */ + 55, /* IF => ID */ 0, /* NOT => nothing */ 0, /* EXISTS => nothing */ - 27, /* TEMP => ID */ + 55, /* TEMP => ID */ 0, /* LP => nothing */ 0, /* RP => nothing */ 0, /* AS => nothing */ - 27, /* WITHOUT => ID */ + 55, /* WITHOUT => ID */ 0, /* COMMA => nothing */ + 0, /* OR => nothing */ + 0, /* AND => nothing */ + 0, /* IS => nothing */ + 55, /* MATCH => ID */ + 55, /* LIKE_KW => ID */ + 0, /* BETWEEN => nothing */ + 0, /* IN => nothing */ + 0, /* ISNULL => nothing */ + 0, /* NOTNULL => nothing */ + 0, /* NE => nothing 
*/ + 0, /* EQ => nothing */ + 0, /* GT => nothing */ + 0, /* LE => nothing */ + 0, /* LT => nothing */ + 0, /* GE => nothing */ + 0, /* ESCAPE => nothing */ + 0, /* BITAND => nothing */ + 0, /* BITOR => nothing */ + 0, /* LSHIFT => nothing */ + 0, /* RSHIFT => nothing */ + 0, /* PLUS => nothing */ + 0, /* MINUS => nothing */ + 0, /* STAR => nothing */ + 0, /* SLASH => nothing */ + 0, /* REM => nothing */ + 0, /* CONCAT => nothing */ + 0, /* COLLATE => nothing */ + 0, /* BITNOT => nothing */ 0, /* ID => nothing */ 0, /* INDEXED => nothing */ - 27, /* ABORT => ID */ - 27, /* ACTION => ID */ - 27, /* AFTER => ID */ - 27, /* ANALYZE => ID */ - 27, /* ASC => ID */ - 27, /* ATTACH => ID */ - 27, /* BEFORE => ID */ - 27, /* BY => ID */ - 27, /* CASCADE => ID */ - 27, /* CAST => ID */ - 27, /* COLUMNKW => ID */ - 27, /* CONFLICT => ID */ - 27, /* DATABASE => ID */ - 27, /* DESC => ID */ - 27, /* DETACH => ID */ - 27, /* EACH => ID */ - 27, /* FAIL => ID */ - 27, /* FOR => ID */ - 27, /* IGNORE => ID */ - 27, /* INITIALLY => ID */ - 27, /* INSTEAD => ID */ - 27, /* LIKE_KW => ID */ - 27, /* MATCH => ID */ - 27, /* NO => ID */ - 27, /* KEY => ID */ - 27, /* OF => ID */ - 27, /* OFFSET => ID */ - 27, /* PRAGMA => ID */ - 27, /* RAISE => ID */ - 27, /* RECURSIVE => ID */ - 27, /* REPLACE => ID */ - 27, /* RESTRICT => ID */ - 27, /* ROW => ID */ - 27, /* TRIGGER => ID */ - 27, /* VACUUM => ID */ - 27, /* VIEW => ID */ - 27, /* VIRTUAL => ID */ - 27, /* WITH => ID */ - 27, /* REINDEX => ID */ - 27, /* RENAME => ID */ - 27, /* CTIME_KW => ID */ + 55, /* ABORT => ID */ + 55, /* ACTION => ID */ + 55, /* AFTER => ID */ + 55, /* ANALYZE => ID */ + 55, /* ASC => ID */ + 55, /* ATTACH => ID */ + 55, /* BEFORE => ID */ + 55, /* BY => ID */ + 55, /* CASCADE => ID */ + 55, /* CAST => ID */ + 55, /* COLUMNKW => ID */ + 55, /* CONFLICT => ID */ + 55, /* DATABASE => ID */ + 55, /* DESC => ID */ + 55, /* DETACH => ID */ + 55, /* EACH => ID */ + 55, /* FAIL => ID */ + 55, /* FOR => ID */ + 55, /* IGNORE => ID */ + 55, /* INITIALLY => ID */ + 55, /* INSTEAD => ID */ + 55, /* NO => ID */ + 55, /* KEY => ID */ + 55, /* OF => ID */ + 55, /* OFFSET => ID */ + 55, /* PRAGMA => ID */ + 55, /* RAISE => ID */ + 55, /* RECURSIVE => ID */ + 55, /* REPLACE => ID */ + 55, /* RESTRICT => ID */ + 55, /* ROW => ID */ + 55, /* TRIGGER => ID */ + 55, /* VACUUM => ID */ + 55, /* VIEW => ID */ + 55, /* VIRTUAL => ID */ + 55, /* WITH => ID */ + 55, /* REINDEX => ID */ + 55, /* RENAME => ID */ + 55, /* CTIME_KW => ID */ }; #endif /* YYFALLBACK */ @@ -129246,9 +132474,9 @@ typedef struct yyStackEntry yyStackEntry; /* The state of the parser is completely contained in an instance of ** the following structure */ struct yyParser { - int yyidx; /* Index of top element in stack */ + yyStackEntry *yytos; /* Pointer to top element of the stack */ #ifdef YYTRACKMAXSTACKDEPTH - int yyidxMax; /* Maximum value of yyidx */ + int yyhwm; /* High-water mark of the stack */ #endif #ifndef YYNOERRORRECOVERY int yyerrcnt; /* Shifts left before out of the error */ @@ -129257,6 +132485,7 @@ struct yyParser { #if YYSTACKDEPTH<=0 int yystksz; /* Current side of the stack */ yyStackEntry *yystack; /* The parser's stack */ + yyStackEntry yystk0; /* First stack entry */ #else yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */ #endif @@ -129305,25 +132534,25 @@ static const char *const yyTokenName[] = { "ROLLBACK", "SAVEPOINT", "RELEASE", "TO", "TABLE", "CREATE", "IF", "NOT", "EXISTS", "TEMP", "LP", "RP", - "AS", "WITHOUT", "COMMA", "ID", + "AS", 
"WITHOUT", "COMMA", "OR", + "AND", "IS", "MATCH", "LIKE_KW", + "BETWEEN", "IN", "ISNULL", "NOTNULL", + "NE", "EQ", "GT", "LE", + "LT", "GE", "ESCAPE", "BITAND", + "BITOR", "LSHIFT", "RSHIFT", "PLUS", + "MINUS", "STAR", "SLASH", "REM", + "CONCAT", "COLLATE", "BITNOT", "ID", "INDEXED", "ABORT", "ACTION", "AFTER", "ANALYZE", "ASC", "ATTACH", "BEFORE", "BY", "CASCADE", "CAST", "COLUMNKW", "CONFLICT", "DATABASE", "DESC", "DETACH", "EACH", "FAIL", "FOR", "IGNORE", - "INITIALLY", "INSTEAD", "LIKE_KW", "MATCH", - "NO", "KEY", "OF", "OFFSET", - "PRAGMA", "RAISE", "RECURSIVE", "REPLACE", - "RESTRICT", "ROW", "TRIGGER", "VACUUM", - "VIEW", "VIRTUAL", "WITH", "REINDEX", - "RENAME", "CTIME_KW", "ANY", "OR", - "AND", "IS", "BETWEEN", "IN", - "ISNULL", "NOTNULL", "NE", "EQ", - "GT", "LE", "LT", "GE", - "ESCAPE", "BITAND", "BITOR", "LSHIFT", - "RSHIFT", "PLUS", "MINUS", "STAR", - "SLASH", "REM", "CONCAT", "COLLATE", - "BITNOT", "STRING", "JOIN_KW", "CONSTRAINT", + "INITIALLY", "INSTEAD", "NO", "KEY", + "OF", "OFFSET", "PRAGMA", "RAISE", + "RECURSIVE", "REPLACE", "RESTRICT", "ROW", + "TRIGGER", "VACUUM", "VIEW", "VIRTUAL", + "WITH", "REINDEX", "RENAME", "CTIME_KW", + "ANY", "STRING", "JOIN_KW", "CONSTRAINT", "DEFAULT", "NULL", "PRIMARY", "UNIQUE", "CHECK", "REFERENCES", "AUTOINCR", "ON", "INSERT", "DELETE", "UPDATE", "SET", @@ -129355,13 +132584,13 @@ static const char *const yyTokenName[] = { "stl_prefix", "joinop", "indexed_opt", "on_opt", "using_opt", "idlist", "setlist", "insert_cmd", "idlist_opt", "likeop", "between_op", "in_op", - "case_operand", "case_exprlist", "case_else", "uniqueflag", - "collate", "nmnum", "trigger_decl", "trigger_cmd_list", - "trigger_time", "trigger_event", "foreach_clause", "when_clause", - "trigger_cmd", "trnm", "tridxby", "database_kw_opt", - "key_opt", "add_column_fullname", "kwcolumn_opt", "create_vtab", - "vtabarglist", "vtabarg", "vtabargtoken", "lp", - "anylist", "wqlist", + "paren_exprlist", "case_operand", "case_exprlist", "case_else", + "uniqueflag", "collate", "nmnum", "trigger_decl", + "trigger_cmd_list", "trigger_time", "trigger_event", "foreach_clause", + "when_clause", "trigger_cmd", "trnm", "tridxby", + "database_kw_opt", "key_opt", "add_column_fullname", "kwcolumn_opt", + "create_vtab", "vtabarglist", "vtabarg", "vtabargtoken", + "lp", "anylist", "wqlist", }; #endif /* NDEBUG */ @@ -129559,7 +132788,7 @@ static const char *const yyRuleName[] = { /* 187 */ "expr ::= expr in_op LP exprlist RP", /* 188 */ "expr ::= LP select RP", /* 189 */ "expr ::= expr in_op LP select RP", - /* 190 */ "expr ::= expr in_op nm dbnm", + /* 190 */ "expr ::= expr in_op nm dbnm paren_exprlist", /* 191 */ "expr ::= EXISTS LP select RP", /* 192 */ "expr ::= CASE case_operand case_exprlist case_else END", /* 193 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", @@ -129571,154 +132800,166 @@ static const char *const yyRuleName[] = { /* 199 */ "exprlist ::=", /* 200 */ "nexprlist ::= nexprlist COMMA expr", /* 201 */ "nexprlist ::= expr", - /* 202 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", - /* 203 */ "uniqueflag ::= UNIQUE", - /* 204 */ "uniqueflag ::=", - /* 205 */ "eidlist_opt ::=", - /* 206 */ "eidlist_opt ::= LP eidlist RP", - /* 207 */ "eidlist ::= eidlist COMMA nm collate sortorder", - /* 208 */ "eidlist ::= nm collate sortorder", - /* 209 */ "collate ::=", - /* 210 */ "collate ::= COLLATE ID|STRING", - /* 211 */ "cmd ::= DROP INDEX ifexists fullname", - /* 212 */ "cmd ::= VACUUM", - /* 213 */ "cmd ::= VACUUM nm", - /* 214 */ 
"cmd ::= PRAGMA nm dbnm", - /* 215 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", - /* 216 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", - /* 217 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", - /* 218 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", - /* 219 */ "plus_num ::= PLUS INTEGER|FLOAT", - /* 220 */ "minus_num ::= MINUS INTEGER|FLOAT", - /* 221 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", - /* 222 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", - /* 223 */ "trigger_time ::= BEFORE", - /* 224 */ "trigger_time ::= AFTER", - /* 225 */ "trigger_time ::= INSTEAD OF", - /* 226 */ "trigger_time ::=", - /* 227 */ "trigger_event ::= DELETE|INSERT", - /* 228 */ "trigger_event ::= UPDATE", - /* 229 */ "trigger_event ::= UPDATE OF idlist", - /* 230 */ "when_clause ::=", - /* 231 */ "when_clause ::= WHEN expr", - /* 232 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", - /* 233 */ "trigger_cmd_list ::= trigger_cmd SEMI", - /* 234 */ "trnm ::= nm DOT nm", - /* 235 */ "tridxby ::= INDEXED BY nm", - /* 236 */ "tridxby ::= NOT INDEXED", - /* 237 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt", - /* 238 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select", - /* 239 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt", - /* 240 */ "trigger_cmd ::= select", - /* 241 */ "expr ::= RAISE LP IGNORE RP", - /* 242 */ "expr ::= RAISE LP raisetype COMMA nm RP", - /* 243 */ "raisetype ::= ROLLBACK", - /* 244 */ "raisetype ::= ABORT", - /* 245 */ "raisetype ::= FAIL", - /* 246 */ "cmd ::= DROP TRIGGER ifexists fullname", - /* 247 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", - /* 248 */ "cmd ::= DETACH database_kw_opt expr", - /* 249 */ "key_opt ::=", - /* 250 */ "key_opt ::= KEY expr", - /* 251 */ "cmd ::= REINDEX", - /* 252 */ "cmd ::= REINDEX nm dbnm", - /* 253 */ "cmd ::= ANALYZE", - /* 254 */ "cmd ::= ANALYZE nm dbnm", - /* 255 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", - /* 256 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", - /* 257 */ "add_column_fullname ::= fullname", - /* 258 */ "cmd ::= create_vtab", - /* 259 */ "cmd ::= create_vtab LP vtabarglist RP", - /* 260 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", - /* 261 */ "vtabarg ::=", - /* 262 */ "vtabargtoken ::= ANY", - /* 263 */ "vtabargtoken ::= lp anylist RP", - /* 264 */ "lp ::= LP", - /* 265 */ "with ::=", - /* 266 */ "with ::= WITH wqlist", - /* 267 */ "with ::= WITH RECURSIVE wqlist", - /* 268 */ "wqlist ::= nm eidlist_opt AS LP select RP", - /* 269 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP", - /* 270 */ "input ::= cmdlist", - /* 271 */ "cmdlist ::= cmdlist ecmd", - /* 272 */ "cmdlist ::= ecmd", - /* 273 */ "ecmd ::= SEMI", - /* 274 */ "ecmd ::= explain cmdx SEMI", - /* 275 */ "explain ::=", - /* 276 */ "trans_opt ::=", - /* 277 */ "trans_opt ::= TRANSACTION", - /* 278 */ "trans_opt ::= TRANSACTION nm", - /* 279 */ "savepoint_opt ::= SAVEPOINT", - /* 280 */ "savepoint_opt ::=", - /* 281 */ "cmd ::= create_table create_table_args", - /* 282 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 283 */ "columnlist ::= columnname carglist", - /* 284 */ "nm ::= ID|INDEXED", - /* 285 */ "nm ::= STRING", - /* 286 */ "nm ::= JOIN_KW", - /* 287 */ "typetoken ::= typename", - /* 288 */ "typename ::= ID|STRING", - /* 289 */ "signed ::= plus_num", - /* 290 */ "signed ::= minus_num", - /* 291 */ "carglist ::= carglist ccons", - /* 292 */ 
"carglist ::=", - /* 293 */ "ccons ::= NULL onconf", - /* 294 */ "conslist_opt ::= COMMA conslist", - /* 295 */ "conslist ::= conslist tconscomma tcons", - /* 296 */ "conslist ::= tcons", - /* 297 */ "tconscomma ::=", - /* 298 */ "defer_subclause_opt ::= defer_subclause", - /* 299 */ "resolvetype ::= raisetype", - /* 300 */ "selectnowith ::= oneselect", - /* 301 */ "oneselect ::= values", - /* 302 */ "sclp ::= selcollist COMMA", - /* 303 */ "as ::= ID|STRING", - /* 304 */ "expr ::= term", - /* 305 */ "exprlist ::= nexprlist", - /* 306 */ "nmnum ::= plus_num", - /* 307 */ "nmnum ::= nm", - /* 308 */ "nmnum ::= ON", - /* 309 */ "nmnum ::= DELETE", - /* 310 */ "nmnum ::= DEFAULT", - /* 311 */ "plus_num ::= INTEGER|FLOAT", - /* 312 */ "foreach_clause ::=", - /* 313 */ "foreach_clause ::= FOR EACH ROW", - /* 314 */ "trnm ::= nm", - /* 315 */ "tridxby ::=", - /* 316 */ "database_kw_opt ::= DATABASE", - /* 317 */ "database_kw_opt ::=", - /* 318 */ "kwcolumn_opt ::=", - /* 319 */ "kwcolumn_opt ::= COLUMNKW", - /* 320 */ "vtabarglist ::= vtabarg", - /* 321 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 322 */ "vtabarg ::= vtabarg vtabargtoken", - /* 323 */ "anylist ::=", - /* 324 */ "anylist ::= anylist LP anylist RP", - /* 325 */ "anylist ::= anylist ANY", + /* 202 */ "paren_exprlist ::=", + /* 203 */ "paren_exprlist ::= LP exprlist RP", + /* 204 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt", + /* 205 */ "uniqueflag ::= UNIQUE", + /* 206 */ "uniqueflag ::=", + /* 207 */ "eidlist_opt ::=", + /* 208 */ "eidlist_opt ::= LP eidlist RP", + /* 209 */ "eidlist ::= eidlist COMMA nm collate sortorder", + /* 210 */ "eidlist ::= nm collate sortorder", + /* 211 */ "collate ::=", + /* 212 */ "collate ::= COLLATE ID|STRING", + /* 213 */ "cmd ::= DROP INDEX ifexists fullname", + /* 214 */ "cmd ::= VACUUM", + /* 215 */ "cmd ::= VACUUM nm", + /* 216 */ "cmd ::= PRAGMA nm dbnm", + /* 217 */ "cmd ::= PRAGMA nm dbnm EQ nmnum", + /* 218 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP", + /* 219 */ "cmd ::= PRAGMA nm dbnm EQ minus_num", + /* 220 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP", + /* 221 */ "plus_num ::= PLUS INTEGER|FLOAT", + /* 222 */ "minus_num ::= MINUS INTEGER|FLOAT", + /* 223 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END", + /* 224 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause", + /* 225 */ "trigger_time ::= BEFORE", + /* 226 */ "trigger_time ::= AFTER", + /* 227 */ "trigger_time ::= INSTEAD OF", + /* 228 */ "trigger_time ::=", + /* 229 */ "trigger_event ::= DELETE|INSERT", + /* 230 */ "trigger_event ::= UPDATE", + /* 231 */ "trigger_event ::= UPDATE OF idlist", + /* 232 */ "when_clause ::=", + /* 233 */ "when_clause ::= WHEN expr", + /* 234 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI", + /* 235 */ "trigger_cmd_list ::= trigger_cmd SEMI", + /* 236 */ "trnm ::= nm DOT nm", + /* 237 */ "tridxby ::= INDEXED BY nm", + /* 238 */ "tridxby ::= NOT INDEXED", + /* 239 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt", + /* 240 */ "trigger_cmd ::= insert_cmd INTO trnm idlist_opt select", + /* 241 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt", + /* 242 */ "trigger_cmd ::= select", + /* 243 */ "expr ::= RAISE LP IGNORE RP", + /* 244 */ "expr ::= RAISE LP raisetype COMMA nm RP", + /* 245 */ "raisetype ::= ROLLBACK", + /* 246 */ "raisetype ::= ABORT", + /* 247 */ "raisetype ::= FAIL", + /* 248 */ "cmd ::= DROP TRIGGER ifexists fullname", + /* 
249 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt", + /* 250 */ "cmd ::= DETACH database_kw_opt expr", + /* 251 */ "key_opt ::=", + /* 252 */ "key_opt ::= KEY expr", + /* 253 */ "cmd ::= REINDEX", + /* 254 */ "cmd ::= REINDEX nm dbnm", + /* 255 */ "cmd ::= ANALYZE", + /* 256 */ "cmd ::= ANALYZE nm dbnm", + /* 257 */ "cmd ::= ALTER TABLE fullname RENAME TO nm", + /* 258 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist", + /* 259 */ "add_column_fullname ::= fullname", + /* 260 */ "cmd ::= create_vtab", + /* 261 */ "cmd ::= create_vtab LP vtabarglist RP", + /* 262 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm", + /* 263 */ "vtabarg ::=", + /* 264 */ "vtabargtoken ::= ANY", + /* 265 */ "vtabargtoken ::= lp anylist RP", + /* 266 */ "lp ::= LP", + /* 267 */ "with ::=", + /* 268 */ "with ::= WITH wqlist", + /* 269 */ "with ::= WITH RECURSIVE wqlist", + /* 270 */ "wqlist ::= nm eidlist_opt AS LP select RP", + /* 271 */ "wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP", + /* 272 */ "input ::= cmdlist", + /* 273 */ "cmdlist ::= cmdlist ecmd", + /* 274 */ "cmdlist ::= ecmd", + /* 275 */ "ecmd ::= SEMI", + /* 276 */ "ecmd ::= explain cmdx SEMI", + /* 277 */ "explain ::=", + /* 278 */ "trans_opt ::=", + /* 279 */ "trans_opt ::= TRANSACTION", + /* 280 */ "trans_opt ::= TRANSACTION nm", + /* 281 */ "savepoint_opt ::= SAVEPOINT", + /* 282 */ "savepoint_opt ::=", + /* 283 */ "cmd ::= create_table create_table_args", + /* 284 */ "columnlist ::= columnlist COMMA columnname carglist", + /* 285 */ "columnlist ::= columnname carglist", + /* 286 */ "nm ::= ID|INDEXED", + /* 287 */ "nm ::= STRING", + /* 288 */ "nm ::= JOIN_KW", + /* 289 */ "typetoken ::= typename", + /* 290 */ "typename ::= ID|STRING", + /* 291 */ "signed ::= plus_num", + /* 292 */ "signed ::= minus_num", + /* 293 */ "carglist ::= carglist ccons", + /* 294 */ "carglist ::=", + /* 295 */ "ccons ::= NULL onconf", + /* 296 */ "conslist_opt ::= COMMA conslist", + /* 297 */ "conslist ::= conslist tconscomma tcons", + /* 298 */ "conslist ::= tcons", + /* 299 */ "tconscomma ::=", + /* 300 */ "defer_subclause_opt ::= defer_subclause", + /* 301 */ "resolvetype ::= raisetype", + /* 302 */ "selectnowith ::= oneselect", + /* 303 */ "oneselect ::= values", + /* 304 */ "sclp ::= selcollist COMMA", + /* 305 */ "as ::= ID|STRING", + /* 306 */ "expr ::= term", + /* 307 */ "exprlist ::= nexprlist", + /* 308 */ "nmnum ::= plus_num", + /* 309 */ "nmnum ::= nm", + /* 310 */ "nmnum ::= ON", + /* 311 */ "nmnum ::= DELETE", + /* 312 */ "nmnum ::= DEFAULT", + /* 313 */ "plus_num ::= INTEGER|FLOAT", + /* 314 */ "foreach_clause ::=", + /* 315 */ "foreach_clause ::= FOR EACH ROW", + /* 316 */ "trnm ::= nm", + /* 317 */ "tridxby ::=", + /* 318 */ "database_kw_opt ::= DATABASE", + /* 319 */ "database_kw_opt ::=", + /* 320 */ "kwcolumn_opt ::=", + /* 321 */ "kwcolumn_opt ::= COLUMNKW", + /* 322 */ "vtabarglist ::= vtabarg", + /* 323 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 324 */ "vtabarg ::= vtabarg vtabargtoken", + /* 325 */ "anylist ::=", + /* 326 */ "anylist ::= anylist LP anylist RP", + /* 327 */ "anylist ::= anylist ANY", }; #endif /* NDEBUG */ #if YYSTACKDEPTH<=0 /* -** Try to increase the size of the parser stack. +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. 
*/ -static void yyGrowStack(yyParser *p){ +static int yyGrowStack(yyParser *p){ int newSize; + int idx; yyStackEntry *pNew; newSize = p->yystksz*2 + 100; - pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + idx = p->yytos ? (int)(p->yytos - p->yystack) : 0; + if( p->yystack==&p->yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->yystk0; + }else{ + pNew = realloc(p->yystack, newSize*sizeof(pNew[0])); + } if( pNew ){ p->yystack = pNew; - p->yystksz = newSize; + p->yytos = &p->yystack[idx]; #ifndef NDEBUG if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sStack grows to %d entries!\n", - yyTracePrompt, p->yystksz); + fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n", + yyTracePrompt, p->yystksz, newSize); } #endif + p->yystksz = newSize; } + return pNew==0; } #endif @@ -129747,15 +132988,24 @@ SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){ yyParser *pParser; pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) ); if( pParser ){ - pParser->yyidx = -1; #ifdef YYTRACKMAXSTACKDEPTH - pParser->yyidxMax = 0; + pParser->yyhwm = 0; #endif #if YYSTACKDEPTH<=0 + pParser->yytos = NULL; pParser->yystack = NULL; pParser->yystksz = 0; - yyGrowStack(pParser); + if( yyGrowStack(pParser) ){ + pParser->yystack = &pParser->yystk0; + pParser->yystksz = 1; + } #endif +#ifndef YYNOERRORRECOVERY + pParser->yyerrcnt = -1; +#endif + pParser->yytos = pParser->yystack; + pParser->yystack[0].stateno = 0; + pParser->yystack[0].major = 0; } return pParser; } @@ -129790,13 +133040,13 @@ static void yy_destructor( case 195: /* oneselect */ case 206: /* values */ { -sqlite3SelectDelete(pParse->db, (yypminor->yy159)); +sqlite3SelectDelete(pParse->db, (yypminor->yy243)); } break; case 172: /* term */ case 173: /* expr */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy342).pExpr); +sqlite3ExprDelete(pParse->db, (yypminor->yy190).pExpr); } break; case 177: /* eidlist_opt */ @@ -129809,9 +133059,10 @@ sqlite3ExprDelete(pParse->db, (yypminor->yy342).pExpr); case 208: /* exprlist */ case 209: /* sclp */ case 218: /* setlist */ - case 225: /* case_exprlist */ + case 224: /* paren_exprlist */ + case 226: /* case_exprlist */ { -sqlite3ExprListDelete(pParse->db, (yypminor->yy442)); +sqlite3ExprListDelete(pParse->db, (yypminor->yy148)); } break; case 193: /* fullname */ @@ -129819,42 +133070,42 @@ sqlite3ExprListDelete(pParse->db, (yypminor->yy442)); case 211: /* seltablist */ case 212: /* stl_prefix */ { -sqlite3SrcListDelete(pParse->db, (yypminor->yy347)); +sqlite3SrcListDelete(pParse->db, (yypminor->yy185)); } break; case 196: /* with */ - case 249: /* wqlist */ + case 250: /* wqlist */ { -sqlite3WithDelete(pParse->db, (yypminor->yy331)); +sqlite3WithDelete(pParse->db, (yypminor->yy285)); } break; case 201: /* where_opt */ case 203: /* having_opt */ case 215: /* on_opt */ - case 224: /* case_operand */ - case 226: /* case_else */ - case 235: /* when_clause */ - case 240: /* key_opt */ + case 225: /* case_operand */ + case 227: /* case_else */ + case 236: /* when_clause */ + case 241: /* key_opt */ { -sqlite3ExprDelete(pParse->db, (yypminor->yy122)); +sqlite3ExprDelete(pParse->db, (yypminor->yy72)); } break; case 216: /* using_opt */ case 217: /* idlist */ case 220: /* idlist_opt */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy180)); +sqlite3IdListDelete(pParse->db, (yypminor->yy254)); } break; - case 231: /* trigger_cmd_list */ - case 236: /* trigger_cmd */ + case 232: /* trigger_cmd_list */ + case 237: /* trigger_cmd */ { 
-sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy327)); +sqlite3DeleteTriggerStep(pParse->db, (yypminor->yy145)); } break; - case 233: /* trigger_event */ + case 234: /* trigger_event */ { -sqlite3IdListDelete(pParse->db, (yypminor->yy410).b); +sqlite3IdListDelete(pParse->db, (yypminor->yy332).b); } break; /********* End destructor definitions *****************************************/ @@ -129870,8 +133121,9 @@ sqlite3IdListDelete(pParse->db, (yypminor->yy410).b); */ static void yy_pop_parser_stack(yyParser *pParser){ yyStackEntry *yytos; - assert( pParser->yyidx>=0 ); - yytos = &pParser->yystack[pParser->yyidx--]; + assert( pParser->yytos!=0 ); + assert( pParser->yytos > pParser->yystack ); + yytos = pParser->yytos--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sPopping %s\n", @@ -129898,9 +133150,9 @@ SQLITE_PRIVATE void sqlite3ParserFree( #ifndef YYPARSEFREENEVERNULL if( pParser==0 ) return; #endif - while( pParser->yyidx>=0 ) yy_pop_parser_stack(pParser); + while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser); #if YYSTACKDEPTH<=0 - free(pParser->yystack); + if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack); #endif (*freeProc)((void*)pParser); } @@ -129911,7 +133163,7 @@ SQLITE_PRIVATE void sqlite3ParserFree( #ifdef YYTRACKMAXSTACKDEPTH SQLITE_PRIVATE int sqlite3ParserStackPeak(void *p){ yyParser *pParser = (yyParser*)p; - return pParser->yyidxMax; + return pParser->yyhwm; } #endif @@ -129924,7 +133176,7 @@ static unsigned int yy_find_shift_action( YYCODETYPE iLookAhead /* The look-ahead token */ ){ int i; - int stateno = pParser->yystack[pParser->yyidx].stateno; + int stateno = pParser->yytos->stateno; if( stateno>=YY_MIN_REDUCE ) return stateno; assert( stateno <= YY_SHIFT_COUNT ); @@ -130017,13 +133269,13 @@ static int yy_find_reduce_action( */ static void yyStackOverflow(yyParser *yypParser){ sqlite3ParserARG_FETCH; - yypParser->yyidx--; #ifndef NDEBUG if( yyTraceFILE ){ fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will execute if the parser ** stack every overflows */ /******** Begin %stack_overflow code ******************************************/ @@ -130041,11 +133293,11 @@ static void yyTraceShift(yyParser *yypParser, int yyNewState){ if( yyTraceFILE ){ if( yyNewState<YYNSTATE ){ fprintf(yyTraceFILE,"%sShift '%s', go to state %d\n", - yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major], + yyTracePrompt,yyTokenName[yypParser->yytos->major], yyNewState); }else{ fprintf(yyTraceFILE,"%sShift '%s'\n", - yyTracePrompt,yyTokenName[yypParser->yystack[yypParser->yyidx].major]); + yyTracePrompt,yyTokenName[yypParser->yytos->major]); } } } @@ -130063,27 +133315,30 @@ static void yy_shift( sqlite3ParserTOKENTYPE yyMinor /* The minor token to shift in */ ){ yyStackEntry *yytos; - yypParser->yyidx++; + yypParser->yytos++; #ifdef YYTRACKMAXSTACKDEPTH - if( yypParser->yyidx>yypParser->yyidxMax ){ - yypParser->yyidxMax = yypParser->yyidx; + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) ); } #endif #if YYSTACKDEPTH>0 - if( yypParser->yyidx>=YYSTACKDEPTH ){ + if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH] ){ yyStackOverflow(yypParser); return; } #else - if( yypParser->yyidx>=yypParser->yystksz ){ - yyGrowStack(yypParser); - if( yypParser->yyidx>=yypParser->yystksz ){ + if(
yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){ + if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); return; } } #endif - yytos = &yypParser->yystack[yypParser->yyidx]; + if( yyNewState > YY_MAX_SHIFT ){ + yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + } + yytos = yypParser->yytos; yytos->stateno = (YYACTIONTYPE)yyNewState; yytos->major = (YYCODETYPE)yyMajor; yytos->minor.yy0 = yyMinor; @@ -130287,27 +133542,29 @@ static const struct { { 173, 5 }, { 173, 3 }, { 173, 5 }, - { 173, 4 }, + { 173, 5 }, { 173, 4 }, { 173, 5 }, - { 225, 5 }, - { 225, 4 }, - { 226, 2 }, - { 226, 0 }, - { 224, 1 }, - { 224, 0 }, + { 226, 5 }, + { 226, 4 }, + { 227, 2 }, + { 227, 0 }, + { 225, 1 }, + { 225, 0 }, { 208, 0 }, { 207, 3 }, { 207, 1 }, + { 224, 0 }, + { 224, 3 }, { 149, 12 }, - { 227, 1 }, - { 227, 0 }, + { 228, 1 }, + { 228, 0 }, { 177, 0 }, { 177, 3 }, { 187, 5 }, { 187, 3 }, - { 228, 0 }, - { 228, 2 }, + { 229, 0 }, + { 229, 2 }, { 149, 4 }, { 149, 1 }, { 149, 2 }, @@ -130319,25 +133576,25 @@ static const struct { { 169, 2 }, { 170, 2 }, { 149, 5 }, - { 230, 11 }, - { 232, 1 }, - { 232, 1 }, - { 232, 2 }, - { 232, 0 }, + { 231, 11 }, { 233, 1 }, { 233, 1 }, - { 233, 3 }, - { 235, 0 }, - { 235, 2 }, - { 231, 3 }, - { 231, 2 }, - { 237, 3 }, + { 233, 2 }, + { 233, 0 }, + { 234, 1 }, + { 234, 1 }, + { 234, 3 }, + { 236, 0 }, + { 236, 2 }, + { 232, 3 }, + { 232, 2 }, { 238, 3 }, - { 238, 2 }, - { 236, 7 }, - { 236, 5 }, - { 236, 5 }, - { 236, 1 }, + { 239, 3 }, + { 239, 2 }, + { 237, 7 }, + { 237, 5 }, + { 237, 5 }, + { 237, 1 }, { 173, 4 }, { 173, 6 }, { 191, 1 }, @@ -130346,27 +133603,27 @@ static const struct { { 149, 4 }, { 149, 6 }, { 149, 3 }, - { 240, 0 }, - { 240, 2 }, + { 241, 0 }, + { 241, 2 }, { 149, 1 }, { 149, 3 }, { 149, 1 }, { 149, 3 }, { 149, 6 }, { 149, 7 }, - { 241, 1 }, + { 242, 1 }, { 149, 1 }, { 149, 4 }, - { 243, 8 }, - { 245, 0 }, - { 246, 1 }, - { 246, 3 }, + { 244, 8 }, + { 246, 0 }, { 247, 1 }, + { 247, 3 }, + { 248, 1 }, { 196, 0 }, { 196, 2 }, { 196, 3 }, - { 249, 6 }, - { 249, 8 }, + { 250, 6 }, + { 250, 8 }, { 144, 1 }, { 145, 2 }, { 145, 1 }, @@ -130403,26 +133660,26 @@ static const struct { { 210, 1 }, { 173, 1 }, { 208, 1 }, - { 229, 1 }, - { 229, 1 }, - { 229, 1 }, - { 229, 1 }, - { 229, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, + { 230, 1 }, { 169, 1 }, - { 234, 0 }, - { 234, 3 }, - { 237, 1 }, - { 238, 0 }, - { 239, 1 }, + { 235, 0 }, + { 235, 3 }, + { 238, 1 }, { 239, 0 }, - { 242, 0 }, - { 242, 1 }, - { 244, 1 }, - { 244, 3 }, - { 245, 2 }, - { 248, 0 }, - { 248, 4 }, - { 248, 2 }, + { 240, 1 }, + { 240, 0 }, + { 243, 0 }, + { 243, 1 }, + { 245, 1 }, + { 245, 3 }, + { 246, 2 }, + { 249, 0 }, + { 249, 4 }, + { 249, 2 }, }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -130440,7 +133697,7 @@ static void yy_reduce( yyStackEntry *yymsp; /* The top of the parser's stack */ int yysize; /* Amount to pop the stack */ sqlite3ParserARG_FETCH; - yymsp = &yypParser->yystack[yypParser->yyidx]; + yymsp = yypParser->yytos; #ifndef NDEBUG if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ yysize = yyRuleInfo[yyruleno].nrhs; @@ -130454,22 +133711,23 @@ static void yy_reduce( ** enough on the stack to push the LHS value */ if( yyRuleInfo[yyruleno].nrhs==0 ){ #ifdef YYTRACKMAXSTACKDEPTH - if( yypParser->yyidx>yypParser->yyidxMax ){ - yypParser->yyidxMax = yypParser->yyidx; + if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){ + yypParser->yyhwm++; + assert( yypParser->yyhwm == 
(int)(yypParser->yytos - yypParser->yystack)); } #endif #if YYSTACKDEPTH>0 - if( yypParser->yyidx>=YYSTACKDEPTH-1 ){ + if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH-1] ){ yyStackOverflow(yypParser); return; } #else - if( yypParser->yyidx>=yypParser->yystksz-1 ){ - yyGrowStack(yypParser); - if( yypParser->yyidx>=yypParser->yystksz-1 ){ + if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){ + if( yyGrowStack(yypParser) ){ yyStackOverflow(yypParser); return; } + yymsp = yypParser->yytos; } #endif } @@ -130495,15 +133753,15 @@ static void yy_reduce( { sqlite3FinishCoding(pParse); } break; case 3: /* cmd ::= BEGIN transtype trans_opt */ -{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy392);} +{sqlite3BeginTransaction(pParse, yymsp[-1].minor.yy194);} break; case 4: /* transtype ::= */ -{yymsp[1].minor.yy392 = TK_DEFERRED;} +{yymsp[1].minor.yy194 = TK_DEFERRED;} break; case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); -{yymsp[0].minor.yy392 = yymsp[0].major; /*A-overwrites-X*/} +{yymsp[0].minor.yy194 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT trans_opt */ case 9: /* cmd ::= END trans_opt */ yytestcase(yyruleno==9); @@ -130529,7 +133787,7 @@ static void yy_reduce( break; case 14: /* create_table ::= createkw temp TABLE ifnotexists nm dbnm */ { - sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy392,0,0,yymsp[-2].minor.yy392); + sqlite3StartTable(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,yymsp[-4].minor.yy194,0,0,yymsp[-2].minor.yy194); } break; case 15: /* createkw ::= CREATE */ @@ -130543,33 +133801,33 @@ static void yy_reduce( case 67: /* defer_subclause_opt ::= */ yytestcase(yyruleno==67); case 76: /* ifexists ::= */ yytestcase(yyruleno==76); case 90: /* distinct ::= */ yytestcase(yyruleno==90); - case 209: /* collate ::= */ yytestcase(yyruleno==209); -{yymsp[1].minor.yy392 = 0;} + case 211: /* collate ::= */ yytestcase(yyruleno==211); +{yymsp[1].minor.yy194 = 0;} break; case 17: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy392 = 1;} +{yymsp[-2].minor.yy194 = 1;} break; case 18: /* temp ::= TEMP */ case 43: /* autoinc ::= AUTOINCR */ yytestcase(yyruleno==43); -{yymsp[0].minor.yy392 = 1;} +{yymsp[0].minor.yy194 = 1;} break; case 20: /* create_table_args ::= LP columnlist conslist_opt RP table_options */ { - sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy392,0); + sqlite3EndTable(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,yymsp[0].minor.yy194,0); } break; case 21: /* create_table_args ::= AS select */ { - sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy159); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy159); + sqlite3EndTable(pParse,0,0,0,yymsp[0].minor.yy243); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy243); } break; case 23: /* table_options ::= WITHOUT nm */ { if( yymsp[0].minor.yy0.n==5 && sqlite3_strnicmp(yymsp[0].minor.yy0.z,"rowid",5)==0 ){ - yymsp[-1].minor.yy392 = TF_WithoutRowid | TF_NoVisibleRowid; + yymsp[-1].minor.yy194 = TF_WithoutRowid | TF_NoVisibleRowid; }else{ - yymsp[-1].minor.yy392 = 0; + yymsp[-1].minor.yy194 = 0; sqlite3ErrorMsg(pParse, "unknown table option: %.*s", yymsp[0].minor.yy0.n, yymsp[0].minor.yy0.z); } } @@ -130601,17 +133859,17 @@ static void yy_reduce( break; case 30: /* ccons ::= DEFAULT term */ case 32: /* ccons ::= DEFAULT PLUS term */ yytestcase(yyruleno==32); 
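/* Editor's note: from here on the hunks replace the integer stack index
** (yyidx) with a direct pointer to the top entry (yytos); the depth is
** recovered by pointer subtraction only where the YYTRACKMAXSTACKDEPTH
** high-water mark needs it.  A hedged sketch of the idiom used in
** yy_shift() and yy_reduce() above, with hypothetical names:
**
**   int depth = (int)(top - base);  // entries currently on the stack
**   if( depth > hwm ){
**     hwm++;                        // a push deepens the stack by exactly 1,
**     assert( hwm==depth );         // so increment is equivalent to assignment
**   }
*/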
-{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy342);} +{sqlite3AddDefaultValue(pParse,&yymsp[0].minor.yy190);} break; case 31: /* ccons ::= DEFAULT LP expr RP */ -{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy342);} +{sqlite3AddDefaultValue(pParse,&yymsp[-1].minor.yy190);} break; case 33: /* ccons ::= DEFAULT MINUS term */ { ExprSpan v; - v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy342.pExpr, 0, 0); + v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0, 0); v.zStart = yymsp[-1].minor.yy0.z; - v.zEnd = yymsp[0].minor.yy342.zEnd; + v.zEnd = yymsp[0].minor.yy190.zEnd; sqlite3AddDefaultValue(pParse,&v); } break; @@ -130623,147 +133881,149 @@ static void yy_reduce( } break; case 35: /* ccons ::= NOT NULL onconf */ -{sqlite3AddNotNull(pParse, yymsp[0].minor.yy392);} +{sqlite3AddNotNull(pParse, yymsp[0].minor.yy194);} break; case 36: /* ccons ::= PRIMARY KEY sortorder onconf autoinc */ -{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy392,yymsp[0].minor.yy392,yymsp[-2].minor.yy392);} +{sqlite3AddPrimaryKey(pParse,0,yymsp[-1].minor.yy194,yymsp[0].minor.yy194,yymsp[-2].minor.yy194);} break; case 37: /* ccons ::= UNIQUE onconf */ -{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy392,0,0,0,0);} +{sqlite3CreateIndex(pParse,0,0,0,0,yymsp[0].minor.yy194,0,0,0,0, + SQLITE_IDXTYPE_UNIQUE);} break; case 38: /* ccons ::= CHECK LP expr RP */ -{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy342.pExpr);} +{sqlite3AddCheckConstraint(pParse,yymsp[-1].minor.yy190.pExpr);} break; case 39: /* ccons ::= REFERENCES nm eidlist_opt refargs */ -{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy442,yymsp[0].minor.yy392);} +{sqlite3CreateForeignKey(pParse,0,&yymsp[-2].minor.yy0,yymsp[-1].minor.yy148,yymsp[0].minor.yy194);} break; case 40: /* ccons ::= defer_subclause */ -{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy392);} +{sqlite3DeferForeignKey(pParse,yymsp[0].minor.yy194);} break; case 41: /* ccons ::= COLLATE ID|STRING */ {sqlite3AddCollateType(pParse, &yymsp[0].minor.yy0);} break; case 44: /* refargs ::= */ -{ yymsp[1].minor.yy392 = OE_None*0x0101; /* EV: R-19803-45884 */} +{ yymsp[1].minor.yy194 = OE_None*0x0101; /* EV: R-19803-45884 */} break; case 45: /* refargs ::= refargs refarg */ -{ yymsp[-1].minor.yy392 = (yymsp[-1].minor.yy392 & ~yymsp[0].minor.yy207.mask) | yymsp[0].minor.yy207.value; } +{ yymsp[-1].minor.yy194 = (yymsp[-1].minor.yy194 & ~yymsp[0].minor.yy497.mask) | yymsp[0].minor.yy497.value; } break; case 46: /* refarg ::= MATCH nm */ -{ yymsp[-1].minor.yy207.value = 0; yymsp[-1].minor.yy207.mask = 0x000000; } +{ yymsp[-1].minor.yy497.value = 0; yymsp[-1].minor.yy497.mask = 0x000000; } break; case 47: /* refarg ::= ON INSERT refact */ -{ yymsp[-2].minor.yy207.value = 0; yymsp[-2].minor.yy207.mask = 0x000000; } +{ yymsp[-2].minor.yy497.value = 0; yymsp[-2].minor.yy497.mask = 0x000000; } break; case 48: /* refarg ::= ON DELETE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy392; yymsp[-2].minor.yy207.mask = 0x0000ff; } +{ yymsp[-2].minor.yy497.value = yymsp[0].minor.yy194; yymsp[-2].minor.yy497.mask = 0x0000ff; } break; case 49: /* refarg ::= ON UPDATE refact */ -{ yymsp[-2].minor.yy207.value = yymsp[0].minor.yy392<<8; yymsp[-2].minor.yy207.mask = 0x00ff00; } +{ yymsp[-2].minor.yy497.value = yymsp[0].minor.yy194<<8; yymsp[-2].minor.yy497.mask = 0x00ff00; } break; case 50: /* refact ::= SET NULL */ -{ yymsp[-1].minor.yy392 = OE_SetNull; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy194 = OE_SetNull; /* EV: R-33326-45252 
*/} break; case 51: /* refact ::= SET DEFAULT */ -{ yymsp[-1].minor.yy392 = OE_SetDflt; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy194 = OE_SetDflt; /* EV: R-33326-45252 */} break; case 52: /* refact ::= CASCADE */ -{ yymsp[0].minor.yy392 = OE_Cascade; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy194 = OE_Cascade; /* EV: R-33326-45252 */} break; case 53: /* refact ::= RESTRICT */ -{ yymsp[0].minor.yy392 = OE_Restrict; /* EV: R-33326-45252 */} +{ yymsp[0].minor.yy194 = OE_Restrict; /* EV: R-33326-45252 */} break; case 54: /* refact ::= NO ACTION */ -{ yymsp[-1].minor.yy392 = OE_None; /* EV: R-33326-45252 */} +{ yymsp[-1].minor.yy194 = OE_None; /* EV: R-33326-45252 */} break; case 55: /* defer_subclause ::= NOT DEFERRABLE init_deferred_pred_opt */ -{yymsp[-2].minor.yy392 = 0;} +{yymsp[-2].minor.yy194 = 0;} break; case 56: /* defer_subclause ::= DEFERRABLE init_deferred_pred_opt */ case 71: /* orconf ::= OR resolvetype */ yytestcase(yyruleno==71); case 142: /* insert_cmd ::= INSERT orconf */ yytestcase(yyruleno==142); -{yymsp[-1].minor.yy392 = yymsp[0].minor.yy392;} +{yymsp[-1].minor.yy194 = yymsp[0].minor.yy194;} break; case 58: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 75: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==75); case 183: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==183); case 186: /* in_op ::= NOT IN */ yytestcase(yyruleno==186); - case 210: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==210); -{yymsp[-1].minor.yy392 = 1;} + case 212: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==212); +{yymsp[-1].minor.yy194 = 1;} break; case 59: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */ -{yymsp[-1].minor.yy392 = 0;} +{yymsp[-1].minor.yy194 = 0;} break; case 61: /* tconscomma ::= COMMA */ {pParse->constraintName.n = 0;} break; case 63: /* tcons ::= PRIMARY KEY LP sortlist autoinc RP onconf */ -{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy442,yymsp[0].minor.yy392,yymsp[-2].minor.yy392,0);} +{sqlite3AddPrimaryKey(pParse,yymsp[-3].minor.yy148,yymsp[0].minor.yy194,yymsp[-2].minor.yy194,0);} break; case 64: /* tcons ::= UNIQUE LP sortlist RP onconf */ -{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy442,yymsp[0].minor.yy392,0,0,0,0);} +{sqlite3CreateIndex(pParse,0,0,0,yymsp[-2].minor.yy148,yymsp[0].minor.yy194,0,0,0,0, + SQLITE_IDXTYPE_UNIQUE);} break; case 65: /* tcons ::= CHECK LP expr RP onconf */ -{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy342.pExpr);} +{sqlite3AddCheckConstraint(pParse,yymsp[-2].minor.yy190.pExpr);} break; case 66: /* tcons ::= FOREIGN KEY LP eidlist RP REFERENCES nm eidlist_opt refargs defer_subclause_opt */ { - sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy442, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy442, yymsp[-1].minor.yy392); - sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy392); + sqlite3CreateForeignKey(pParse, yymsp[-6].minor.yy148, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy148, yymsp[-1].minor.yy194); + sqlite3DeferForeignKey(pParse, yymsp[0].minor.yy194); } break; case 68: /* onconf ::= */ case 70: /* orconf ::= */ yytestcase(yyruleno==70); -{yymsp[1].minor.yy392 = OE_Default;} +{yymsp[1].minor.yy194 = OE_Default;} break; case 69: /* onconf ::= ON CONFLICT resolvetype */ -{yymsp[-2].minor.yy392 = yymsp[0].minor.yy392;} +{yymsp[-2].minor.yy194 = yymsp[0].minor.yy194;} break; case 72: /* resolvetype ::= IGNORE */ -{yymsp[0].minor.yy392 = OE_Ignore;} +{yymsp[0].minor.yy194 = OE_Ignore;} break; case 73: /* resolvetype ::= REPLACE */ case 143: /* insert_cmd ::= REPLACE */ 
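/* Editor's note: the refargs/refarg rules above pack both foreign-key
** actions into a single int: the ON DELETE action in the low byte (mask
** 0x0000ff) and the ON UPDATE action in the next byte (mask 0x00ff00),
** which is why the empty refargs rule seeds the value with OE_None*0x0101,
** filling both bytes at once.  Decoding, as a sketch:
**
**   int onDelete = refargs & 0xff;         // low byte
**   int onUpdate = (refargs >> 8) & 0xff;  // second byte
*/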
yytestcase(yyruleno==143); -{yymsp[0].minor.yy392 = OE_Replace;} +{yymsp[0].minor.yy194 = OE_Replace;} break; case 74: /* cmd ::= DROP TABLE ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy347, 0, yymsp[-1].minor.yy392); + sqlite3DropTable(pParse, yymsp[0].minor.yy185, 0, yymsp[-1].minor.yy194); } break; case 77: /* cmd ::= createkw temp VIEW ifnotexists nm dbnm eidlist_opt AS select */ { - sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy442, yymsp[0].minor.yy159, yymsp[-7].minor.yy392, yymsp[-5].minor.yy392); + sqlite3CreateView(pParse, &yymsp[-8].minor.yy0, &yymsp[-4].minor.yy0, &yymsp[-3].minor.yy0, yymsp[-2].minor.yy148, yymsp[0].minor.yy243, yymsp[-7].minor.yy194, yymsp[-5].minor.yy194); } break; case 78: /* cmd ::= DROP VIEW ifexists fullname */ { - sqlite3DropTable(pParse, yymsp[0].minor.yy347, 1, yymsp[-1].minor.yy392); + sqlite3DropTable(pParse, yymsp[0].minor.yy185, 1, yymsp[-1].minor.yy194); } break; case 79: /* cmd ::= select */ { SelectDest dest = {SRT_Output, 0, 0, 0, 0, 0}; - sqlite3Select(pParse, yymsp[0].minor.yy159, &dest); - sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy159); + sqlite3Select(pParse, yymsp[0].minor.yy243, &dest); + sqlite3SelectDelete(pParse->db, yymsp[0].minor.yy243); } break; case 80: /* select ::= with selectnowith */ { - Select *p = yymsp[0].minor.yy159; + Select *p = yymsp[0].minor.yy243; if( p ){ - p->pWith = yymsp[-1].minor.yy331; + p->pWith = yymsp[-1].minor.yy285; parserDoubleLinkSelect(pParse, p); }else{ - sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy331); + sqlite3WithDelete(pParse->db, yymsp[-1].minor.yy285); } - yymsp[-1].minor.yy159 = p; /*A-overwrites-W*/ + yymsp[-1].minor.yy243 = p; /*A-overwrites-W*/ } break; case 81: /* selectnowith ::= selectnowith multiselect_op oneselect */ { - Select *pRhs = yymsp[0].minor.yy159; - Select *pLhs = yymsp[-2].minor.yy159; + Select *pRhs = yymsp[0].minor.yy243; + Select *pLhs = yymsp[-2].minor.yy243; if( pRhs && pRhs->pPrior ){ SrcList *pFrom; Token x; @@ -130773,30 +134033,30 @@ static void yy_reduce( pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0,0); } if( pRhs ){ - pRhs->op = (u8)yymsp[-1].minor.yy392; + pRhs->op = (u8)yymsp[-1].minor.yy194; pRhs->pPrior = pLhs; if( ALWAYS(pLhs) ) pLhs->selFlags &= ~SF_MultiValue; pRhs->selFlags &= ~SF_MultiValue; - if( yymsp[-1].minor.yy392!=TK_ALL ) pParse->hasCompound = 1; + if( yymsp[-1].minor.yy194!=TK_ALL ) pParse->hasCompound = 1; }else{ sqlite3SelectDelete(pParse->db, pLhs); } - yymsp[-2].minor.yy159 = pRhs; + yymsp[-2].minor.yy243 = pRhs; } break; case 82: /* multiselect_op ::= UNION */ case 84: /* multiselect_op ::= EXCEPT|INTERSECT */ yytestcase(yyruleno==84); -{yymsp[0].minor.yy392 = yymsp[0].major; /*A-overwrites-OP*/} +{yymsp[0].minor.yy194 = yymsp[0].major; /*A-overwrites-OP*/} break; case 83: /* multiselect_op ::= UNION ALL */ -{yymsp[-1].minor.yy392 = TK_ALL;} +{yymsp[-1].minor.yy194 = TK_ALL;} break; case 85: /* oneselect ::= SELECT distinct selcollist from where_opt groupby_opt having_opt orderby_opt limit_opt */ { #if SELECTTRACE_ENABLED Token s = yymsp[-8].minor.yy0; /*A-overwrites-S*/ #endif - yymsp[-8].minor.yy159 = sqlite3SelectNew(pParse,yymsp[-6].minor.yy442,yymsp[-5].minor.yy347,yymsp[-4].minor.yy122,yymsp[-3].minor.yy442,yymsp[-2].minor.yy122,yymsp[-1].minor.yy442,yymsp[-7].minor.yy392,yymsp[0].minor.yy64.pLimit,yymsp[0].minor.yy64.pOffset); + yymsp[-8].minor.yy243 = 
sqlite3SelectNew(pParse,yymsp[-6].minor.yy148,yymsp[-5].minor.yy185,yymsp[-4].minor.yy72,yymsp[-3].minor.yy148,yymsp[-2].minor.yy72,yymsp[-1].minor.yy148,yymsp[-7].minor.yy194,yymsp[0].minor.yy354.pLimit,yymsp[0].minor.yy354.pOffset); #if SELECTTRACE_ENABLED /* Populate the Select.zSelName[] string that is used to help with ** query planner debugging, to differentiate between multiple Select @@ -130807,17 +134067,17 @@ static void yy_reduce( ** comment to be the zSelName value. Otherwise, the label is #N where ** is an integer that is incremented with each SELECT statement seen. */ - if( yymsp[-8].minor.yy159!=0 ){ + if( yymsp[-8].minor.yy243!=0 ){ const char *z = s.z+6; int i; - sqlite3_snprintf(sizeof(yymsp[-8].minor.yy159->zSelName), yymsp[-8].minor.yy159->zSelName, "#%d", + sqlite3_snprintf(sizeof(yymsp[-8].minor.yy243->zSelName), yymsp[-8].minor.yy243->zSelName, "#%d", ++pParse->nSelect); while( z[0]==' ' ) z++; if( z[0]=='/' && z[1]=='*' ){ z += 2; while( z[0]==' ' ) z++; for(i=0; sqlite3Isalnum(z[i]); i++){} - sqlite3_snprintf(sizeof(yymsp[-8].minor.yy159->zSelName), yymsp[-8].minor.yy159->zSelName, "%.*s", i, z); + sqlite3_snprintf(sizeof(yymsp[-8].minor.yy243->zSelName), yymsp[-8].minor.yy243->zSelName, "%.*s", i, z); } } #endif /* SELECTRACE_ENABLED */ @@ -130825,47 +134085,48 @@ static void yy_reduce( break; case 86: /* values ::= VALUES LP nexprlist RP */ { - yymsp[-3].minor.yy159 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy442,0,0,0,0,0,SF_Values,0,0); + yymsp[-3].minor.yy243 = sqlite3SelectNew(pParse,yymsp[-1].minor.yy148,0,0,0,0,0,SF_Values,0,0); } break; case 87: /* values ::= values COMMA LP exprlist RP */ { - Select *pRight, *pLeft = yymsp[-4].minor.yy159; - pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy442,0,0,0,0,0,SF_Values|SF_MultiValue,0,0); + Select *pRight, *pLeft = yymsp[-4].minor.yy243; + pRight = sqlite3SelectNew(pParse,yymsp[-1].minor.yy148,0,0,0,0,0,SF_Values|SF_MultiValue,0,0); if( ALWAYS(pLeft) ) pLeft->selFlags &= ~SF_MultiValue; if( pRight ){ pRight->op = TK_ALL; pRight->pPrior = pLeft; - yymsp[-4].minor.yy159 = pRight; + yymsp[-4].minor.yy243 = pRight; }else{ - yymsp[-4].minor.yy159 = pLeft; + yymsp[-4].minor.yy243 = pLeft; } } break; case 88: /* distinct ::= DISTINCT */ -{yymsp[0].minor.yy392 = SF_Distinct;} +{yymsp[0].minor.yy194 = SF_Distinct;} break; case 89: /* distinct ::= ALL */ -{yymsp[0].minor.yy392 = SF_All;} +{yymsp[0].minor.yy194 = SF_All;} break; case 91: /* sclp ::= */ case 119: /* orderby_opt ::= */ yytestcase(yyruleno==119); case 126: /* groupby_opt ::= */ yytestcase(yyruleno==126); case 199: /* exprlist ::= */ yytestcase(yyruleno==199); - case 205: /* eidlist_opt ::= */ yytestcase(yyruleno==205); -{yymsp[1].minor.yy442 = 0;} + case 202: /* paren_exprlist ::= */ yytestcase(yyruleno==202); + case 207: /* eidlist_opt ::= */ yytestcase(yyruleno==207); +{yymsp[1].minor.yy148 = 0;} break; case 92: /* selcollist ::= sclp expr as */ { - yymsp[-2].minor.yy442 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy442, yymsp[-1].minor.yy342.pExpr); - if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-2].minor.yy442, &yymsp[0].minor.yy0, 1); - sqlite3ExprListSetSpan(pParse,yymsp[-2].minor.yy442,&yymsp[-1].minor.yy342); + yymsp[-2].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy148, yymsp[-1].minor.yy190.pExpr); + if( yymsp[0].minor.yy0.n>0 ) sqlite3ExprListSetName(pParse, yymsp[-2].minor.yy148, &yymsp[0].minor.yy0, 1); + sqlite3ExprListSetSpan(pParse,yymsp[-2].minor.yy148,&yymsp[-1].minor.yy190); } break; case 93: /* 
selcollist ::= sclp STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); - yymsp[-1].minor.yy442 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy442, p); + yymsp[-1].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-1].minor.yy148, p); } break; case 94: /* selcollist ::= sclp nm DOT STAR */ @@ -130873,70 +134134,70 @@ static void yy_reduce( Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0, &yymsp[0].minor.yy0); Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0); - yymsp[-3].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy442, pDot); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, pDot); } break; case 95: /* as ::= AS nm */ case 106: /* dbnm ::= DOT nm */ yytestcase(yyruleno==106); - case 219: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==219); - case 220: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==220); + case 221: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==221); + case 222: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==222); {yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;} break; case 97: /* from ::= */ -{yymsp[1].minor.yy347 = sqlite3DbMallocZero(pParse->db, sizeof(*yymsp[1].minor.yy347));} +{yymsp[1].minor.yy185 = sqlite3DbMallocZero(pParse->db, sizeof(*yymsp[1].minor.yy185));} break; case 98: /* from ::= FROM seltablist */ { - yymsp[-1].minor.yy347 = yymsp[0].minor.yy347; - sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy347); + yymsp[-1].minor.yy185 = yymsp[0].minor.yy185; + sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy185); } break; case 99: /* stl_prefix ::= seltablist joinop */ { - if( ALWAYS(yymsp[-1].minor.yy347 && yymsp[-1].minor.yy347->nSrc>0) ) yymsp[-1].minor.yy347->a[yymsp[-1].minor.yy347->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy392; + if( ALWAYS(yymsp[-1].minor.yy185 && yymsp[-1].minor.yy185->nSrc>0) ) yymsp[-1].minor.yy185->a[yymsp[-1].minor.yy185->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy194; } break; case 100: /* stl_prefix ::= */ -{yymsp[1].minor.yy347 = 0;} +{yymsp[1].minor.yy185 = 0;} break; case 101: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */ { - yymsp[-6].minor.yy347 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy347,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy122,yymsp[0].minor.yy180); - sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy347, &yymsp[-2].minor.yy0); + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy185, &yymsp[-2].minor.yy0); } break; case 102: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */ { - yymsp[-8].minor.yy347 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy347,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy122,yymsp[0].minor.yy180); - sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy347, yymsp[-4].minor.yy442); + yymsp[-8].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy185,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy185, yymsp[-4].minor.yy148); } break; case 103: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */ { - yymsp[-6].minor.yy347 = 
sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy347,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy159,yymsp[-1].minor.yy122,yymsp[0].minor.yy180); + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy243,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); } break; case 104: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */ { - if( yymsp[-6].minor.yy347==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy122==0 && yymsp[0].minor.yy180==0 ){ - yymsp[-6].minor.yy347 = yymsp[-4].minor.yy347; - }else if( yymsp[-4].minor.yy347->nSrc==1 ){ - yymsp[-6].minor.yy347 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy347,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy122,yymsp[0].minor.yy180); - if( yymsp[-6].minor.yy347 ){ - struct SrcList_item *pNew = &yymsp[-6].minor.yy347->a[yymsp[-6].minor.yy347->nSrc-1]; - struct SrcList_item *pOld = yymsp[-4].minor.yy347->a; + if( yymsp[-6].minor.yy185==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy72==0 && yymsp[0].minor.yy254==0 ){ + yymsp[-6].minor.yy185 = yymsp[-4].minor.yy185; + }else if( yymsp[-4].minor.yy185->nSrc==1 ){ + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); + if( yymsp[-6].minor.yy185 ){ + struct SrcList_item *pNew = &yymsp[-6].minor.yy185->a[yymsp[-6].minor.yy185->nSrc-1]; + struct SrcList_item *pOld = yymsp[-4].minor.yy185->a; pNew->zName = pOld->zName; pNew->zDatabase = pOld->zDatabase; pNew->pSelect = pOld->pSelect; pOld->zName = pOld->zDatabase = 0; pOld->pSelect = 0; } - sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy347); + sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy185); }else{ Select *pSubquery; - sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy347); - pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy347,0,0,0,0,SF_NestedFrom,0,0); - yymsp[-6].minor.yy347 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy347,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy122,yymsp[0].minor.yy180); + sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy185); + pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy185,0,0,0,0,SF_NestedFrom,0,0); + yymsp[-6].minor.yy185 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy185,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy72,yymsp[0].minor.yy254); } } break; @@ -130945,32 +134206,32 @@ static void yy_reduce( {yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;} break; case 107: /* fullname ::= nm dbnm */ -{yymsp[-1].minor.yy347 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} +{yymsp[-1].minor.yy185 = sqlite3SrcListAppend(pParse->db,0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 108: /* joinop ::= COMMA|JOIN */ -{ yymsp[0].minor.yy392 = JT_INNER; } +{ yymsp[0].minor.yy194 = JT_INNER; } break; case 109: /* joinop ::= JOIN_KW JOIN */ -{yymsp[-1].minor.yy392 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} +{yymsp[-1].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/} break; case 110: /* joinop ::= JOIN_KW nm JOIN */ -{yymsp[-2].minor.yy392 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} +{yymsp[-2].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/} break; case 111: /* joinop ::= JOIN_KW nm nm JOIN */ -{yymsp[-3].minor.yy392 = 
sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} +{yymsp[-3].minor.yy194 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/} break; case 112: /* on_opt ::= ON expr */ case 129: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==129); case 136: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==136); case 195: /* case_else ::= ELSE expr */ yytestcase(yyruleno==195); -{yymsp[-1].minor.yy122 = yymsp[0].minor.yy342.pExpr;} +{yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr;} break; case 113: /* on_opt ::= */ case 128: /* having_opt ::= */ yytestcase(yyruleno==128); case 135: /* where_opt ::= */ yytestcase(yyruleno==135); case 196: /* case_else ::= */ yytestcase(yyruleno==196); case 198: /* case_operand ::= */ yytestcase(yyruleno==198); -{yymsp[1].minor.yy122 = 0;} +{yymsp[1].minor.yy72 = 0;} break; case 115: /* indexed_opt ::= INDEXED BY nm */ {yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;} @@ -130979,116 +134240,116 @@ static void yy_reduce( {yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;} break; case 117: /* using_opt ::= USING LP idlist RP */ -{yymsp[-3].minor.yy180 = yymsp[-1].minor.yy180;} +{yymsp[-3].minor.yy254 = yymsp[-1].minor.yy254;} break; case 118: /* using_opt ::= */ case 144: /* idlist_opt ::= */ yytestcase(yyruleno==144); -{yymsp[1].minor.yy180 = 0;} +{yymsp[1].minor.yy254 = 0;} break; case 120: /* orderby_opt ::= ORDER BY sortlist */ case 127: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==127); -{yymsp[-2].minor.yy442 = yymsp[0].minor.yy442;} +{yymsp[-2].minor.yy148 = yymsp[0].minor.yy148;} break; case 121: /* sortlist ::= sortlist COMMA expr sortorder */ { - yymsp[-3].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy442,yymsp[-1].minor.yy342.pExpr); - sqlite3ExprListSetSortOrder(yymsp[-3].minor.yy442,yymsp[0].minor.yy392); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148,yymsp[-1].minor.yy190.pExpr); + sqlite3ExprListSetSortOrder(yymsp[-3].minor.yy148,yymsp[0].minor.yy194); } break; case 122: /* sortlist ::= expr sortorder */ { - yymsp[-1].minor.yy442 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy342.pExpr); /*A-overwrites-Y*/ - sqlite3ExprListSetSortOrder(yymsp[-1].minor.yy442,yymsp[0].minor.yy392); + yymsp[-1].minor.yy148 = sqlite3ExprListAppend(pParse,0,yymsp[-1].minor.yy190.pExpr); /*A-overwrites-Y*/ + sqlite3ExprListSetSortOrder(yymsp[-1].minor.yy148,yymsp[0].minor.yy194); } break; case 123: /* sortorder ::= ASC */ -{yymsp[0].minor.yy392 = SQLITE_SO_ASC;} +{yymsp[0].minor.yy194 = SQLITE_SO_ASC;} break; case 124: /* sortorder ::= DESC */ -{yymsp[0].minor.yy392 = SQLITE_SO_DESC;} +{yymsp[0].minor.yy194 = SQLITE_SO_DESC;} break; case 125: /* sortorder ::= */ -{yymsp[1].minor.yy392 = SQLITE_SO_UNDEFINED;} +{yymsp[1].minor.yy194 = SQLITE_SO_UNDEFINED;} break; case 130: /* limit_opt ::= */ -{yymsp[1].minor.yy64.pLimit = 0; yymsp[1].minor.yy64.pOffset = 0;} +{yymsp[1].minor.yy354.pLimit = 0; yymsp[1].minor.yy354.pOffset = 0;} break; case 131: /* limit_opt ::= LIMIT expr */ -{yymsp[-1].minor.yy64.pLimit = yymsp[0].minor.yy342.pExpr; yymsp[-1].minor.yy64.pOffset = 0;} +{yymsp[-1].minor.yy354.pLimit = yymsp[0].minor.yy190.pExpr; yymsp[-1].minor.yy354.pOffset = 0;} break; case 132: /* limit_opt ::= LIMIT expr OFFSET expr */ -{yymsp[-3].minor.yy64.pLimit = yymsp[-2].minor.yy342.pExpr; yymsp[-3].minor.yy64.pOffset = yymsp[0].minor.yy342.pExpr;} +{yymsp[-3].minor.yy354.pLimit = yymsp[-2].minor.yy190.pExpr; 
yymsp[-3].minor.yy354.pOffset = yymsp[0].minor.yy190.pExpr;} break; case 133: /* limit_opt ::= LIMIT expr COMMA expr */ -{yymsp[-3].minor.yy64.pOffset = yymsp[-2].minor.yy342.pExpr; yymsp[-3].minor.yy64.pLimit = yymsp[0].minor.yy342.pExpr;} +{yymsp[-3].minor.yy354.pOffset = yymsp[-2].minor.yy190.pExpr; yymsp[-3].minor.yy354.pLimit = yymsp[0].minor.yy190.pExpr;} break; case 134: /* cmd ::= with DELETE FROM fullname indexed_opt where_opt */ { - sqlite3WithPush(pParse, yymsp[-5].minor.yy331, 1); - sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy347, &yymsp[-1].minor.yy0); - sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy347,yymsp[0].minor.yy122); + sqlite3WithPush(pParse, yymsp[-5].minor.yy285, 1); + sqlite3SrcListIndexedBy(pParse, yymsp[-2].minor.yy185, &yymsp[-1].minor.yy0); + sqlite3DeleteFrom(pParse,yymsp[-2].minor.yy185,yymsp[0].minor.yy72); } break; case 137: /* cmd ::= with UPDATE orconf fullname indexed_opt SET setlist where_opt */ { - sqlite3WithPush(pParse, yymsp[-7].minor.yy331, 1); - sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy347, &yymsp[-3].minor.yy0); - sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy442,"set list"); - sqlite3Update(pParse,yymsp[-4].minor.yy347,yymsp[-1].minor.yy442,yymsp[0].minor.yy122,yymsp[-5].minor.yy392); + sqlite3WithPush(pParse, yymsp[-7].minor.yy285, 1); + sqlite3SrcListIndexedBy(pParse, yymsp[-4].minor.yy185, &yymsp[-3].minor.yy0); + sqlite3ExprListCheckLength(pParse,yymsp[-1].minor.yy148,"set list"); + sqlite3Update(pParse,yymsp[-4].minor.yy185,yymsp[-1].minor.yy148,yymsp[0].minor.yy72,yymsp[-5].minor.yy194); } break; case 138: /* setlist ::= setlist COMMA nm EQ expr */ { - yymsp[-4].minor.yy442 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy442, yymsp[0].minor.yy342.pExpr); - sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy442, &yymsp[-2].minor.yy0, 1); + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse, yymsp[-4].minor.yy148, yymsp[0].minor.yy190.pExpr); + sqlite3ExprListSetName(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, 1); } break; case 139: /* setlist ::= nm EQ expr */ { - yylhsminor.yy442 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy342.pExpr); - sqlite3ExprListSetName(pParse, yylhsminor.yy442, &yymsp[-2].minor.yy0, 1); + yylhsminor.yy148 = sqlite3ExprListAppend(pParse, 0, yymsp[0].minor.yy190.pExpr); + sqlite3ExprListSetName(pParse, yylhsminor.yy148, &yymsp[-2].minor.yy0, 1); } - yymsp[-2].minor.yy442 = yylhsminor.yy442; + yymsp[-2].minor.yy148 = yylhsminor.yy148; break; case 140: /* cmd ::= with insert_cmd INTO fullname idlist_opt select */ { - sqlite3WithPush(pParse, yymsp[-5].minor.yy331, 1); - sqlite3Insert(pParse, yymsp[-2].minor.yy347, yymsp[0].minor.yy159, yymsp[-1].minor.yy180, yymsp[-4].minor.yy392); + sqlite3WithPush(pParse, yymsp[-5].minor.yy285, 1); + sqlite3Insert(pParse, yymsp[-2].minor.yy185, yymsp[0].minor.yy243, yymsp[-1].minor.yy254, yymsp[-4].minor.yy194); } break; case 141: /* cmd ::= with insert_cmd INTO fullname idlist_opt DEFAULT VALUES */ { - sqlite3WithPush(pParse, yymsp[-6].minor.yy331, 1); - sqlite3Insert(pParse, yymsp[-3].minor.yy347, 0, yymsp[-2].minor.yy180, yymsp[-5].minor.yy392); + sqlite3WithPush(pParse, yymsp[-6].minor.yy285, 1); + sqlite3Insert(pParse, yymsp[-3].minor.yy185, 0, yymsp[-2].minor.yy254, yymsp[-5].minor.yy194); } break; case 145: /* idlist_opt ::= LP idlist RP */ -{yymsp[-2].minor.yy180 = yymsp[-1].minor.yy180;} +{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} break; case 146: /* idlist ::= idlist COMMA nm */ -{yymsp[-2].minor.yy180 = 
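/* Editor's note: the two non-empty limit_opt rules above deliberately swap
** operands: "LIMIT a OFFSET b" takes a as the limit and b as the offset,
** while the MySQL-style "LIMIT a, b" takes a as the offset and b as the
** limit.  That is why the COMMA rule assigns pOffset from yymsp[-2] and
** pLimit from yymsp[0]; e.g. "LIMIT 10 OFFSET 5" and "LIMIT 5, 10" select
** the same rows. */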
sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy180,&yymsp[0].minor.yy0);} +{yymsp[-2].minor.yy254 = sqlite3IdListAppend(pParse->db,yymsp[-2].minor.yy254,&yymsp[0].minor.yy0);} break; case 147: /* idlist ::= nm */ -{yymsp[0].minor.yy180 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} +{yymsp[0].minor.yy254 = sqlite3IdListAppend(pParse->db,0,&yymsp[0].minor.yy0); /*A-overwrites-Y*/} break; case 148: /* expr ::= LP expr RP */ -{spanSet(&yymsp[-2].minor.yy342,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ yymsp[-2].minor.yy342.pExpr = yymsp[-1].minor.yy342.pExpr;} +{spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ yymsp[-2].minor.yy190.pExpr = yymsp[-1].minor.yy190.pExpr;} break; case 149: /* term ::= NULL */ case 154: /* term ::= INTEGER|FLOAT|BLOB */ yytestcase(yyruleno==154); case 155: /* term ::= STRING */ yytestcase(yyruleno==155); -{spanExpr(&yymsp[0].minor.yy342,pParse,yymsp[0].major,yymsp[0].minor.yy0);/*A-overwrites-X*/} +{spanExpr(&yymsp[0].minor.yy190,pParse,yymsp[0].major,yymsp[0].minor.yy0);/*A-overwrites-X*/} break; case 150: /* expr ::= ID|INDEXED */ case 151: /* expr ::= JOIN_KW */ yytestcase(yyruleno==151); -{spanExpr(&yymsp[0].minor.yy342,pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} +{spanExpr(&yymsp[0].minor.yy190,pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; case 152: /* expr ::= nm DOT nm */ { Expr *temp1 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); - spanSet(&yymsp[-2].minor.yy342,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-2].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); + spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0); } break; case 153: /* expr ::= nm DOT nm DOT nm */ @@ -131097,69 +134358,70 @@ static void yy_reduce( Expr *temp2 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0); Expr *temp3 = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[0].minor.yy0); Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0); - spanSet(&yymsp[-4].minor.yy342,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); + spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0); } break; case 156: /* expr ::= VARIABLE */ { - Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ - if( t.n>=2 && t.z[0]=='#' && sqlite3Isdigit(t.z[1]) ){ + if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ + spanExpr(&yymsp[0].minor.yy190, pParse, TK_VARIABLE, yymsp[0].minor.yy0); + sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy190.pExpr); + }else{ /* When doing a nested parse, one can include terms in an expression ** that look like this: #1 #2 ... These terms refer to registers ** in the virtual machine. #N is the N-th register. 
*/ - spanSet(&yymsp[0].minor.yy342, &t, &t); + Token t = yymsp[0].minor.yy0; /*A-overwrites-X*/ + assert( t.n>=2 ); + spanSet(&yymsp[0].minor.yy190, &t, &t); if( pParse->nested==0 ){ sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t); - yymsp[0].minor.yy342.pExpr = 0; + yymsp[0].minor.yy190.pExpr = 0; }else{ - yymsp[0].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &t); - if( yymsp[0].minor.yy342.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy342.pExpr->iTable); + yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, &t); + if( yymsp[0].minor.yy190.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy190.pExpr->iTable); } - }else{ - spanExpr(&yymsp[0].minor.yy342, pParse, TK_VARIABLE, t); - sqlite3ExprAssignVarNumber(pParse, yymsp[0].minor.yy342.pExpr); } } break; case 157: /* expr ::= expr COLLATE ID|STRING */ { - yymsp[-2].minor.yy342.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy342.pExpr, &yymsp[0].minor.yy0, 1); - yymsp[-2].minor.yy342.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; + yymsp[-2].minor.yy190.pExpr = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy190.pExpr, &yymsp[0].minor.yy0, 1); + yymsp[-2].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 158: /* expr ::= CAST LP expr AS typetoken RP */ { - spanSet(&yymsp[-5].minor.yy342,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-5].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy342.pExpr, 0, &yymsp[-1].minor.yy0); + spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy190.pExpr, 0, &yymsp[-1].minor.yy0); } break; case 159: /* expr ::= ID|INDEXED LP distinct exprlist RP */ { - if( yymsp[-1].minor.yy442 && yymsp[-1].minor.yy442->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ + if( yymsp[-1].minor.yy148 && yymsp[-1].minor.yy148->nExpr>pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG] ){ sqlite3ErrorMsg(pParse, "too many arguments on function %T", &yymsp[-4].minor.yy0); } - yylhsminor.yy342.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy442, &yymsp[-4].minor.yy0); - spanSet(&yylhsminor.yy342,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); - if( yymsp[-2].minor.yy392==SF_Distinct && yylhsminor.yy342.pExpr ){ - yylhsminor.yy342.pExpr->flags |= EP_Distinct; + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy148, &yymsp[-4].minor.yy0); + spanSet(&yylhsminor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); + if( yymsp[-2].minor.yy194==SF_Distinct && yylhsminor.yy190.pExpr ){ + yylhsminor.yy190.pExpr->flags |= EP_Distinct; } } - yymsp[-4].minor.yy342 = yylhsminor.yy342; + yymsp[-4].minor.yy190 = yylhsminor.yy190; break; case 160: /* expr ::= ID|INDEXED LP STAR RP */ { - yylhsminor.yy342.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0); - spanSet(&yylhsminor.yy342,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0); + spanSet(&yylhsminor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); } - yymsp[-3].minor.yy342 = yylhsminor.yy342; + yymsp[-3].minor.yy190 = yylhsminor.yy190; break; case 161: /* term ::= CTIME_KW */ { - yylhsminor.yy342.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0); - spanSet(&yylhsminor.yy342, &yymsp[0].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy190.pExpr = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0); + spanSet(&yylhsminor.yy190, 
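/* Editor's note: the yy190 (formerly yy342) minors in these expr actions
** are ExprSpan values: the expression tree plus the raw SQL text it covers,
** which spanSet()/spanExpr() maintain so later code can quote the original
** source in names and error messages.  Roughly (the real struct is defined
** earlier in this file):
**
**   struct ExprSpan {
**     Expr *pExpr;         // the expression parse tree
**     const char *zStart;  // first character of input text
**     const char *zEnd;    // one character past the end of input text
**   };
*/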
&yymsp[0].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy342 = yylhsminor.yy342; + yymsp[0].minor.yy190 = yylhsminor.yy190; break; case 162: /* expr ::= expr AND expr */ case 163: /* expr ::= expr OR expr */ yytestcase(yyruleno==163); @@ -131169,86 +134431,86 @@ static void yy_reduce( case 167: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==167); case 168: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==168); case 169: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==169); -{spanBinaryExpr(pParse,yymsp[-1].major,&yymsp[-2].minor.yy342,&yymsp[0].minor.yy342);} +{spanBinaryExpr(pParse,yymsp[-1].major,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190);} break; case 170: /* likeop ::= LIKE_KW|MATCH */ -{yymsp[0].minor.yy318.eOperator = yymsp[0].minor.yy0; yymsp[0].minor.yy318.bNot = 0;/*A-overwrites-X*/} +{yymsp[0].minor.yy392.eOperator = yymsp[0].minor.yy0; yymsp[0].minor.yy392.bNot = 0;/*A-overwrites-X*/} break; case 171: /* likeop ::= NOT LIKE_KW|MATCH */ -{yymsp[-1].minor.yy318.eOperator = yymsp[0].minor.yy0; yymsp[-1].minor.yy318.bNot = 1;} +{yymsp[-1].minor.yy392.eOperator = yymsp[0].minor.yy0; yymsp[-1].minor.yy392.bNot = 1;} break; case 172: /* expr ::= expr likeop expr */ { ExprList *pList; - pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy342.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy342.pExpr); - yymsp[-2].minor.yy342.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy318.eOperator); - exprNot(pParse, yymsp[-1].minor.yy318.bNot, &yymsp[-2].minor.yy342); - yymsp[-2].minor.yy342.zEnd = yymsp[0].minor.yy342.zEnd; - if( yymsp[-2].minor.yy342.pExpr ) yymsp[-2].minor.yy342.pExpr->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[0].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-2].minor.yy190.pExpr); + yymsp[-2].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-1].minor.yy392.eOperator); + exprNot(pParse, yymsp[-1].minor.yy392.bNot, &yymsp[-2].minor.yy190); + yymsp[-2].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; + if( yymsp[-2].minor.yy190.pExpr ) yymsp[-2].minor.yy190.pExpr->flags |= EP_InfixFunc; } break; case 173: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; - pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy342.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy342.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy342.pExpr); - yymsp[-4].minor.yy342.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy318.eOperator); - exprNot(pParse, yymsp[-3].minor.yy318.bNot, &yymsp[-4].minor.yy342); - yymsp[-4].minor.yy342.zEnd = yymsp[0].minor.yy342.zEnd; - if( yymsp[-4].minor.yy342.pExpr ) yymsp[-4].minor.yy342.pExpr->flags |= EP_InfixFunc; + pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[-4].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3ExprFunction(pParse, pList, &yymsp[-3].minor.yy392.eOperator); + exprNot(pParse, yymsp[-3].minor.yy392.bNot, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; + if( yymsp[-4].minor.yy190.pExpr ) yymsp[-4].minor.yy190.pExpr->flags |= EP_InfixFunc; } break; case 174: /* expr ::= expr ISNULL|NOTNULL */ -{spanUnaryPostfix(pParse,yymsp[0].major,&yymsp[-1].minor.yy342,&yymsp[0].minor.yy0);} 
+{spanUnaryPostfix(pParse,yymsp[0].major,&yymsp[-1].minor.yy190,&yymsp[0].minor.yy0);} break; case 175: /* expr ::= expr NOT NULL */ -{spanUnaryPostfix(pParse,TK_NOTNULL,&yymsp[-2].minor.yy342,&yymsp[0].minor.yy0);} +{spanUnaryPostfix(pParse,TK_NOTNULL,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy0);} break; case 176: /* expr ::= expr IS expr */ { - spanBinaryExpr(pParse,TK_IS,&yymsp[-2].minor.yy342,&yymsp[0].minor.yy342); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy342.pExpr, yymsp[-2].minor.yy342.pExpr, TK_ISNULL); + spanBinaryExpr(pParse,TK_IS,&yymsp[-2].minor.yy190,&yymsp[0].minor.yy190); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-2].minor.yy190.pExpr, TK_ISNULL); } break; case 177: /* expr ::= expr IS NOT expr */ { - spanBinaryExpr(pParse,TK_ISNOT,&yymsp[-3].minor.yy342,&yymsp[0].minor.yy342); - binaryToUnaryIfNull(pParse, yymsp[0].minor.yy342.pExpr, yymsp[-3].minor.yy342.pExpr, TK_NOTNULL); + spanBinaryExpr(pParse,TK_ISNOT,&yymsp[-3].minor.yy190,&yymsp[0].minor.yy190); + binaryToUnaryIfNull(pParse, yymsp[0].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, TK_NOTNULL); } break; case 178: /* expr ::= NOT expr */ case 179: /* expr ::= BITNOT expr */ yytestcase(yyruleno==179); -{spanUnaryPrefix(&yymsp[-1].minor.yy342,pParse,yymsp[-1].major,&yymsp[0].minor.yy342,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,yymsp[-1].major,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} break; case 180: /* expr ::= MINUS expr */ -{spanUnaryPrefix(&yymsp[-1].minor.yy342,pParse,TK_UMINUS,&yymsp[0].minor.yy342,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UMINUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} break; case 181: /* expr ::= PLUS expr */ -{spanUnaryPrefix(&yymsp[-1].minor.yy342,pParse,TK_UPLUS,&yymsp[0].minor.yy342,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} +{spanUnaryPrefix(&yymsp[-1].minor.yy190,pParse,TK_UPLUS,&yymsp[0].minor.yy190,&yymsp[-1].minor.yy0);/*A-overwrites-B*/} break; case 182: /* between_op ::= BETWEEN */ case 185: /* in_op ::= IN */ yytestcase(yyruleno==185); -{yymsp[0].minor.yy392 = 0;} +{yymsp[0].minor.yy194 = 0;} break; case 184: /* expr ::= expr between_op expr AND expr */ { - ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy342.pExpr); - pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy342.pExpr); - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy342.pExpr, 0, 0); - if( yymsp[-4].minor.yy342.pExpr ){ - yymsp[-4].minor.yy342.pExpr->x.pList = pList; + ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = pList; }else{ sqlite3ExprListDelete(pParse->db, pList); } - exprNot(pParse, yymsp[-3].minor.yy392, &yymsp[-4].minor.yy342); - yymsp[-4].minor.yy342.zEnd = yymsp[0].minor.yy342.zEnd; + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[0].minor.yy190.zEnd; } break; case 187: /* expr ::= expr in_op LP exprlist RP */ { - if( yymsp[-1].minor.yy442==0 ){ + if( yymsp[-1].minor.yy148==0 ){ /* Expressions of the form ** ** expr1 IN () @@ -131257,9 +134519,9 @@ static void yy_reduce( ** simplify to constants 0 (false) and 1 (true), respectively, ** regardless 
of the value of expr1. */ - sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy342.pExpr); - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy392]); - }else if( yymsp[-1].minor.yy442->nExpr==1 ){ + sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy190.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy194]); + }else if( yymsp[-1].minor.yy148->nExpr==1 ){ /* Expressions of the form: ** ** expr1 IN (?1) @@ -131276,223 +134538,202 @@ static void yy_reduce( ** affinity or the collating sequence to use for comparison. Otherwise, ** the semantics would be subtly different from IN or NOT IN. */ - Expr *pRHS = yymsp[-1].minor.yy442->a[0].pExpr; - yymsp[-1].minor.yy442->a[0].pExpr = 0; - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy442); + Expr *pRHS = yymsp[-1].minor.yy148->a[0].pExpr; + yymsp[-1].minor.yy148->a[0].pExpr = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy148); /* pRHS cannot be NULL because a malloc error would have been detected ** before now and control would have never reached this point */ if( ALWAYS(pRHS) ){ pRHS->flags &= ~EP_Collate; pRHS->flags |= EP_Generic; } - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy392 ? TK_NE : TK_EQ, yymsp[-4].minor.yy342.pExpr, pRHS, 0); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS, 0); }else{ - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy342.pExpr, 0, 0); - if( yymsp[-4].minor.yy342.pExpr ){ - yymsp[-4].minor.yy342.pExpr->x.pList = yymsp[-1].minor.yy442; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy342.pExpr); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy148; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy442); + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy148); } - exprNot(pParse, yymsp[-3].minor.yy392, &yymsp[-4].minor.yy342); + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); } - yymsp[-4].minor.yy342.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; + yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; case 188: /* expr ::= LP select RP */ { - spanSet(&yymsp[-2].minor.yy342,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ - yymsp[-2].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); - if( yymsp[-2].minor.yy342.pExpr ){ - yymsp[-2].minor.yy342.pExpr->x.pSelect = yymsp[-1].minor.yy159; - ExprSetProperty(yymsp[-2].minor.yy342.pExpr, EP_xIsSelect|EP_Subquery); - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-2].minor.yy342.pExpr); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy159); - } + spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ + yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy190.pExpr, yymsp[-1].minor.yy243); } break; case 189: /* expr ::= expr in_op LP select RP */ { - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy342.pExpr, 0, 0); - if( yymsp[-4].minor.yy342.pExpr ){ - yymsp[-4].minor.yy342.pExpr->x.pSelect = yymsp[-1].minor.yy159; - ExprSetProperty(yymsp[-4].minor.yy342.pExpr, 
EP_xIsSelect|EP_Subquery); - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy342.pExpr); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy159); - } - exprNot(pParse, yymsp[-3].minor.yy392, &yymsp[-4].minor.yy342); - yymsp[-4].minor.yy342.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, yymsp[-1].minor.yy243); + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n]; } break; - case 190: /* expr ::= expr in_op nm dbnm */ + case 190: /* expr ::= expr in_op nm dbnm paren_exprlist */ { - SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0); - yymsp[-3].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-3].minor.yy342.pExpr, 0, 0); - if( yymsp[-3].minor.yy342.pExpr ){ - yymsp[-3].minor.yy342.pExpr->x.pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); - ExprSetProperty(yymsp[-3].minor.yy342.pExpr, EP_xIsSelect|EP_Subquery); - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-3].minor.yy342.pExpr); - }else{ - sqlite3SrcListDelete(pParse->db, pSrc); - } - exprNot(pParse, yymsp[-2].minor.yy392, &yymsp[-3].minor.yy342); - yymsp[-3].minor.yy342.zEnd = yymsp[0].minor.yy0.z ? &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n] : &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n]; + SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); + Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0); + if( yymsp[0].minor.yy148 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy148); + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, pSelect); + exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190); + yymsp[-4].minor.yy190.zEnd = yymsp[-1].minor.yy0.z ? &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n] : &yymsp[-2].minor.yy0.z[yymsp[-2].minor.yy0.n]; } break; case 191: /* expr ::= EXISTS LP select RP */ { Expr *p; - spanSet(&yymsp[-3].minor.yy342,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ - p = yymsp[-3].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); - if( p ){ - p->x.pSelect = yymsp[-1].minor.yy159; - ExprSetProperty(p, EP_xIsSelect|EP_Subquery); - sqlite3ExprSetHeightAndFlags(pParse, p); - }else{ - sqlite3SelectDelete(pParse->db, yymsp[-1].minor.yy159); - } + spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/ + p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0); + sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy243); } break; case 192: /* expr ::= CASE case_operand case_exprlist case_else END */ { - spanSet(&yymsp[-4].minor.yy342,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/ - yymsp[-4].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy122, 0, 0); - if( yymsp[-4].minor.yy342.pExpr ){ - yymsp[-4].minor.yy342.pExpr->x.pList = yymsp[-1].minor.yy122 ? 
sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy442,yymsp[-1].minor.yy122) : yymsp[-2].minor.yy442; - sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy342.pExpr); + spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/ + yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0, 0); + if( yymsp[-4].minor.yy190.pExpr ){ + yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy72 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[-1].minor.yy72) : yymsp[-2].minor.yy148; + sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr); }else{ - sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy442); - sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy122); + sqlite3ExprListDelete(pParse->db, yymsp[-2].minor.yy148); + sqlite3ExprDelete(pParse->db, yymsp[-1].minor.yy72); } } break; case 193: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { - yymsp[-4].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy442, yymsp[-2].minor.yy342.pExpr); - yymsp[-4].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy442, yymsp[0].minor.yy342.pExpr); + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[-2].minor.yy190.pExpr); + yymsp[-4].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy148, yymsp[0].minor.yy190.pExpr); } break; case 194: /* case_exprlist ::= WHEN expr THEN expr */ { - yymsp[-3].minor.yy442 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy342.pExpr); - yymsp[-3].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy442, yymsp[0].minor.yy342.pExpr); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr); + yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, yymsp[0].minor.yy190.pExpr); } break; case 197: /* case_operand ::= expr */ -{yymsp[0].minor.yy122 = yymsp[0].minor.yy342.pExpr; /*A-overwrites-X*/} +{yymsp[0].minor.yy72 = yymsp[0].minor.yy190.pExpr; /*A-overwrites-X*/} break; case 200: /* nexprlist ::= nexprlist COMMA expr */ -{yymsp[-2].minor.yy442 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy442,yymsp[0].minor.yy342.pExpr);} +{yymsp[-2].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[0].minor.yy190.pExpr);} break; case 201: /* nexprlist ::= expr */ -{yymsp[0].minor.yy442 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy342.pExpr); /*A-overwrites-Y*/} +{yymsp[0].minor.yy148 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy190.pExpr); /*A-overwrites-Y*/} break; - case 202: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ + case 203: /* paren_exprlist ::= LP exprlist RP */ + case 208: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==208); +{yymsp[-2].minor.yy148 = yymsp[-1].minor.yy148;} + break; + case 204: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */ { sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, - sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy442, yymsp[-10].minor.yy392, - &yymsp[-11].minor.yy0, yymsp[0].minor.yy122, SQLITE_SO_ASC, yymsp[-8].minor.yy392); + sqlite3SrcListAppend(pParse->db,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy148, yymsp[-10].minor.yy194, + &yymsp[-11].minor.yy0, yymsp[0].minor.yy72, SQLITE_SO_ASC, yymsp[-8].minor.yy194, SQLITE_IDXTYPE_APPDEF); } break; - case 203: /* uniqueflag ::= UNIQUE */ - case 244: /* raisetype ::= ABORT */ yytestcase(yyruleno==244); 
-{yymsp[0].minor.yy392 = OE_Abort;} - break; - case 204: /* uniqueflag ::= */ -{yymsp[1].minor.yy392 = OE_None;} + case 205: /* uniqueflag ::= UNIQUE */ + case 246: /* raisetype ::= ABORT */ yytestcase(yyruleno==246); +{yymsp[0].minor.yy194 = OE_Abort;} break; - case 206: /* eidlist_opt ::= LP eidlist RP */ -{yymsp[-2].minor.yy442 = yymsp[-1].minor.yy442;} + case 206: /* uniqueflag ::= */ +{yymsp[1].minor.yy194 = OE_None;} break; - case 207: /* eidlist ::= eidlist COMMA nm collate sortorder */ + case 209: /* eidlist ::= eidlist COMMA nm collate sortorder */ { - yymsp[-4].minor.yy442 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy442, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy392, yymsp[0].minor.yy392); + yymsp[-4].minor.yy148 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy148, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194); } break; - case 208: /* eidlist ::= nm collate sortorder */ + case 210: /* eidlist ::= nm collate sortorder */ { - yymsp[-2].minor.yy442 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy392, yymsp[0].minor.yy392); /*A-overwrites-Y*/ + yymsp[-2].minor.yy148 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy194, yymsp[0].minor.yy194); /*A-overwrites-Y*/ } break; - case 211: /* cmd ::= DROP INDEX ifexists fullname */ -{sqlite3DropIndex(pParse, yymsp[0].minor.yy347, yymsp[-1].minor.yy392);} + case 213: /* cmd ::= DROP INDEX ifexists fullname */ +{sqlite3DropIndex(pParse, yymsp[0].minor.yy185, yymsp[-1].minor.yy194);} break; - case 212: /* cmd ::= VACUUM */ - case 213: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==213); + case 214: /* cmd ::= VACUUM */ + case 215: /* cmd ::= VACUUM nm */ yytestcase(yyruleno==215); {sqlite3Vacuum(pParse);} break; - case 214: /* cmd ::= PRAGMA nm dbnm */ + case 216: /* cmd ::= PRAGMA nm dbnm */ {sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);} break; - case 215: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ + case 217: /* cmd ::= PRAGMA nm dbnm EQ nmnum */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);} break; - case 216: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ + case 218: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);} break; - case 217: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ + case 219: /* cmd ::= PRAGMA nm dbnm EQ minus_num */ {sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);} break; - case 218: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ + case 220: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */ {sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);} break; - case 221: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ + case 223: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */ { Token all; all.z = yymsp[-3].minor.yy0.z; all.n = (int)(yymsp[0].minor.yy0.z - yymsp[-3].minor.yy0.z) + yymsp[0].minor.yy0.n; - sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy327, &all); + sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy145, &all); } break; - case 222: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ + case 224: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */ { - sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy392, yymsp[-4].minor.yy410.a, 
yymsp[-4].minor.yy410.b, yymsp[-2].minor.yy347, yymsp[0].minor.yy122, yymsp[-10].minor.yy392, yymsp[-8].minor.yy392); + sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy194, yymsp[-4].minor.yy332.a, yymsp[-4].minor.yy332.b, yymsp[-2].minor.yy185, yymsp[0].minor.yy72, yymsp[-10].minor.yy194, yymsp[-8].minor.yy194); yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/ } break; - case 223: /* trigger_time ::= BEFORE */ -{ yymsp[0].minor.yy392 = TK_BEFORE; } + case 225: /* trigger_time ::= BEFORE */ +{ yymsp[0].minor.yy194 = TK_BEFORE; } break; - case 224: /* trigger_time ::= AFTER */ -{ yymsp[0].minor.yy392 = TK_AFTER; } + case 226: /* trigger_time ::= AFTER */ +{ yymsp[0].minor.yy194 = TK_AFTER; } break; - case 225: /* trigger_time ::= INSTEAD OF */ -{ yymsp[-1].minor.yy392 = TK_INSTEAD;} + case 227: /* trigger_time ::= INSTEAD OF */ +{ yymsp[-1].minor.yy194 = TK_INSTEAD;} break; - case 226: /* trigger_time ::= */ -{ yymsp[1].minor.yy392 = TK_BEFORE; } + case 228: /* trigger_time ::= */ +{ yymsp[1].minor.yy194 = TK_BEFORE; } break; - case 227: /* trigger_event ::= DELETE|INSERT */ - case 228: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==228); -{yymsp[0].minor.yy410.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy410.b = 0;} + case 229: /* trigger_event ::= DELETE|INSERT */ + case 230: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==230); +{yymsp[0].minor.yy332.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy332.b = 0;} break; - case 229: /* trigger_event ::= UPDATE OF idlist */ -{yymsp[-2].minor.yy410.a = TK_UPDATE; yymsp[-2].minor.yy410.b = yymsp[0].minor.yy180;} + case 231: /* trigger_event ::= UPDATE OF idlist */ +{yymsp[-2].minor.yy332.a = TK_UPDATE; yymsp[-2].minor.yy332.b = yymsp[0].minor.yy254;} break; - case 230: /* when_clause ::= */ - case 249: /* key_opt ::= */ yytestcase(yyruleno==249); -{ yymsp[1].minor.yy122 = 0; } + case 232: /* when_clause ::= */ + case 251: /* key_opt ::= */ yytestcase(yyruleno==251); +{ yymsp[1].minor.yy72 = 0; } break; - case 231: /* when_clause ::= WHEN expr */ - case 250: /* key_opt ::= KEY expr */ yytestcase(yyruleno==250); -{ yymsp[-1].minor.yy122 = yymsp[0].minor.yy342.pExpr; } + case 233: /* when_clause ::= WHEN expr */ + case 252: /* key_opt ::= KEY expr */ yytestcase(yyruleno==252); +{ yymsp[-1].minor.yy72 = yymsp[0].minor.yy190.pExpr; } break; - case 232: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ + case 234: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */ { - assert( yymsp[-2].minor.yy327!=0 ); - yymsp[-2].minor.yy327->pLast->pNext = yymsp[-1].minor.yy327; - yymsp[-2].minor.yy327->pLast = yymsp[-1].minor.yy327; + assert( yymsp[-2].minor.yy145!=0 ); + yymsp[-2].minor.yy145->pLast->pNext = yymsp[-1].minor.yy145; + yymsp[-2].minor.yy145->pLast = yymsp[-1].minor.yy145; } break; - case 233: /* trigger_cmd_list ::= trigger_cmd SEMI */ + case 235: /* trigger_cmd_list ::= trigger_cmd SEMI */ { - assert( yymsp[-1].minor.yy327!=0 ); - yymsp[-1].minor.yy327->pLast = yymsp[-1].minor.yy327; + assert( yymsp[-1].minor.yy145!=0 ); + yymsp[-1].minor.yy145->pLast = yymsp[-1].minor.yy145; } break; - case 234: /* trnm ::= nm DOT nm */ + case 236: /* trnm ::= nm DOT nm */ { yymsp[-2].minor.yy0 = yymsp[0].minor.yy0; sqlite3ErrorMsg(pParse, @@ -131500,195 +134741,195 @@ static void yy_reduce( "statements within triggers"); } break; - case 235: /* tridxby ::= INDEXED BY nm */ + case 237: /* tridxby ::= INDEXED BY nm 
*/ { sqlite3ErrorMsg(pParse, "the INDEXED BY clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 236: /* tridxby ::= NOT INDEXED */ + case 238: /* tridxby ::= NOT INDEXED */ { sqlite3ErrorMsg(pParse, "the NOT INDEXED clause is not allowed on UPDATE or DELETE statements " "within triggers"); } break; - case 237: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */ -{yymsp[-6].minor.yy327 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy442, yymsp[0].minor.yy122, yymsp[-5].minor.yy392);} + case 239: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist where_opt */ +{yymsp[-6].minor.yy145 = sqlite3TriggerUpdateStep(pParse->db, &yymsp[-4].minor.yy0, yymsp[-1].minor.yy148, yymsp[0].minor.yy72, yymsp[-5].minor.yy194);} break; - case 238: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */ -{yymsp[-4].minor.yy327 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy180, yymsp[0].minor.yy159, yymsp[-4].minor.yy392);/*A-overwrites-R*/} + case 240: /* trigger_cmd ::= insert_cmd INTO trnm idlist_opt select */ +{yymsp[-4].minor.yy145 = sqlite3TriggerInsertStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy254, yymsp[0].minor.yy243, yymsp[-4].minor.yy194);/*A-overwrites-R*/} break; - case 239: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */ -{yymsp[-4].minor.yy327 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy122);} + case 241: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt */ +{yymsp[-4].minor.yy145 = sqlite3TriggerDeleteStep(pParse->db, &yymsp[-2].minor.yy0, yymsp[0].minor.yy72);} break; - case 240: /* trigger_cmd ::= select */ -{yymsp[0].minor.yy327 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy159); /*A-overwrites-X*/} + case 242: /* trigger_cmd ::= select */ +{yymsp[0].minor.yy145 = sqlite3TriggerSelectStep(pParse->db, yymsp[0].minor.yy243); /*A-overwrites-X*/} break; - case 241: /* expr ::= RAISE LP IGNORE RP */ + case 243: /* expr ::= RAISE LP IGNORE RP */ { - spanSet(&yymsp[-3].minor.yy342,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-3].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); - if( yymsp[-3].minor.yy342.pExpr ){ - yymsp[-3].minor.yy342.pExpr->affinity = OE_Ignore; + spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0); + if( yymsp[-3].minor.yy190.pExpr ){ + yymsp[-3].minor.yy190.pExpr->affinity = OE_Ignore; } } break; - case 242: /* expr ::= RAISE LP raisetype COMMA nm RP */ + case 244: /* expr ::= RAISE LP raisetype COMMA nm RP */ { - spanSet(&yymsp[-5].minor.yy342,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ - yymsp[-5].minor.yy342.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0); - if( yymsp[-5].minor.yy342.pExpr ) { - yymsp[-5].minor.yy342.pExpr->affinity = (char)yymsp[-3].minor.yy392; + spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/ + yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0); + if( yymsp[-5].minor.yy190.pExpr ) { + yymsp[-5].minor.yy190.pExpr->affinity = (char)yymsp[-3].minor.yy194; } } break; - case 243: /* raisetype ::= ROLLBACK */ -{yymsp[0].minor.yy392 = OE_Rollback;} + case 245: /* raisetype ::= ROLLBACK */ +{yymsp[0].minor.yy194 = OE_Rollback;} break; - case 245: /* raisetype ::= FAIL */ -{yymsp[0].minor.yy392 = 
OE_Fail;} + case 247: /* raisetype ::= FAIL */ +{yymsp[0].minor.yy194 = OE_Fail;} break; - case 246: /* cmd ::= DROP TRIGGER ifexists fullname */ + case 248: /* cmd ::= DROP TRIGGER ifexists fullname */ { - sqlite3DropTrigger(pParse,yymsp[0].minor.yy347,yymsp[-1].minor.yy392); + sqlite3DropTrigger(pParse,yymsp[0].minor.yy185,yymsp[-1].minor.yy194); } break; - case 247: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ + case 249: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */ { - sqlite3Attach(pParse, yymsp[-3].minor.yy342.pExpr, yymsp[-1].minor.yy342.pExpr, yymsp[0].minor.yy122); + sqlite3Attach(pParse, yymsp[-3].minor.yy190.pExpr, yymsp[-1].minor.yy190.pExpr, yymsp[0].minor.yy72); } break; - case 248: /* cmd ::= DETACH database_kw_opt expr */ + case 250: /* cmd ::= DETACH database_kw_opt expr */ { - sqlite3Detach(pParse, yymsp[0].minor.yy342.pExpr); + sqlite3Detach(pParse, yymsp[0].minor.yy190.pExpr); } break; - case 251: /* cmd ::= REINDEX */ + case 253: /* cmd ::= REINDEX */ {sqlite3Reindex(pParse, 0, 0);} break; - case 252: /* cmd ::= REINDEX nm dbnm */ + case 254: /* cmd ::= REINDEX nm dbnm */ {sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 253: /* cmd ::= ANALYZE */ + case 255: /* cmd ::= ANALYZE */ {sqlite3Analyze(pParse, 0, 0);} break; - case 254: /* cmd ::= ANALYZE nm dbnm */ + case 256: /* cmd ::= ANALYZE nm dbnm */ {sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);} break; - case 255: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ + case 257: /* cmd ::= ALTER TABLE fullname RENAME TO nm */ { - sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy347,&yymsp[0].minor.yy0); + sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy185,&yymsp[0].minor.yy0); } break; - case 256: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ + case 258: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */ { yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n; sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0); } break; - case 257: /* add_column_fullname ::= fullname */ + case 259: /* add_column_fullname ::= fullname */ { disableLookaside(pParse); - sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy347); + sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy185); } break; - case 258: /* cmd ::= create_vtab */ + case 260: /* cmd ::= create_vtab */ {sqlite3VtabFinishParse(pParse,0);} break; - case 259: /* cmd ::= create_vtab LP vtabarglist RP */ + case 261: /* cmd ::= create_vtab LP vtabarglist RP */ {sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);} break; - case 260: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ + case 262: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */ { - sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy392); + sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy194); } break; - case 261: /* vtabarg ::= */ + case 263: /* vtabarg ::= */ {sqlite3VtabArgInit(pParse);} break; - case 262: /* vtabargtoken ::= ANY */ - case 263: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==263); - case 264: /* lp ::= LP */ yytestcase(yyruleno==264); + case 264: /* vtabargtoken ::= ANY */ + case 265: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==265); + case 266: /* lp ::= LP */ yytestcase(yyruleno==266); 
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);} break; - case 265: /* with ::= */ -{yymsp[1].minor.yy331 = 0;} + case 267: /* with ::= */ +{yymsp[1].minor.yy285 = 0;} break; - case 266: /* with ::= WITH wqlist */ -{ yymsp[-1].minor.yy331 = yymsp[0].minor.yy331; } + case 268: /* with ::= WITH wqlist */ +{ yymsp[-1].minor.yy285 = yymsp[0].minor.yy285; } break; - case 267: /* with ::= WITH RECURSIVE wqlist */ -{ yymsp[-2].minor.yy331 = yymsp[0].minor.yy331; } + case 269: /* with ::= WITH RECURSIVE wqlist */ +{ yymsp[-2].minor.yy285 = yymsp[0].minor.yy285; } break; - case 268: /* wqlist ::= nm eidlist_opt AS LP select RP */ + case 270: /* wqlist ::= nm eidlist_opt AS LP select RP */ { - yymsp[-5].minor.yy331 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy442, yymsp[-1].minor.yy159); /*A-overwrites-X*/ + yymsp[-5].minor.yy285 = sqlite3WithAdd(pParse, 0, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243); /*A-overwrites-X*/ } break; - case 269: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ + case 271: /* wqlist ::= wqlist COMMA nm eidlist_opt AS LP select RP */ { - yymsp[-7].minor.yy331 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy331, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy442, yymsp[-1].minor.yy159); + yymsp[-7].minor.yy285 = sqlite3WithAdd(pParse, yymsp[-7].minor.yy285, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy148, yymsp[-1].minor.yy243); } break; default: - /* (270) input ::= cmdlist */ yytestcase(yyruleno==270); - /* (271) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==271); - /* (272) cmdlist ::= ecmd */ yytestcase(yyruleno==272); - /* (273) ecmd ::= SEMI */ yytestcase(yyruleno==273); - /* (274) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==274); - /* (275) explain ::= */ yytestcase(yyruleno==275); - /* (276) trans_opt ::= */ yytestcase(yyruleno==276); - /* (277) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==277); - /* (278) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==278); - /* (279) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==279); - /* (280) savepoint_opt ::= */ yytestcase(yyruleno==280); - /* (281) cmd ::= create_table create_table_args */ yytestcase(yyruleno==281); - /* (282) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==282); - /* (283) columnlist ::= columnname carglist */ yytestcase(yyruleno==283); - /* (284) nm ::= ID|INDEXED */ yytestcase(yyruleno==284); - /* (285) nm ::= STRING */ yytestcase(yyruleno==285); - /* (286) nm ::= JOIN_KW */ yytestcase(yyruleno==286); - /* (287) typetoken ::= typename */ yytestcase(yyruleno==287); - /* (288) typename ::= ID|STRING */ yytestcase(yyruleno==288); - /* (289) signed ::= plus_num */ yytestcase(yyruleno==289); - /* (290) signed ::= minus_num */ yytestcase(yyruleno==290); - /* (291) carglist ::= carglist ccons */ yytestcase(yyruleno==291); - /* (292) carglist ::= */ yytestcase(yyruleno==292); - /* (293) ccons ::= NULL onconf */ yytestcase(yyruleno==293); - /* (294) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==294); - /* (295) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==295); - /* (296) conslist ::= tcons */ yytestcase(yyruleno==296); - /* (297) tconscomma ::= */ yytestcase(yyruleno==297); - /* (298) defer_subclause_opt ::= defer_subclause */ yytestcase(yyruleno==298); - /* (299) resolvetype ::= raisetype */ yytestcase(yyruleno==299); - /* (300) selectnowith ::= oneselect */ yytestcase(yyruleno==300); - /* (301) oneselect ::= values */ yytestcase(yyruleno==301); - /* (302) sclp ::= selcollist COMMA */ 
yytestcase(yyruleno==302); - /* (303) as ::= ID|STRING */ yytestcase(yyruleno==303); - /* (304) expr ::= term */ yytestcase(yyruleno==304); - /* (305) exprlist ::= nexprlist */ yytestcase(yyruleno==305); - /* (306) nmnum ::= plus_num */ yytestcase(yyruleno==306); - /* (307) nmnum ::= nm */ yytestcase(yyruleno==307); - /* (308) nmnum ::= ON */ yytestcase(yyruleno==308); - /* (309) nmnum ::= DELETE */ yytestcase(yyruleno==309); - /* (310) nmnum ::= DEFAULT */ yytestcase(yyruleno==310); - /* (311) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==311); - /* (312) foreach_clause ::= */ yytestcase(yyruleno==312); - /* (313) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==313); - /* (314) trnm ::= nm */ yytestcase(yyruleno==314); - /* (315) tridxby ::= */ yytestcase(yyruleno==315); - /* (316) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==316); - /* (317) database_kw_opt ::= */ yytestcase(yyruleno==317); - /* (318) kwcolumn_opt ::= */ yytestcase(yyruleno==318); - /* (319) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==319); - /* (320) vtabarglist ::= vtabarg */ yytestcase(yyruleno==320); - /* (321) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==321); - /* (322) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==322); - /* (323) anylist ::= */ yytestcase(yyruleno==323); - /* (324) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==324); - /* (325) anylist ::= anylist ANY */ yytestcase(yyruleno==325); + /* (272) input ::= cmdlist */ yytestcase(yyruleno==272); + /* (273) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==273); + /* (274) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=274); + /* (275) ecmd ::= SEMI */ yytestcase(yyruleno==275); + /* (276) ecmd ::= explain cmdx SEMI */ yytestcase(yyruleno==276); + /* (277) explain ::= */ yytestcase(yyruleno==277); + /* (278) trans_opt ::= */ yytestcase(yyruleno==278); + /* (279) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==279); + /* (280) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==280); + /* (281) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==281); + /* (282) savepoint_opt ::= */ yytestcase(yyruleno==282); + /* (283) cmd ::= create_table create_table_args */ yytestcase(yyruleno==283); + /* (284) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==284); + /* (285) columnlist ::= columnname carglist */ yytestcase(yyruleno==285); + /* (286) nm ::= ID|INDEXED */ yytestcase(yyruleno==286); + /* (287) nm ::= STRING */ yytestcase(yyruleno==287); + /* (288) nm ::= JOIN_KW */ yytestcase(yyruleno==288); + /* (289) typetoken ::= typename */ yytestcase(yyruleno==289); + /* (290) typename ::= ID|STRING */ yytestcase(yyruleno==290); + /* (291) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=291); + /* (292) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=292); + /* (293) carglist ::= carglist ccons */ yytestcase(yyruleno==293); + /* (294) carglist ::= */ yytestcase(yyruleno==294); + /* (295) ccons ::= NULL onconf */ yytestcase(yyruleno==295); + /* (296) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==296); + /* (297) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==297); + /* (298) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=298); + /* (299) tconscomma ::= */ yytestcase(yyruleno==299); + /* (300) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=300); + /* (301) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=301); + /* (302) selectnowith ::= oneselect (OPTIMIZED OUT) */ 
assert(yyruleno!=302); + /* (303) oneselect ::= values */ yytestcase(yyruleno==303); + /* (304) sclp ::= selcollist COMMA */ yytestcase(yyruleno==304); + /* (305) as ::= ID|STRING */ yytestcase(yyruleno==305); + /* (306) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=306); + /* (307) exprlist ::= nexprlist */ yytestcase(yyruleno==307); + /* (308) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=308); + /* (309) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=309); + /* (310) nmnum ::= ON */ yytestcase(yyruleno==310); + /* (311) nmnum ::= DELETE */ yytestcase(yyruleno==311); + /* (312) nmnum ::= DEFAULT */ yytestcase(yyruleno==312); + /* (313) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==313); + /* (314) foreach_clause ::= */ yytestcase(yyruleno==314); + /* (315) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==315); + /* (316) trnm ::= nm */ yytestcase(yyruleno==316); + /* (317) tridxby ::= */ yytestcase(yyruleno==317); + /* (318) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==318); + /* (319) database_kw_opt ::= */ yytestcase(yyruleno==319); + /* (320) kwcolumn_opt ::= */ yytestcase(yyruleno==320); + /* (321) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==321); + /* (322) vtabarglist ::= vtabarg */ yytestcase(yyruleno==322); + /* (323) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==323); + /* (324) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==324); + /* (325) anylist ::= */ yytestcase(yyruleno==325); + /* (326) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==326); + /* (327) anylist ::= anylist ANY */ yytestcase(yyruleno==327); break; /********** End reduce actions ************************************************/ }; @@ -131697,15 +134938,17 @@ static void yy_reduce( yysize = yyRuleInfo[yyruleno].nrhs; yyact = yy_find_reduce_action(yymsp[-yysize].stateno,(YYCODETYPE)yygoto); if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact>YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; - yypParser->yyidx -= yysize - 1; + if( yyact>YY_MAX_SHIFT ){ + yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; + } yymsp -= yysize-1; + yypParser->yytos = yymsp; yymsp->stateno = (YYACTIONTYPE)yyact; yymsp->major = (YYCODETYPE)yygoto; yyTraceShift(yypParser, yyact); }else{ assert( yyact == YY_ACCEPT_ACTION ); - yypParser->yyidx -= yysize; + yypParser->yytos -= yysize; yy_accept(yypParser); } } @@ -131723,7 +134966,7 @@ static void yy_parse_failed( fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); + while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser); /* Here code is inserted which will be executed whenever the ** parser fails */ /************ Begin %parse_failure code ***************************************/ @@ -131763,7 +135006,10 @@ static void yy_accept( fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt); } #endif - while( yypParser->yyidx>=0 ) yy_pop_parser_stack(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif + assert( yypParser->yytos==yypParser->yystack ); /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ @@ -131806,28 +135052,8 @@ SQLITE_PRIVATE void sqlite3Parser( #endif yyParser *yypParser; /* The parser */ - /* (re)initialize the parser, if necessary */ yypParser = (yyParser*)yyp; - if( yypParser->yyidx<0 ){ -#if YYSTACKDEPTH<=0 - if( yypParser->yystksz <=0 ){ - yyStackOverflow(yypParser); - return; - } 
-#endif - yypParser->yyidx = 0; -#ifndef YYNOERRORRECOVERY - yypParser->yyerrcnt = -1; -#endif - yypParser->yystack[0].stateno = 0; - yypParser->yystack[0].major = 0; -#ifndef NDEBUG - if( yyTraceFILE ){ - fprintf(yyTraceFILE,"%sInitialize. Empty stack. State 0\n", - yyTracePrompt); - } -#endif - } + assert( yypParser->yytos!=0 ); #if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY) yyendofinput = (yymajor==0); #endif @@ -131842,7 +135068,6 @@ SQLITE_PRIVATE void sqlite3Parser( do{ yyact = yy_find_shift_action(yypParser,(YYCODETYPE)yymajor); if( yyact <= YY_MAX_SHIFTREDUCE ){ - if( yyact > YY_MAX_SHIFT ) yyact += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE; yy_shift(yypParser,yyact,yymajor,yyminor); #ifndef YYNOERRORRECOVERY yypParser->yyerrcnt--; @@ -131884,7 +135109,7 @@ SQLITE_PRIVATE void sqlite3Parser( if( yypParser->yyerrcnt<0 ){ yy_syntax_error(yypParser,yymajor,yyminor); } - yymx = yypParser->yystack[yypParser->yyidx].major; + yymx = yypParser->yytos->major; if( yymx==YYERRORSYMBOL || yyerrorhit ){ #ifndef NDEBUG if( yyTraceFILE ){ @@ -131895,18 +135120,20 @@ SQLITE_PRIVATE void sqlite3Parser( yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion); yymajor = YYNOCODE; }else{ - while( - yypParser->yyidx >= 0 && - yymx != YYERRORSYMBOL && - (yyact = yy_find_reduce_action( - yypParser->yystack[yypParser->yyidx].stateno, + while( yypParser->yytos >= &yypParser->yystack + && yymx != YYERRORSYMBOL + && (yyact = yy_find_reduce_action( + yypParser->yytos->stateno, YYERRORSYMBOL)) >= YY_MIN_REDUCE ){ yy_pop_parser_stack(yypParser); } - if( yypParser->yyidx < 0 || yymajor==0 ){ + if( yypParser->yytos < yypParser->yystack || yymajor==0 ){ yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif yymajor = YYNOCODE; }else if( yymx!=YYERRORSYMBOL ){ yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor); @@ -131943,18 +135170,23 @@ SQLITE_PRIVATE void sqlite3Parser( yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion); if( yyendofinput ){ yy_parse_failed(yypParser); +#ifndef YYNOERRORRECOVERY + yypParser->yyerrcnt = -1; +#endif } yymajor = YYNOCODE; #endif } - }while( yymajor!=YYNOCODE && yypParser->yyidx>=0 ); + }while( yymajor!=YYNOCODE && yypParser->yytos>yypParser->yystack ); #ifndef NDEBUG if( yyTraceFILE ){ - int i; + yyStackEntry *i; + char cDiv = '['; fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt); - for(i=1; i<=yypParser->yyidx; i++) - fprintf(yyTraceFILE,"%c%s", i==1 ? 
'[' : ' ', - yyTokenName[yypParser->yystack[i].major]); + for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){ + fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]); + cDiv = ' '; + } fprintf(yyTraceFILE,"]\n"); } #endif @@ -132837,7 +136069,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr sqlite3DeleteTable(db, pParse->pNewTable); } - sqlite3WithDelete(db, pParse->pWithToFree); + if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree); sqlite3DeleteTrigger(db, pParse->pNewTrigger); for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]); sqlite3DbFree(db, pParse->azVar); @@ -134047,6 +137279,7 @@ SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_ENABLE_FKEY, SQLITE_ForeignKeys }, { SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger }, { SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_Fts3Tokenizer }, + { SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_LoadExtension }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -134275,6 +137508,9 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex); + if( db->mTrace & SQLITE_TRACE_CLOSE ){ + db->xTrace(SQLITE_TRACE_CLOSE, db->pTraceArg, db, 0); + } /* Force xDisconnect calls on all virtual tables */ disconnectAllVtab(db); @@ -135043,7 +138279,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_overload_function( ** trace is a pointer to a function that is invoked at the start of each ** SQL statement. */ -SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void (*xTrace)(void*,const char*), void *pArg){ +#ifndef SQLITE_OMIT_DEPRECATED +SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void(*xTrace)(void*,const char*), void *pArg){ void *pOld; #ifdef SQLITE_ENABLE_API_ARMOR @@ -135054,11 +138291,36 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3 *db, void (*xTrace)(void*, #endif sqlite3_mutex_enter(db->mutex); pOld = db->pTraceArg; - db->xTrace = xTrace; + db->mTrace = xTrace ? SQLITE_TRACE_LEGACY : 0; + db->xTrace = (int(*)(u32,void*,void*,void*))xTrace; db->pTraceArg = pArg; sqlite3_mutex_leave(db->mutex); return pOld; } +#endif /* SQLITE_OMIT_DEPRECATED */ + +/* Register a trace callback using the version-2 interface. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2( + sqlite3 *db, /* Trace this connection */ + unsigned mTrace, /* Mask of events to be traced */ + int(*xTrace)(unsigned,void*,void*,void*), /* Callback to invoke */ + void *pArg /* Context */ +){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ){ + return SQLITE_MISUSE_BKPT; + } +#endif + sqlite3_mutex_enter(db->mutex); + db->mTrace = mTrace; + db->xTrace = xTrace; + db->pTraceArg = pArg; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + +#ifndef SQLITE_OMIT_DEPRECATED /* ** Register a profile function. The pArg from the previously registered ** profile function is returned. @@ -135087,6 +138349,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_profile( sqlite3_mutex_leave(db->mutex); return pOld; } +#endif /* SQLITE_OMIT_DEPRECATED */ #endif /* SQLITE_OMIT_TRACE */ /* @@ -135165,6 +138428,27 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook( return pRet; } +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +/* +** Register a callback to be invoked each time a row is updated, +** inserted or deleted using this database connection. 
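+**
+** Editorial sketch, not part of the upstream sources: assuming a build
+** with SQLITE_ENABLE_PREUPDATE_HOOK defined, the hook declared below
+** might be registered like this (xPreUpdate, pCtx and db are names
+** invented for the example):
+**
+**   static void xPreUpdate(void *pCtx, sqlite3 *db, int op,
+**                          const char *zDb, const char *zTbl,
+**                          sqlite3_int64 iKey1, sqlite3_int64 iKey2){
+**     fprintf(stderr, "%s on %s.%s\n",
+**         op==SQLITE_INSERT ? "INSERT" :
+**         op==SQLITE_DELETE ? "DELETE" : "UPDATE", zDb, zTbl);
+**   }
+**   ...
+**   sqlite3_preupdate_hook(db, xPreUpdate, 0);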
+*/ +SQLITE_API void *SQLITE_STDCALL sqlite3_preupdate_hook( + sqlite3 *db, /* Attach the hook to this database */ + void(*xCallback)( /* Callback function */ + void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64), + void *pArg /* First callback argument */ +){ + void *pRet; + sqlite3_mutex_enter(db->mutex); + pRet = db->pPreUpdateArg; + db->xPreUpdateCallback = xCallback; + db->pPreUpdateArg = pArg; + sqlite3_mutex_leave(db->mutex); + return pRet; +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + #ifndef SQLITE_OMIT_WAL /* ** The sqlite3_wal_hook() callback registered by sqlite3_wal_autocheckpoint(). @@ -147516,7 +150800,11 @@ SQLITE_PRIVATE int sqlite3Fts3InitTokenizer( #ifdef SQLITE_TEST -#include <tcl.h> +#if defined(INCLUDE_SQLITE_TCL_H) +# include "sqlite_tcl.h" +#else +# include "tcl.h" +#endif /* #include <tcl.h> */ /* @@ -159455,6 +162743,53 @@ static RtreeValue rtreeValueUp(sqlite3_value *v){ } #endif /* !defined(SQLITE_RTREE_INT_ONLY) */ +/* +** A constraint has failed while inserting a row into an rtree table. +** Assuming no OOM error occurs, this function sets the error message +** (at pRtree->base.zErrMsg) to an appropriate value and returns +** SQLITE_CONSTRAINT. +** +** Parameter iCol is the index of the leftmost column involved in the +** constraint failure. If it is 0, then the constraint that failed is +** the unique constraint on the id column. Otherwise, it is the rtree +** (c1<=c2) constraint on columns iCol and iCol+1 that has failed. +** +** If an OOM occurs, SQLITE_NOMEM is returned instead of SQLITE_CONSTRAINT. +*/ +static int rtreeConstraintError(Rtree *pRtree, int iCol){ + sqlite3_stmt *pStmt = 0; + char *zSql; + int rc; + + assert( iCol==0 || iCol%2 ); + zSql = sqlite3_mprintf("SELECT * FROM %Q.%Q", pRtree->zDb, pRtree->zName); + if( zSql ){ + rc = sqlite3_prepare_v2(pRtree->db, zSql, -1, &pStmt, 0); + }else{ + rc = SQLITE_NOMEM; + } + sqlite3_free(zSql); + + if( rc==SQLITE_OK ){ + if( iCol==0 ){ + const char *zCol = sqlite3_column_name(pStmt, 0); + pRtree->base.zErrMsg = sqlite3_mprintf( + "UNIQUE constraint failed: %s.%s", pRtree->zName, zCol + ); + }else{ + const char *zCol1 = sqlite3_column_name(pStmt, iCol); + const char *zCol2 = sqlite3_column_name(pStmt, iCol+1); + pRtree->base.zErrMsg = sqlite3_mprintf( + "rtree constraint failed: %s.(%s<=%s)", pRtree->zName, zCol1, zCol2 + ); + } + } + + sqlite3_finalize(pStmt); + return (rc==SQLITE_OK ? SQLITE_CONSTRAINT : rc); +} + + /* ** The xUpdate method for rtree module virtual tables.
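/* Editorial illustration, not part of the patch: the practical effect of
** rtreeConstraintError() above. The table name "demo", the handle "db" and
** the exact message text are assumptions inferred from the
** sqlite3_mprintf() format strings in the new function:
**
**   char *zErr = 0;
**   sqlite3_exec(db, "CREATE VIRTUAL TABLE demo USING rtree(id, x1, x2)",
**                0, 0, 0);
**   int rc = sqlite3_exec(db, "INSERT INTO demo VALUES(1, 5.0, 1.0)",
**                         0, 0, &zErr);
**   ...rc is SQLITE_CONSTRAINT, and instead of a bare "constraint failed"
**   zErr should now read "rtree constraint failed: demo.(x1<=x2)".
**   sqlite3_free(zErr);
*/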
@@ -159505,7 +162840,7 @@ static int rtreeUpdate( cell.aCoord[ii].f = rtreeValueDown(azData[ii+3]); cell.aCoord[ii+1].f = rtreeValueUp(azData[ii+4]); if( cell.aCoord[ii].f>cell.aCoord[ii+1].f ){ - rc = SQLITE_CONSTRAINT; + rc = rtreeConstraintError(pRtree, ii+1); goto constraint; } } @@ -159516,7 +162851,7 @@ static int rtreeUpdate( cell.aCoord[ii].i = sqlite3_value_int(azData[ii+3]); cell.aCoord[ii+1].i = sqlite3_value_int(azData[ii+4]); if( cell.aCoord[ii].i>cell.aCoord[ii+1].i ){ - rc = SQLITE_CONSTRAINT; + rc = rtreeConstraintError(pRtree, ii+1); goto constraint; } } @@ -159537,7 +162872,7 @@ static int rtreeUpdate( if( sqlite3_vtab_on_conflict(pRtree->db)==SQLITE_REPLACE ){ rc = rtreeDeleteRowid(pRtree, cell.iRowid); }else{ - rc = SQLITE_CONSTRAINT; + rc = rtreeConstraintError(pRtree, 0); goto constraint; } } @@ -159620,6 +162955,11 @@ static int rtreeQueryStat1(sqlite3 *db, Rtree *pRtree){ int rc; i64 nRow = 0; + if( sqlite3_table_column_metadata(db,pRtree->zDb,"sqlite_stat1", + 0,0,0,0,0,0)==SQLITE_ERROR ){ + pRtree->nRowEst = RTREE_DEFAULT_ROWEST; + return SQLITE_OK; + } zSql = sqlite3_mprintf(zFmt, pRtree->zDb, pRtree->zName); if( zSql==0 ){ rc = SQLITE_NOMEM; @@ -160524,15 +163864,17 @@ static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){ ** http://www.icu-project.org/userguide/posix.html#case_mappings */ static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){ - const UChar *zInput; - UChar *zOutput = 0; - int nInput; - int nOut; + const UChar *zInput; /* Pointer to input string */ + UChar *zOutput = 0; /* Pointer to output buffer */ + int nInput; /* Size of utf-16 input string in bytes */ + int nOut; /* Size of output buffer in bytes */ int cnt; + int bToUpper; /* True for toupper(), false for tolower() */ UErrorCode status; const char *zLocale = 0; assert(nArg==1 || nArg==2); + bToUpper = (sqlite3_user_data(p)!=0); if( nArg==2 ){ zLocale = (const char *)sqlite3_value_text(apArg[1]); } @@ -160556,19 +163898,23 @@ static void icuCaseFunc16(sqlite3_context *p, int nArg, sqlite3_value **apArg){ } zOutput = zNew; status = U_ZERO_ERROR; - if( sqlite3_user_data(p) ){ + if( bToUpper ){ nOut = 2*u_strToUpper(zOutput,nOut/2,zInput,nInput/2,zLocale,&status); }else{ nOut = 2*u_strToLower(zOutput,nOut/2,zInput,nInput/2,zLocale,&status); } - if( !U_SUCCESS(status) ){ - if( status==U_BUFFER_OVERFLOW_ERROR ) continue; - icuFunctionError(p, - sqlite3_user_data(p) ? "u_strToUpper" : "u_strToLower", status); - return; + + if( U_SUCCESS(status) ){ + sqlite3_result_text16(p, zOutput, nOut, xFree); + }else if( status==U_BUFFER_OVERFLOW_ERROR ){ + assert( cnt==0 ); + continue; + }else{ + icuFunctionError(p, bToUpper ? "u_strToUpper" : "u_strToLower", status); } + return; } - sqlite3_result_text16(p, zOutput, nOut, xFree); + assert( 0 ); /* Unreachable */ } /* @@ -161385,6 +164731,38 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( const char *zState ); +/* +** Open an RBU handle to perform an RBU vacuum on database file zTarget. +** An RBU vacuum is similar to SQLite's built-in VACUUM command, except +** that it can be suspended and resumed like an RBU update. +** +** The second argument to this function, which may not be NULL, identifies +** a database in which to store the state of the RBU vacuum operation if +** it is suspended. The first time sqlite3rbu_vacuum() is called, to start +** an RBU vacuum operation, the state database should either not exist or +** be empty (contain no tables). 
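+**
+** As an editorial sketch only (not from the upstream sources), a minimal
+** vacuum loop over this interface, with the file names "target.db" and
+** "state.db" assumed, might read:
+**
+**   sqlite3rbu *p = sqlite3rbu_vacuum("target.db", "state.db");
+**   while( sqlite3rbu_step(p)==SQLITE_OK ){ }
+**   rc = sqlite3rbu_close(p, &zErrmsg);
+**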
If an RBU vacuum is suspended by calling +** sqlite3rbu_close() on the RBU handle before sqlite3rbu_step() has +** returned SQLITE_DONE, the vacuum state is stored in the state database. +** The vacuum can be resumed by calling this function to open a new RBU +** handle specifying the same target and state databases. +** +** This function does not delete the state database after an RBU vacuum +** is completed, even if it created it. However, if the call to +** sqlite3rbu_close() returns any value other than SQLITE_OK, the contents +** of the state tables within the state database are zeroed. This way, +** the next call to sqlite3rbu_vacuum() opens a handle that starts a +** new RBU vacuum operation. +** +** As with sqlite3rbu_open(), Zipvfs users should refer to the comment +** describing the sqlite3rbu_create_vfs() API function below for +** a description of the complications associated with using RBU with +** zipvfs databases. +*/ +SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum( + const char *zTarget, + const char *zState +); + /* ** Internally, each RBU connection uses a separate SQLite database ** connection to access the target and rbu update databases. This @@ -161513,6 +164891,44 @@ SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3rbu_progress(sqlite3rbu *pRbu); */ SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int *pnTwo); +/* +** Obtain an indication as to the current stage of an RBU update or vacuum. +** This function always returns one of the SQLITE_RBU_STATE_XXX constants +** defined in this file. Return values should be interpreted as follows: +** +** SQLITE_RBU_STATE_OAL: +** RBU is currently building a *-oal file. The next call to sqlite3rbu_step() +** may either add further data to the *-oal file, or compute data that will +** be added by a subsequent call. +** +** SQLITE_RBU_STATE_MOVE: +** RBU has finished building the *-oal file. The next call to sqlite3rbu_step() +** will move the *-oal file to the equivalent *-wal path. If the current +** operation is an RBU update, then the updated version of the database +** file will become visible to ordinary SQLite clients following the next +** call to sqlite3rbu_step(). +** +** SQLITE_RBU_STATE_CHECKPOINT: +** RBU is currently performing an incremental checkpoint. The next call to +** sqlite3rbu_step() will copy a page of data from the *-wal file into +** the target database file. +** +** SQLITE_RBU_STATE_DONE: +** The RBU operation has finished. Any subsequent calls to sqlite3rbu_step() +** will immediately return SQLITE_DONE. +** +** SQLITE_RBU_STATE_ERROR: +** An error has occurred. Any subsequent calls to sqlite3rbu_step() will +** immediately return the SQLite error code associated with the error. +*/ +#define SQLITE_RBU_STATE_OAL 1 +#define SQLITE_RBU_STATE_MOVE 2 +#define SQLITE_RBU_STATE_CHECKPOINT 3 +#define SQLITE_RBU_STATE_DONE 4 +#define SQLITE_RBU_STATE_ERROR 5 + +SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *pRbu); + /* ** Create an RBU VFS named zName that accesses the underlying file-system ** via existing VFS zParent.
Or, if the zParent parameter is passed NULL, @@ -161663,6 +165079,7 @@ typedef struct RbuUpdateStmt RbuUpdateStmt; #if !defined(SQLITE_AMALGAMATION) typedef unsigned int u32; +typedef unsigned short u16; typedef unsigned char u8; typedef sqlite3_int64 i64; #endif @@ -161676,6 +165093,8 @@ typedef sqlite3_int64 i64; #define WAL_LOCK_CKPT 1 #define WAL_LOCK_READ0 3 +#define SQLITE_FCNTL_RBUCNT 5149216 + /* ** A structure to store values read from the rbu_state table in memory. */ @@ -161854,6 +165273,10 @@ struct sqlite3rbu { int pgsz; u8 *aBuf; i64 iWalCksum; + + /* Used in RBU vacuum mode only */ + int nRbu; /* Number of RBU VFS in the stack */ + rbu_file *pRbuFd; /* Fd for main db of dbRbu */ }; /* @@ -161879,6 +165302,7 @@ struct rbu_file { int openFlags; /* Flags this file was opened with */ u32 iCookie; /* Cookie value for main db files */ u8 iWriteVer; /* "write-version" value for main db files */ + u8 bNolock; /* True to fail EXCLUSIVE locks */ int nShm; /* Number of entries in apShm[] array */ char **apShm; /* Array of mmap'd *-shm regions */ @@ -161889,6 +165313,11 @@ struct rbu_file { rbu_file *pMainNext; /* Next MAIN_DB file */ }; +/* +** True for an RBU vacuum handle, or false otherwise. +*/ +#define rbuIsVacuum(p) ((p)->zTarget==0) + /************************************************************************* ** The following three functions, found below: @@ -162337,8 +165766,11 @@ static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){ /* ** The implementation of the rbu_target_name() SQL function. This function -** accepts one argument - the name of a table in the RBU database. If the -** table name matches the pattern: +** accepts one or two arguments. The first argument is the name of a table +** in the RBU database. The second argument, if it is present, is 1 +** for a view or 0 for a table. +** +** For a non-vacuum RBU handle, if the table name matches the pattern: ** ** data[0-9]_<name> ** @@ -162349,21 +165781,33 @@ static int rbuObjIterNext(sqlite3rbu *p, RbuObjIter *pIter){ ** "data_t1" -> "t1" ** "data0123_t2" -> "t2" ** "dataAB_t3" -> NULL +** +** For an rbu vacuum handle, a copy of the first argument is returned if +** the second argument is either missing or 0 (not a view).
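+**
+** An editorial illustration of the vacuum-mode rules above (the table
+** name is assumed):
+**
+**   rbu_target_name('t1')    -> 't1'
+**   rbu_target_name('t1', 1) -> NULL, so views are skipped by the
+**                               sqlite_master scan below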
*/ static void rbuTargetNameFunc( - sqlite3_context *context, + sqlite3_context *pCtx, int argc, sqlite3_value **argv ){ + sqlite3rbu *p = sqlite3_user_data(pCtx); const char *zIn; - assert( argc==1 ); + assert( argc==1 || argc==2 ); zIn = (const char*)sqlite3_value_text(argv[0]); - if( zIn && strlen(zIn)>4 && memcmp("data", zIn, 4)==0 ){ - int i; - for(i=4; zIn[i]>='0' && zIn[i]<='9'; i++); - if( zIn[i]=='_' && zIn[i+1] ){ - sqlite3_result_text(context, &zIn[i+1], -1, SQLITE_STATIC); + if( zIn ){ + if( rbuIsVacuum(p) ){ + if( argc==1 || 0==sqlite3_value_int(argv[1]) ){ + sqlite3_result_text(pCtx, zIn, -1, SQLITE_STATIC); + } + }else{ + if( strlen(zIn)>4 && memcmp("data", zIn, 4)==0 ){ + int i; + for(i=4; zIn[i]>='0' && zIn[i]<='9'; i++); + if( zIn[i]=='_' && zIn[i+1] ){ + sqlite3_result_text(pCtx, &zIn[i+1], -1, SQLITE_STATIC); + } + } } } } @@ -162380,11 +165824,14 @@ static int rbuObjIterFirst(sqlite3rbu *p, RbuObjIter *pIter){ int rc; memset(pIter, 0, sizeof(RbuObjIter)); - rc = prepareAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg, - "SELECT rbu_target_name(name) AS target, name FROM sqlite_master " + rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pTblIter, &p->zErrmsg, + sqlite3_mprintf( + "SELECT rbu_target_name(name, type='view') AS target, name " + "FROM sqlite_master " "WHERE type IN ('table', 'view') AND target IS NOT NULL " + " %s " "ORDER BY name" - ); + , rbuIsVacuum(p) ? "AND rootpage!=0 AND rootpage IS NOT NULL" : "")); if( rc==SQLITE_OK ){ rc = prepareAndCollectError(p->dbMain, &pIter->pIdxIter, &p->zErrmsg, @@ -162757,6 +166204,7 @@ static int rbuObjIterCacheTableInfo(sqlite3rbu *p, RbuObjIter *pIter){ pStmt = 0; if( p->rc==SQLITE_OK + && rbuIsVacuum(p)==0 && bRbuRowid!=(pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE) ){ p->rc = SQLITE_ERROR; @@ -162896,6 +166344,8 @@ static char *rbuObjIterGetIndexCols( for(i=0; pIter->abTblPk[i]==0; i++); assert( inTblCol ); zCol = pIter->azTblCol[i]; + }else if( rbuIsVacuum(p) ){ + zCol = "_rowid_"; }else{ zCol = "rbu_rowid"; } @@ -163436,7 +166886,7 @@ static int rbuObjIterPrepareAll( } /* And to delete index entries */ - if( p->rc==SQLITE_OK ){ + if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError( p->dbMain, &pIter->pDelete, &p->zErrmsg, sqlite3_mprintf("DELETE FROM \"rbu_imp_%w\" WHERE %s", zTbl, zWhere) @@ -163446,6 +166896,15 @@ static int rbuObjIterPrepareAll( /* Create the SELECT statement to read keys in sorted order */ if( p->rc==SQLITE_OK ){ char *zSql; + if( rbuIsVacuum(p) ){ + zSql = sqlite3_mprintf( + "SELECT %s, 0 AS rbu_control FROM '%q' ORDER BY %s%s", + zCollist, + pIter->zDataTbl, + zCollist, zLimit + ); + }else + if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ zSql = sqlite3_mprintf( "SELECT %s, rbu_control FROM %s.'rbu_tmp_%q' ORDER BY %s%s", @@ -163472,7 +166931,9 @@ static int rbuObjIterPrepareAll( sqlite3_free(zWhere); sqlite3_free(zBind); }else{ - int bRbuRowid = (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE); + int bRbuRowid = (pIter->eType==RBU_PK_VTAB) + ||(pIter->eType==RBU_PK_NONE) + ||(pIter->eType==RBU_PK_EXTERNAL && rbuIsVacuum(p)); const char *zTbl = pIter->zTbl; /* Table this step applies to */ const char *zWrite; /* Imposter table name */ @@ -163499,8 +166960,10 @@ static int rbuObjIterPrepareAll( ); } - /* Create the DELETE statement to write to the target PK b-tree */ - if( p->rc==SQLITE_OK ){ + /* Create the DELETE statement to write to the target PK b-tree. 
+ ** Because it only performs INSERT operations, this is not required for + ** an rbu vacuum handle. */ + if( rbuIsVacuum(p)==0 && p->rc==SQLITE_OK ){ p->rc = prepareFreeAndCollectError(p->dbMain, &pIter->pDelete, pz, sqlite3_mprintf( "DELETE FROM \"%s%w\" WHERE %s", zWrite, zTbl, zWhere @@ -163508,7 +166971,7 @@ static int rbuObjIterPrepareAll( ); } - if( pIter->abIndexed ){ + if( rbuIsVacuum(p)==0 && pIter->abIndexed ){ const char *zRbuRowid = ""; if( pIter->eType==RBU_PK_EXTERNAL || pIter->eType==RBU_PK_NONE ){ zRbuRowid = ", rbu_rowid"; @@ -163558,10 +167021,16 @@ static int rbuObjIterPrepareAll( /* Create the SELECT statement to read keys from data_xxx */ if( p->rc==SQLITE_OK ){ + const char *zRbuRowid = ""; + if( bRbuRowid ){ + zRbuRowid = rbuIsVacuum(p) ? ",_rowid_ " : ",rbu_rowid"; + } p->rc = prepareFreeAndCollectError(p->dbRbu, &pIter->pSelect, pz, sqlite3_mprintf( - "SELECT %s, rbu_control%s FROM '%q'%s", - zCollist, (bRbuRowid ? ", rbu_rowid" : ""), + "SELECT %s,%s rbu_control%s FROM '%q'%s", + zCollist, + (rbuIsVacuum(p) ? "0 AS " : ""), + zRbuRowid, pIter->zDataTbl, zLimit ) ); @@ -163656,11 +167125,15 @@ static int rbuGetUpdateStmt( return p->rc; } -static sqlite3 *rbuOpenDbhandle(sqlite3rbu *p, const char *zName){ +static sqlite3 *rbuOpenDbhandle( + sqlite3rbu *p, + const char *zName, + int bUseVfs +){ sqlite3 *db = 0; if( p->rc==SQLITE_OK ){ const int flags = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI; - p->rc = sqlite3_open_v2(zName, &db, flags, p->zVfsName); + p->rc = sqlite3_open_v2(zName, &db, flags, bUseVfs ? p->zVfsName : 0); if( p->rc ){ p->zErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(db)); sqlite3_close(db); @@ -163670,6 +167143,95 @@ static sqlite3 *rbuOpenDbhandle(sqlite3rbu *p, const char *zName){ return db; } +/* +** Free an RbuState object allocated by rbuLoadState(). +*/ +static void rbuFreeState(RbuState *p){ + if( p ){ + sqlite3_free(p->zTbl); + sqlite3_free(p->zIdx); + sqlite3_free(p); + } +} + +/* +** Allocate an RbuState object and load the contents of the rbu_state +** table into it. Return a pointer to the new object. It is the +** responsibility of the caller to eventually free the object using +** sqlite3_free(). +** +** If an error occurs, leave an error code and message in the rbu handle +** and return NULL. 
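+**
+** (Editorial note: the rbu_state table read here is a small key/value
+** store, created by the RBU_CREATE_STATE macro elsewhere in this file
+** with a schema of roughly "rbu_state(k INTEGER PRIMARY KEY, v)"; the
+** switch below handles one row per RBU_STATE_* key.)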
+*/ +static RbuState *rbuLoadState(sqlite3rbu *p){ + RbuState *pRet = 0; + sqlite3_stmt *pStmt = 0; + int rc; + int rc2; + + pRet = (RbuState*)rbuMalloc(p, sizeof(RbuState)); + if( pRet==0 ) return 0; + + rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg, + sqlite3_mprintf("SELECT k, v FROM %s.rbu_state", p->zStateDb) + ); + while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + switch( sqlite3_column_int(pStmt, 0) ){ + case RBU_STATE_STAGE: + pRet->eStage = sqlite3_column_int(pStmt, 1); + if( pRet->eStage!=RBU_STAGE_OAL + && pRet->eStage!=RBU_STAGE_MOVE + && pRet->eStage!=RBU_STAGE_CKPT + ){ + p->rc = SQLITE_CORRUPT; + } + break; + + case RBU_STATE_TBL: + pRet->zTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); + break; + + case RBU_STATE_IDX: + pRet->zIdx = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); + break; + + case RBU_STATE_ROW: + pRet->nRow = sqlite3_column_int(pStmt, 1); + break; + + case RBU_STATE_PROGRESS: + pRet->nProgress = sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_CKPT: + pRet->iWalCksum = sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_COOKIE: + pRet->iCookie = (u32)sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_OALSZ: + pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1); + break; + + case RBU_STATE_PHASEONESTEP: + pRet->nPhaseOneStep = sqlite3_column_int64(pStmt, 1); + break; + + default: + rc = SQLITE_CORRUPT; + break; + } + } + rc2 = sqlite3_finalize(pStmt); + if( rc==SQLITE_OK ) rc = rc2; + + p->rc = rc; + return pRet; +} + + /* ** Open the database handle and attach the RBU database as "rbu". If an ** error occurs, leave an error code and message in the RBU handle. @@ -163677,10 +167239,14 @@ static sqlite3 *rbuOpenDbhandle(sqlite3rbu *p, const char *zName){ static void rbuOpenDatabase(sqlite3rbu *p){ assert( p->rc==SQLITE_OK ); assert( p->dbMain==0 && p->dbRbu==0 ); + assert( rbuIsVacuum(p) || p->zTarget!=0 ); - p->eStage = 0; - p->dbMain = rbuOpenDbhandle(p, p->zTarget); - p->dbRbu = rbuOpenDbhandle(p, p->zRbu); + /* Open the RBU database */ + p->dbRbu = rbuOpenDbhandle(p, p->zRbu, 1); + + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); + } /* If using separate RBU and state databases, attach the state database to ** the RBU db handle now. 
*/ @@ -163691,6 +167257,96 @@ static void rbuOpenDatabase(sqlite3rbu *p){ memcpy(p->zStateDb, "main", 4); } +#if 0 + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + p->rc = sqlite3_exec(p->dbRbu, "BEGIN", 0, 0, 0); + } +#endif + + /* If it has not already been created, create the rbu_state table */ + rbuMPrintfExec(p, p->dbRbu, RBU_CREATE_STATE, p->zStateDb); + +#if 0 + if( rbuIsVacuum(p) ){ + if( p->rc==SQLITE_OK ){ + int rc2; + int bOk = 0; + sqlite3_stmt *pCnt = 0; + p->rc = prepareAndCollectError(p->dbRbu, &pCnt, &p->zErrmsg, + "SELECT count(*) FROM stat.sqlite_master" + ); + if( p->rc==SQLITE_OK + && sqlite3_step(pCnt)==SQLITE_ROW + && 1==sqlite3_column_int(pCnt, 0) + ){ + bOk = 1; + } + rc2 = sqlite3_finalize(pCnt); + if( p->rc==SQLITE_OK ) p->rc = rc2; + + if( p->rc==SQLITE_OK && bOk==0 ){ + p->rc = SQLITE_ERROR; + p->zErrmsg = sqlite3_mprintf("invalid state database"); + } + + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_exec(p->dbRbu, "COMMIT", 0, 0, 0); + } + } + } +#endif + + if( p->rc==SQLITE_OK && rbuIsVacuum(p) ){ + int bOpen = 0; + int rc; + p->nRbu = 0; + p->pRbuFd = 0; + rc = sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); + if( rc!=SQLITE_NOTFOUND ) p->rc = rc; + if( p->eStage>=RBU_STAGE_MOVE ){ + bOpen = 1; + }else{ + RbuState *pState = rbuLoadState(p); + if( pState ){ + bOpen = (pState->eStage>RBU_STAGE_MOVE); + rbuFreeState(pState); + } + } + if( bOpen ) p->dbMain = rbuOpenDbhandle(p, p->zRbu, p->nRbu<=1); + } + + p->eStage = 0; + if( p->rc==SQLITE_OK && p->dbMain==0 ){ + if( !rbuIsVacuum(p) ){ + p->dbMain = rbuOpenDbhandle(p, p->zTarget, 1); + }else if( p->pRbuFd->pWalFd ){ + p->rc = SQLITE_ERROR; + p->zErrmsg = sqlite3_mprintf("cannot vacuum wal mode database"); + }else{ + char *zTarget; + char *zExtra = 0; + if( strlen(p->zRbu)>=5 && 0==memcmp("file:", p->zRbu, 5) ){ + zExtra = &p->zRbu[5]; + while( *zExtra ){ + if( *zExtra++=='?' ) break; + } + if( *zExtra=='\0' ) zExtra = 0; + } + + zTarget = sqlite3_mprintf("file:%s-vacuum?rbu_memory=1%s%s", + sqlite3_db_filename(p->dbRbu, "main"), + (zExtra==0 ? "" : "&"), (zExtra==0 ? "" : zExtra) + ); + + if( zTarget==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + p->dbMain = rbuOpenDbhandle(p, zTarget, p->nRbu<=1); + sqlite3_free(zTarget); + } + } + if( p->rc==SQLITE_OK ){ p->rc = sqlite3_create_function(p->dbMain, "rbu_tmp_insert", -1, SQLITE_UTF8, (void*)p, rbuTmpInsertFunc, 0, 0 @@ -163705,7 +167361,7 @@ static void rbuOpenDatabase(sqlite3rbu *p){ if( p->rc==SQLITE_OK ){ p->rc = sqlite3_create_function(p->dbRbu, - "rbu_target_name", 1, SQLITE_UTF8, (void*)p, rbuTargetNameFunc, 0, 0 + "rbu_target_name", -1, SQLITE_UTF8, (void*)p, rbuTargetNameFunc, 0, 0 ); } @@ -163754,9 +167410,9 @@ static void rbuFileSuffix3(const char *zBase, char *z){ #endif { int i, sz; - sz = sqlite3Strlen30(z); + sz = (int)strlen(z)&0xffffff; for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} - if( z[i]=='.' && ALWAYS(sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); + if( z[i]=='.' 
&& sz>i+4 ) memmove(&z[i+1], &z[sz-3], 4); } #endif } @@ -163964,9 +167620,15 @@ static LPWSTR rbuWinUtf8ToUnicode(const char *zFilename){ */ static void rbuMoveOalFile(sqlite3rbu *p){ const char *zBase = sqlite3_db_filename(p->dbMain, "main"); + const char *zMove = zBase; + char *zOal; + char *zWal; - char *zWal = sqlite3_mprintf("%s-wal", zBase); - char *zOal = sqlite3_mprintf("%s-oal", zBase); + if( rbuIsVacuum(p) ){ + zMove = sqlite3_db_filename(p->dbRbu, "main"); + } + zOal = sqlite3_mprintf("%s-oal", zMove); + zWal = sqlite3_mprintf("%s-wal", zMove); assert( p->eStage==RBU_STAGE_MOVE ); assert( p->rc==SQLITE_OK && p->zErrmsg==0 ); @@ -163987,8 +167649,8 @@ static void rbuMoveOalFile(sqlite3rbu *p){ /* Re-open the databases. */ rbuObjIterFinalize(&p->objiter); - sqlite3_close(p->dbMain); sqlite3_close(p->dbRbu); + sqlite3_close(p->dbMain); p->dbMain = 0; p->dbRbu = 0; @@ -164150,19 +167812,24 @@ static void rbuStepOneOp(sqlite3rbu *p, int eType){ p->rc = sqlite3_bind_value(pWriter, i+1, pVal); if( p->rc ) return; } - if( pIter->zIdx==0 - && (pIter->eType==RBU_PK_VTAB || pIter->eType==RBU_PK_NONE) - ){ - /* For a virtual table, or a table with no primary key, the - ** SELECT statement is: - ** - ** SELECT <cols>, rbu_control, rbu_rowid FROM .... - ** - ** Hence column_value(pIter->nCol+1). - */ - assertColumnName(pIter->pSelect, pIter->nCol+1, "rbu_rowid"); - pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1); - p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal); + if( pIter->zIdx==0 ){ + if( pIter->eType==RBU_PK_VTAB + || pIter->eType==RBU_PK_NONE + || (pIter->eType==RBU_PK_EXTERNAL && rbuIsVacuum(p)) + ){ + /* For a virtual table, or a table with no primary key, the + ** SELECT statement is: + ** + ** SELECT <cols>, rbu_control, rbu_rowid FROM .... + ** + ** Hence column_value(pIter->nCol+1). + */ + assertColumnName(pIter->pSelect, pIter->nCol+1, + rbuIsVacuum(p) ? "rowid" : "rbu_rowid" + ); + pVal = sqlite3_column_value(pIter->pSelect, pIter->nCol+1); + p->rc = sqlite3_bind_value(pWriter, pIter->nCol+1, pVal); + } } if( p->rc==SQLITE_OK ){ sqlite3_step(pWriter); @@ -164241,13 +167908,18 @@ static int rbuStep(sqlite3rbu *p){ /* ** Increment the schema cookie of the main database opened by p->dbMain. +** +** Or, if this is an RBU vacuum, set the schema cookie of the main db +** opened by p->dbMain to one more than the schema cookie of the main +** db opened by p->dbRbu. */ static void rbuIncrSchemaCookie(sqlite3rbu *p){ if( p->rc==SQLITE_OK ){ + sqlite3 *dbread = (rbuIsVacuum(p) ? p->dbRbu : p->dbMain); int iCookie = 1000000; sqlite3_stmt *pStmt; - p->rc = prepareAndCollectError(p->dbMain, &pStmt, &p->zErrmsg, + p->rc = prepareAndCollectError(dbread, &pStmt, &p->zErrmsg, "PRAGMA schema_version" ); if( p->rc==SQLITE_OK ){ @@ -164275,6 +167947,7 @@ static void rbuIncrSchemaCookie(sqlite3rbu *p){ static void rbuSaveState(sqlite3rbu *p, int eStage){ if( p->rc==SQLITE_OK || p->rc==SQLITE_DONE ){ sqlite3_stmt *pInsert = 0; + rbu_file *pFd = (rbuIsVacuum(p) ?
p->pRbuFd : p->pTargetFd); int rc; assert( p->zErrmsg==0 ); @@ -164297,7 +167970,7 @@ static void rbuSaveState(sqlite3rbu *p, int eStage){ RBU_STATE_ROW, p->nStep, RBU_STATE_PROGRESS, p->nProgress, RBU_STATE_CKPT, p->iWalCksum, - RBU_STATE_COOKIE, (i64)p->pTargetFd->iCookie, + RBU_STATE_COOKIE, (i64)pFd->iCookie, RBU_STATE_OALSZ, p->iOalSz, RBU_STATE_PHASEONESTEP, p->nPhaseOneStep ) @@ -164313,6 +167986,92 @@ static void rbuSaveState(sqlite3rbu *p, int eStage){ } +/* +** The second argument passed to this function is the name of a PRAGMA +** setting - "page_size", "auto_vacuum", "user_version" or "application_id". +** This function executes the following on sqlite3rbu.dbRbu: +** +** "PRAGMA main.$zPragma" +** +** where $zPragma is the string passed as the second argument, then +** on sqlite3rbu.dbMain: +** +** "PRAGMA main.$zPragma = $val" +** +** where $val is the value returned by the first PRAGMA invocation. +** +** In short, it copies the value of the specified PRAGMA setting from +** dbRbu to dbMain. +*/ +static void rbuCopyPragma(sqlite3rbu *p, const char *zPragma){ + if( p->rc==SQLITE_OK ){ + sqlite3_stmt *pPragma = 0; + p->rc = prepareFreeAndCollectError(p->dbRbu, &pPragma, &p->zErrmsg, + sqlite3_mprintf("PRAGMA main.%s", zPragma) + ); + if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pPragma) ){ + p->rc = rbuMPrintfExec(p, p->dbMain, "PRAGMA main.%s = %d", + zPragma, sqlite3_column_int(pPragma, 0) + ); + } + rbuFinalize(p, pPragma); + } +} + +/* +** The RBU handle passed as the only argument has just been opened and +** the state database is empty. If this RBU handle was opened for an +** RBU vacuum operation, create the schema in the target db. +*/ +static void rbuCreateTargetSchema(sqlite3rbu *p){ + sqlite3_stmt *pSql = 0; + sqlite3_stmt *pInsert = 0; + + assert( rbuIsVacuum(p) ); + p->rc = sqlite3_exec(p->dbMain, "PRAGMA writable_schema=1", 0,0, &p->zErrmsg); + if( p->rc==SQLITE_OK ){ + p->rc = prepareAndCollectError(p->dbRbu, &pSql, &p->zErrmsg, + "SELECT sql FROM sqlite_master WHERE sql!='' AND rootpage!=0" + " AND name!='sqlite_sequence' " + " ORDER BY type DESC" + ); + } + + while( p->rc==SQLITE_OK && sqlite3_step(pSql)==SQLITE_ROW ){ + const char *zSql = (const char*)sqlite3_column_text(pSql, 0); + p->rc = sqlite3_exec(p->dbMain, zSql, 0, 0, &p->zErrmsg); + } + rbuFinalize(p, pSql); + if( p->rc!=SQLITE_OK ) return; + + if( p->rc==SQLITE_OK ){ + p->rc = prepareAndCollectError(p->dbRbu, &pSql, &p->zErrmsg, + "SELECT * FROM sqlite_master WHERE rootpage=0 OR rootpage IS NULL" + ); + } + + if( p->rc==SQLITE_OK ){ + p->rc = prepareAndCollectError(p->dbMain, &pInsert, &p->zErrmsg, + "INSERT INTO sqlite_master VALUES(?,?,?,?,?)" + ); + } + + while( p->rc==SQLITE_OK && sqlite3_step(pSql)==SQLITE_ROW ){ + int i; + for(i=0; i<5; i++){ + sqlite3_bind_value(pInsert, i+1, sqlite3_column_value(pSql, i)); + } + sqlite3_step(pInsert); + p->rc = sqlite3_reset(pInsert); + } + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_exec(p->dbMain, "PRAGMA writable_schema=0",0,0,&p->zErrmsg); + } + + rbuFinalize(p, pSql); + rbuFinalize(p, pInsert); +} + /* ** Step the RBU object. */ @@ -164321,13 +168080,22 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *p){ switch( p->eStage ){ case RBU_STAGE_OAL: { RbuObjIter *pIter = &p->objiter; + + /* If this is an RBU vacuum operation and the state table was empty + ** when this handle was opened, create the target database schema. 
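+**
+** Editorial sketch of what each rbuCopyPragma() call below amounts to,
+** assuming dbRbu reports a user_version of 7:
+**
+**   PRAGMA main.user_version;       -- run against dbRbu, returns 7
+**   PRAGMA main.user_version = 7;   -- then run against dbMain
+**
+** so the freshly created target inherits the source database's settings.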
*/ + if( rbuIsVacuum(p) && p->nProgress==0 && p->rc==SQLITE_OK ){ + rbuCreateTargetSchema(p); + rbuCopyPragma(p, "user_version"); + rbuCopyPragma(p, "application_id"); + } + while( p->rc==SQLITE_OK && pIter->zTbl ){ if( pIter->bCleanup ){ /* Clean up the rbu_tmp_xxx table for the previous table. It ** cannot be dropped as there are currently active SQL statements. ** But the contents can be deleted. */ - if( pIter->abIndexed ){ + if( rbuIsVacuum(p)==0 && pIter->abIndexed ){ rbuMPrintfExec(p, p->dbRbu, "DELETE FROM %s.'rbu_tmp_%q'", p->zStateDb, pIter->zDataTbl ); @@ -164414,94 +168182,6 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_step(sqlite3rbu *p){ } } -/* -** Free an RbuState object allocated by rbuLoadState(). -*/ -static void rbuFreeState(RbuState *p){ - if( p ){ - sqlite3_free(p->zTbl); - sqlite3_free(p->zIdx); - sqlite3_free(p); - } -} - -/* -** Allocate an RbuState object and load the contents of the rbu_state -** table into it. Return a pointer to the new object. It is the -** responsibility of the caller to eventually free the object using -** sqlite3_free(). -** -** If an error occurs, leave an error code and message in the rbu handle -** and return NULL. -*/ -static RbuState *rbuLoadState(sqlite3rbu *p){ - RbuState *pRet = 0; - sqlite3_stmt *pStmt = 0; - int rc; - int rc2; - - pRet = (RbuState*)rbuMalloc(p, sizeof(RbuState)); - if( pRet==0 ) return 0; - - rc = prepareFreeAndCollectError(p->dbRbu, &pStmt, &p->zErrmsg, - sqlite3_mprintf("SELECT k, v FROM %s.rbu_state", p->zStateDb) - ); - while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ - switch( sqlite3_column_int(pStmt, 0) ){ - case RBU_STATE_STAGE: - pRet->eStage = sqlite3_column_int(pStmt, 1); - if( pRet->eStage!=RBU_STAGE_OAL - && pRet->eStage!=RBU_STAGE_MOVE - && pRet->eStage!=RBU_STAGE_CKPT - ){ - p->rc = SQLITE_CORRUPT; - } - break; - - case RBU_STATE_TBL: - pRet->zTbl = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); - break; - - case RBU_STATE_IDX: - pRet->zIdx = rbuStrndup((char*)sqlite3_column_text(pStmt, 1), &rc); - break; - - case RBU_STATE_ROW: - pRet->nRow = sqlite3_column_int(pStmt, 1); - break; - - case RBU_STATE_PROGRESS: - pRet->nProgress = sqlite3_column_int64(pStmt, 1); - break; - - case RBU_STATE_CKPT: - pRet->iWalCksum = sqlite3_column_int64(pStmt, 1); - break; - - case RBU_STATE_COOKIE: - pRet->iCookie = (u32)sqlite3_column_int64(pStmt, 1); - break; - - case RBU_STATE_OALSZ: - pRet->iOalSz = (u32)sqlite3_column_int64(pStmt, 1); - break; - - case RBU_STATE_PHASEONESTEP: - pRet->nPhaseOneStep = sqlite3_column_int64(pStmt, 1); - break; - - default: - rc = SQLITE_CORRUPT; - break; - } - } - rc2 = sqlite3_finalize(pStmt); - if( rc==SQLITE_OK ) rc = rc2; - - p->rc = rc; - return pRet; -} - /* ** Compare strings z1 and z2, returning 0 if they are identical, or non-zero ** otherwise. Either or both argument may be NULL. Two NULL values are @@ -164691,16 +168371,14 @@ static void rbuInitPhaseOneSteps(sqlite3rbu *p){ } } -/* -** Open and return a new RBU handle. -*/ -SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( + +static sqlite3rbu *openRbuHandle( const char *zTarget, const char *zRbu, const char *zState ){ sqlite3rbu *p; - size_t nTarget = strlen(zTarget); + size_t nTarget = zTarget ? strlen(zTarget) : 0; size_t nRbu = strlen(zRbu); size_t nState = zState ? 
strlen(zState) : 0; size_t nByte = sizeof(sqlite3rbu) + nTarget+1 + nRbu+1+ nState+1; @@ -164713,22 +168391,24 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( memset(p, 0, sizeof(sqlite3rbu)); rbuCreateVfs(p); - /* Open the target database */ + /* Open the target, RBU and state databases */ if( p->rc==SQLITE_OK ){ - p->zTarget = (char*)&p[1]; - memcpy(p->zTarget, zTarget, nTarget+1); - p->zRbu = &p->zTarget[nTarget+1]; + char *pCsr = (char*)&p[1]; + if( zTarget ){ + p->zTarget = pCsr; + memcpy(p->zTarget, zTarget, nTarget+1); + pCsr += nTarget+1; + } + p->zRbu = pCsr; memcpy(p->zRbu, zRbu, nRbu+1); + pCsr += nRbu+1; if( zState ){ - p->zState = &p->zRbu[nRbu+1]; + p->zState = pCsr; memcpy(p->zState, zState, nState+1); } rbuOpenDatabase(p); } - /* If it has not already been created, create the rbu_state table */ - rbuMPrintfExec(p, p->dbRbu, RBU_CREATE_STATE, p->zStateDb); - if( p->rc==SQLITE_OK ){ pState = rbuLoadState(p); assert( pState || p->rc!=SQLITE_OK ); @@ -164758,38 +168438,27 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( } } - if( p->rc==SQLITE_OK + if( p->rc==SQLITE_OK && (p->eStage==RBU_STAGE_OAL || p->eStage==RBU_STAGE_MOVE) - && pState->eStage!=0 && p->pTargetFd->iCookie!=pState->iCookie - ){ - /* At this point (pTargetFd->iCookie) contains the value of the - ** change-counter cookie (the thing that gets incremented when a - ** transaction is committed in rollback mode) currently stored on - ** page 1 of the database file. */ - p->rc = SQLITE_BUSY; - p->zErrmsg = sqlite3_mprintf("database modified during rbu update"); + && pState->eStage!=0 + ){ + rbu_file *pFd = (rbuIsVacuum(p) ? p->pRbuFd : p->pTargetFd); + if( pFd->iCookie!=pState->iCookie ){ + /* At this point (pTargetFd->iCookie) contains the value of the + ** change-counter cookie (the thing that gets incremented when a + ** transaction is committed in rollback mode) currently stored on + ** page 1 of the database file. */ + p->rc = SQLITE_BUSY; + p->zErrmsg = sqlite3_mprintf("database modified during rbu %s", + (rbuIsVacuum(p) ? "vacuum" : "update") + ); + } } if( p->rc==SQLITE_OK ){ if( p->eStage==RBU_STAGE_OAL ){ sqlite3 *db = p->dbMain; - - /* Open transactions both databases. The *-oal file is opened or - ** created at this point. */ - p->rc = sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg); - if( p->rc==SQLITE_OK ){ - p->rc = sqlite3_exec(p->dbRbu, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg); - } - - /* Check if the main database is a zipvfs db. If it is, set the upper - ** level pager to use "journal_mode=off". This prevents it from - ** generating a large journal using a temp file. */ - if( p->rc==SQLITE_OK ){ - int frc = sqlite3_file_control(db, "main", SQLITE_FCNTL_ZIPVFS, 0); - if( frc==SQLITE_OK ){ - p->rc = sqlite3_exec(db, "PRAGMA journal_mode=off",0,0,&p->zErrmsg); - } - } + p->rc = sqlite3_exec(p->dbRbu, "BEGIN", 0, 0, &p->zErrmsg); /* Point the object iterator at the first object */ if( p->rc==SQLITE_OK ){ @@ -164800,12 +168469,34 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( ** update finished. */ if( p->rc==SQLITE_OK && p->objiter.zTbl==0 ){ p->rc = SQLITE_DONE; - } + p->eStage = RBU_STAGE_DONE; + }else{ + if( p->rc==SQLITE_OK && pState->eStage==0 && rbuIsVacuum(p) ){ + rbuCopyPragma(p, "page_size"); + rbuCopyPragma(p, "auto_vacuum"); + } - if( p->rc==SQLITE_OK ){ - rbuSetupOal(p, pState); - } + /* Open transactions both databases. The *-oal file is opened or + ** created at this point. 
*/ + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_exec(db, "BEGIN IMMEDIATE", 0, 0, &p->zErrmsg); + } + /* Check if the main database is a zipvfs db. If it is, set the upper + ** level pager to use "journal_mode=off". This prevents it from + ** generating a large journal using a temp file. */ + if( p->rc==SQLITE_OK ){ + int frc = sqlite3_file_control(db, "main", SQLITE_FCNTL_ZIPVFS, 0); + if( frc==SQLITE_OK ){ + p->rc = sqlite3_exec( + db, "PRAGMA journal_mode=off",0,0,&p->zErrmsg); + } + } + + if( p->rc==SQLITE_OK ){ + rbuSetupOal(p, pState); + } + } }else if( p->eStage==RBU_STAGE_MOVE ){ /* no-op */ }else if( p->eStage==RBU_STAGE_CKPT ){ @@ -164823,6 +168514,28 @@ SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( return p; } +/* +** Open and return a new RBU handle. +*/ +SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_open( + const char *zTarget, + const char *zRbu, + const char *zState +){ + /* TODO: Check that zTarget and zRbu are non-NULL */ + return openRbuHandle(zTarget, zRbu, zState); +} + +/* +** Open a handle to begin or resume an RBU VACUUM operation. +*/ +SQLITE_API sqlite3rbu *SQLITE_STDCALL sqlite3rbu_vacuum( + const char *zTarget, + const char *zState +){ + /* TODO: Check that both arguments are non-NULL */ + return openRbuHandle(0, zTarget, zState); +} /* ** Return the database handle used by pRbu. @@ -164843,7 +168556,7 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3rbu_db(sqlite3rbu *pRbu, int bRbu){ */ static void rbuEditErrmsg(sqlite3rbu *p){ if( p->rc==SQLITE_CONSTRAINT && p->zErrmsg ){ - int i; + unsigned int i; size_t nErrmsg = strlen(p->zErrmsg); for(i=0; i<(nErrmsg-8); i++){ if( memcmp(&p->zErrmsg[i], "rbu_imp_", 8)==0 ){ @@ -164877,9 +168590,19 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_close(sqlite3rbu *p, char **pzErrmsg){ /* Close any open statement handles. */ rbuObjIterFinalize(&p->objiter); + /* If this is an RBU vacuum handle and the vacuum has either finished + ** successfully or encountered an error, delete the contents of the + ** state table. This causes the next call to sqlite3rbu_vacuum() + ** specifying the current target and state databases to start a new + ** vacuum from scratch. */ + if( rbuIsVacuum(p) && p->rc!=SQLITE_OK && p->dbRbu ){ + int rc2 = sqlite3_exec(p->dbRbu, "DELETE FROM stat.rbu_state", 0, 0, 0); + if( p->rc==SQLITE_DONE && rc2!=SQLITE_OK ) p->rc = rc2; + } + /* Close the open database handle and VFS object. */ - sqlite3_close(p->dbMain); sqlite3_close(p->dbRbu); + sqlite3_close(p->dbMain); rbuDeleteVfs(p); sqlite3_free(p->aBuf); sqlite3_free(p->aFrame); @@ -164940,9 +168663,39 @@ SQLITE_API void SQLITE_STDCALL sqlite3rbu_bp_progress(sqlite3rbu *p, int *pnOne, } } +/* +** Return the current state of the RBU vacuum or update operation. 
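+**
+** A hypothetical polling loop (an illustration only, not code from this
+** patch) might use it as follows:
+**
+**   while( sqlite3rbu_step(pRbu)==SQLITE_OK ){
+**     int eState = sqlite3rbu_state(pRbu);
+**     ...                       (report eState or progress to the user)
+**   }
+**
+** SQLITE_RBU_STATE_ERROR is returned if the handle error code is
+** anything other than SQLITE_OK or SQLITE_DONE.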
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3rbu_state(sqlite3rbu *p){
+  int aRes[] = {
+    0, SQLITE_RBU_STATE_OAL, SQLITE_RBU_STATE_MOVE,
+    0, SQLITE_RBU_STATE_CHECKPOINT, SQLITE_RBU_STATE_DONE
+  };
+
+  assert( RBU_STAGE_OAL==1 );
+  assert( RBU_STAGE_MOVE==2 );
+  assert( RBU_STAGE_CKPT==4 );
+  assert( RBU_STAGE_DONE==5 );
+  assert( aRes[RBU_STAGE_OAL]==SQLITE_RBU_STATE_OAL );
+  assert( aRes[RBU_STAGE_MOVE]==SQLITE_RBU_STATE_MOVE );
+  assert( aRes[RBU_STAGE_CKPT]==SQLITE_RBU_STATE_CHECKPOINT );
+  assert( aRes[RBU_STAGE_DONE]==SQLITE_RBU_STATE_DONE );
+
+  if( p->rc!=SQLITE_OK && p->rc!=SQLITE_DONE ){
+    return SQLITE_RBU_STATE_ERROR;
+  }else{
+    assert( p->rc!=SQLITE_DONE || p->eStage==RBU_STAGE_DONE );
+    assert( p->eStage==RBU_STAGE_OAL
+         || p->eStage==RBU_STAGE_MOVE
+         || p->eStage==RBU_STAGE_CKPT
+         || p->eStage==RBU_STAGE_DONE
+    );
+    return aRes[p->eStage];
+  }
+}
+
 SQLITE_API int SQLITE_STDCALL sqlite3rbu_savestate(sqlite3rbu *p){
   int rc = p->rc;
-  if( rc==SQLITE_DONE ) return SQLITE_OK;
 
   assert( p->eStage>=RBU_STAGE_OAL && p->eStage<=RBU_STAGE_DONE );
@@ -165081,6 +168834,22 @@ static u32 rbuGetU32(u8 *aBuf){
       + ((u32)aBuf[3]);
 }
 
+/*
+** Write an unsigned 32-bit value in big-endian format to the supplied
+** buffer.
+*/
+static void rbuPutU32(u8 *aBuf, u32 iVal){
+  aBuf[0] = (iVal >> 24) & 0xFF;
+  aBuf[1] = (iVal >> 16) & 0xFF;
+  aBuf[2] = (iVal >>  8) & 0xFF;
+  aBuf[3] = (iVal >>  0) & 0xFF;
+}
+
+/*
+** Write an unsigned 16-bit value in big-endian format to the supplied
+** buffer.
+*/
+static void rbuPutU16(u8 *aBuf, u16 iVal){
+  aBuf[0] = (iVal >>  8) & 0xFF;
+  aBuf[1] = (iVal >>  0) & 0xFF;
+}
+
 /*
 ** Read data from an rbuVfs-file.
 */
@@ -165106,6 +168875,35 @@ static int rbuVfsRead(
     memset(zBuf, 0, iAmt);
   }else{
     rc = p->pReal->pMethods->xRead(p->pReal, zBuf, iAmt, iOfst);
+#if 1
+    /* If this is being called to read the first page of the target
+    ** database as part of an rbu vacuum operation, synthesize the
+    ** contents of the first page if it does not yet exist. Otherwise,
+    ** SQLite will not check for a *-wal file.  */
+    if( pRbu && rbuIsVacuum(pRbu)
+        && rc==SQLITE_IOERR_SHORT_READ && iOfst==0
+        && (p->openFlags & SQLITE_OPEN_MAIN_DB)
+        && pRbu->rc==SQLITE_OK
+    ){
+      sqlite3_file *pFd = (sqlite3_file*)pRbu->pRbuFd;
+      rc = pFd->pMethods->xRead(pFd, zBuf, iAmt, iOfst);
+      if( rc==SQLITE_OK ){
+        u8 *aBuf = (u8*)zBuf;
+        u32 iRoot = rbuGetU32(&aBuf[52]) ? 1 : 0;
+        rbuPutU32(&aBuf[52], iRoot);      /* largest root page number */
+        rbuPutU32(&aBuf[36], 0);          /* number of free pages */
+        rbuPutU32(&aBuf[32], 0);          /* first page on free list trunk */
+        rbuPutU32(&aBuf[28], 1);          /* size of db file in pages */
+        rbuPutU32(&aBuf[24], pRbu->pRbuFd->iCookie+1);  /* Change counter */
+
+        if( iAmt>100 ){
+          memset(&aBuf[100], 0, iAmt-100);
+          rbuPutU16(&aBuf[105], iAmt & 0xFFFF);
+          aBuf[100] = 0x0D;
+        }
+      }
+    }
+#endif
   }
   if( rc==SQLITE_OK && iOfst==0 && (p->openFlags & SQLITE_OPEN_MAIN_DB) ){
     /* These look like magic numbers. But they are stable, as they are part
@@ -165180,7 +168978,20 @@ static int rbuVfsSync(sqlite3_file *pFile, int flags){
 */
 static int rbuVfsFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
   rbu_file *p = (rbu_file *)pFile;
-  return p->pReal->pMethods->xFileSize(p->pReal, pSize);
+  int rc;
+  rc = p->pReal->pMethods->xFileSize(p->pReal, pSize);
+
+  /* If this is an RBU vacuum operation and this is the target database,
+  ** pretend that it has at least one page. Otherwise, SQLite will not
+  ** check for the existence of a *-wal file. rbuVfsRead() contains
+  ** similar logic.
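+  **
+  ** The value 1024 itself appears to be arbitrary (one minimum-size
+  ** page); per the comment above, what matters is only that the
+  ** reported size is non-zero, so that SQLite goes on to look for
+  ** the *-wal file.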
*/ + if( rc==SQLITE_OK && *pSize==0 + && p->pRbu && rbuIsVacuum(p->pRbu) + && (p->openFlags & SQLITE_OPEN_MAIN_DB) + ){ + *pSize = 1024; + } + return rc; } /* @@ -165192,7 +169003,9 @@ static int rbuVfsLock(sqlite3_file *pFile, int eLock){ int rc = SQLITE_OK; assert( p->openFlags & (SQLITE_OPEN_MAIN_DB|SQLITE_OPEN_TEMP_DB) ); - if( pRbu && eLock==SQLITE_LOCK_EXCLUSIVE && pRbu->eStage!=RBU_STAGE_DONE ){ + if( eLock==SQLITE_LOCK_EXCLUSIVE + && (p->bNolock || (pRbu && pRbu->eStage!=RBU_STAGE_DONE)) + ){ /* Do not allow EXCLUSIVE locks. Preventing SQLite from taking this ** prevents it from checkpointing the database from sqlite3_close(). */ rc = SQLITE_BUSY; @@ -165255,6 +169068,12 @@ static int rbuVfsFileControl(sqlite3_file *pFile, int op, void *pArg){ } return rc; } + else if( op==SQLITE_FCNTL_RBUCNT ){ + sqlite3rbu *pRbu = (sqlite3rbu*)pArg; + pRbu->nRbu++; + pRbu->pRbuFd = p; + p->bNolock = 1; + } rc = xControl(p->pReal, op, pArg); if( rc==SQLITE_OK && op==SQLITE_FCNTL_VFSNAME ){ @@ -165418,6 +169237,33 @@ static rbu_file *rbuFindMaindb(rbu_vfs *pRbuVfs, const char *zWal){ return pDb; } +/* +** A main database named zName has just been opened. The following +** function returns a pointer to a buffer owned by SQLite that contains +** the name of the *-wal file this db connection will use. SQLite +** happens to pass a pointer to this buffer when using xAccess() +** or xOpen() to operate on the *-wal file. +*/ +static const char *rbuMainToWal(const char *zName, int flags){ + int n = (int)strlen(zName); + const char *z = &zName[n]; + if( flags & SQLITE_OPEN_URI ){ + int odd = 0; + while( 1 ){ + if( z[0]==0 ){ + odd = 1 - odd; + if( odd && z[1]==0 ) break; + } + z++; + } + z += 2; + }else{ + while( *z==0 ) z++; + } + z += (n + 8 + 1); + return z; +} + /* ** Open an rbu file handle. */ @@ -165453,6 +169299,7 @@ static int rbuVfsOpen( rbu_file *pFd = (rbu_file *)pFile; int rc = SQLITE_OK; const char *zOpen = zName; + int oflags = flags; memset(pFd, 0, sizeof(rbu_file)); pFd->pReal = (sqlite3_file*)&pFd[1]; @@ -165465,23 +169312,7 @@ static int rbuVfsOpen( ** the name of the *-wal file this db connection will use. SQLite ** happens to pass a pointer to this buffer when using xAccess() ** or xOpen() to operate on the *-wal file. */ - int n = (int)strlen(zName); - const char *z = &zName[n]; - if( flags & SQLITE_OPEN_URI ){ - int odd = 0; - while( 1 ){ - if( z[0]==0 ){ - odd = 1 - odd; - if( odd && z[1]==0 ) break; - } - z++; - } - z += 2; - }else{ - while( *z==0 ) z++; - } - z += (n + 8 + 1); - pFd->zWal = z; + pFd->zWal = rbuMainToWal(zName, flags); } else if( flags & SQLITE_OPEN_WAL ){ rbu_file *pDb = rbuFindMaindb(pRbuVfs, zName); @@ -165491,10 +169322,17 @@ static int rbuVfsOpen( ** code ensures that the string passed to xOpen() is terminated by a ** pair of '\0' bytes in case the VFS attempts to extract a URI ** parameter from it. 
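+    **
+    ** Note that the assignment zCopy[nCopy-3]='o' below rewrites the
+    ** trailing "-wal" suffix as "-oal", e.g. (hypothetical path):
+    **
+    **     /tmp/target.db-wal  ->  /tmp/target.db-oal
+    **
+    ** so WAL reads and writes made by SQLite are transparently
+    ** redirected to the *-oal file.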
*/ - size_t nCopy = strlen(zName); - char *zCopy = sqlite3_malloc64(nCopy+2); + const char *zBase = zName; + size_t nCopy; + char *zCopy; + if( rbuIsVacuum(pDb->pRbu) ){ + zBase = sqlite3_db_filename(pDb->pRbu->dbRbu, "main"); + zBase = rbuMainToWal(zBase, SQLITE_OPEN_URI); + } + nCopy = strlen(zBase); + zCopy = sqlite3_malloc64(nCopy+2); if( zCopy ){ - memcpy(zCopy, zName, nCopy); + memcpy(zCopy, zBase, nCopy); zCopy[nCopy-3] = 'o'; zCopy[nCopy] = '\0'; zCopy[nCopy+1] = '\0'; @@ -165509,8 +169347,17 @@ static int rbuVfsOpen( } } + if( oflags & SQLITE_OPEN_MAIN_DB + && sqlite3_uri_boolean(zName, "rbu_memory", 0) + ){ + assert( oflags & SQLITE_OPEN_MAIN_DB ); + oflags = SQLITE_OPEN_TEMP_DB | SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | + SQLITE_OPEN_EXCLUSIVE | SQLITE_OPEN_DELETEONCLOSE; + zOpen = 0; + } + if( rc==SQLITE_OK ){ - rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, flags, pOutFlags); + rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, oflags, pOutFlags); } if( pFd->pReal->pMethods ){ /* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods @@ -165830,10 +169677,10 @@ SQLITE_API int SQLITE_STDCALL sqlite3rbu_create_vfs(const char *zName, const cha */ #define VTAB_SCHEMA \ "CREATE TABLE xx( " \ - " name STRING, /* Name of table or index */" \ - " path INTEGER, /* Path to page from root */" \ + " name TEXT, /* Name of table or index */" \ + " path TEXT, /* Path to page from root */" \ " pageno INTEGER, /* Page number */" \ - " pagetype STRING, /* 'internal', 'leaf' or 'overflow' */" \ + " pagetype TEXT, /* 'internal', 'leaf' or 'overflow' */" \ " ncell INTEGER, /* Cells on page (0 for overflow) */" \ " payload INTEGER, /* Bytes of payload on this page */" \ " unused INTEGER, /* Bytes of unused space on this page */" \ @@ -166474,6 +170321,4650 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ return SQLITE_OK; } #endif /* SQLITE_ENABLE_DBSTAT_VTAB */ /************** End of dbstat.c **********************************************/ +/************** Begin file sqlite3session.c **********************************/ + +#if defined(SQLITE_ENABLE_SESSION) && defined(SQLITE_ENABLE_PREUPDATE_HOOK) +/* #include "sqlite3session.h" */ +/* #include */ +/* #include */ + +#ifndef SQLITE_AMALGAMATION +/* # include "sqliteInt.h" */ +/* # include "vdbeInt.h" */ +#endif + +typedef struct SessionTable SessionTable; +typedef struct SessionChange SessionChange; +typedef struct SessionBuffer SessionBuffer; +typedef struct SessionInput SessionInput; + +/* +** Minimum chunk size used by streaming versions of functions. +*/ +#ifndef SESSIONS_STRM_CHUNK_SIZE +# ifdef SQLITE_TEST +# define SESSIONS_STRM_CHUNK_SIZE 64 +# else +# define SESSIONS_STRM_CHUNK_SIZE 1024 +# endif +#endif + +typedef struct SessionHook SessionHook; +struct SessionHook { + void *pCtx; + int (*xOld)(void*,int,sqlite3_value**); + int (*xNew)(void*,int,sqlite3_value**); + int (*xCount)(void*); + int (*xDepth)(void*); +}; + +/* +** Session handle structure. +*/ +struct sqlite3_session { + sqlite3 *db; /* Database handle session is attached to */ + char *zDb; /* Name of database session is attached to */ + int bEnable; /* True if currently recording */ + int bIndirect; /* True if all changes are indirect */ + int bAutoAttach; /* True to auto-attach tables */ + int rc; /* Non-zero if an error has occurred */ + void *pFilterCtx; /* First argument to pass to xTableFilter */ + int (*xTableFilter)(void *pCtx, const char *zTab); + sqlite3_session *pNext; /* Next session object on same db. 
*/ + SessionTable *pTable; /* List of attached tables */ + SessionHook hook; /* APIs to grab new and old data with */ +}; + +/* +** Instances of this structure are used to build strings or binary records. +*/ +struct SessionBuffer { + u8 *aBuf; /* Pointer to changeset buffer */ + int nBuf; /* Size of buffer aBuf */ + int nAlloc; /* Size of allocation containing aBuf */ +}; + +/* +** An object of this type is used internally as an abstraction for +** input data. Input data may be supplied either as a single large buffer +** (e.g. sqlite3changeset_start()) or using a stream function (e.g. +** sqlite3changeset_start_strm()). +*/ +struct SessionInput { + int bNoDiscard; /* If true, discard no data */ + int iCurrent; /* Offset in aData[] of current change */ + int iNext; /* Offset in aData[] of next change */ + u8 *aData; /* Pointer to buffer containing changeset */ + int nData; /* Number of bytes in aData */ + + SessionBuffer buf; /* Current read buffer */ + int (*xInput)(void*, void*, int*); /* Input stream call (or NULL) */ + void *pIn; /* First argument to xInput */ + int bEof; /* Set to true after xInput finished */ +}; + +/* +** Structure for changeset iterators. +*/ +struct sqlite3_changeset_iter { + SessionInput in; /* Input buffer or stream */ + SessionBuffer tblhdr; /* Buffer to hold apValue/zTab/abPK/ */ + int bPatchset; /* True if this is a patchset */ + int rc; /* Iterator error code */ + sqlite3_stmt *pConflict; /* Points to conflicting row, if any */ + char *zTab; /* Current table */ + int nCol; /* Number of columns in zTab */ + int op; /* Current operation */ + int bIndirect; /* True if current change was indirect */ + u8 *abPK; /* Primary key array */ + sqlite3_value **apValue; /* old.* and new.* values */ +}; + +/* +** Each session object maintains a set of the following structures, one +** for each table the session object is monitoring. The structures are +** stored in a linked list starting at sqlite3_session.pTable. +** +** The keys of the SessionTable.aChange[] hash table are all rows that have +** been modified in any way since the session object was attached to the +** table. +** +** The data associated with each hash-table entry is a structure containing +** a subset of the initial values that the modified row contained at the +** start of the session. Or no initial values if the row was inserted. +*/ +struct SessionTable { + SessionTable *pNext; + char *zName; /* Local name of table */ + int nCol; /* Number of columns in table zName */ + const char **azCol; /* Column names */ + u8 *abPK; /* Array of primary key flags */ + int nEntry; /* Total number of entries in hash table */ + int nChange; /* Size of apChange[] array */ + SessionChange **apChange; /* Hash table buckets */ +}; + +/* +** RECORD FORMAT: +** +** The following record format is similar to (but not compatible with) that +** used in SQLite database files. This format is used as part of the +** change-set binary format, and so must be architecture independent. +** +** Unlike the SQLite database record format, each field is self-contained - +** there is no separation of header and data. Each field begins with a +** single byte describing its type, as follows: +** +** 0x00: Undefined value. +** 0x01: Integer value. +** 0x02: Real value. +** 0x03: Text value. +** 0x04: Blob value. +** 0x05: SQL NULL value. +** +** Note that the above match the definitions of SQLITE_INTEGER, SQLITE_TEXT +** and so on in sqlite3.h. For undefined and NULL values, the field consists +** only of the single type byte. 
For other types of values, the type byte +** is followed by: +** +** Text values: +** A varint containing the number of bytes in the value (encoded using +** UTF-8). Followed by a buffer containing the UTF-8 representation +** of the text value. There is no nul terminator. +** +** Blob values: +** A varint containing the number of bytes in the value, followed by +** a buffer containing the value itself. +** +** Integer values: +** An 8-byte big-endian integer value. +** +** Real values: +** An 8-byte big-endian IEEE 754-2008 real value. +** +** Varint values are encoded in the same way as varints in the SQLite +** record format. +** +** CHANGESET FORMAT: +** +** A changeset is a collection of DELETE, UPDATE and INSERT operations on +** one or more tables. Operations on a single table are grouped together, +** but may occur in any order (i.e. deletes, updates and inserts are all +** mixed together). +** +** Each group of changes begins with a table header: +** +** 1 byte: Constant 0x54 (capital 'T') +** Varint: Number of columns in the table. +** nCol bytes: 0x01 for PK columns, 0x00 otherwise. +** N bytes: Unqualified table name (encoded using UTF-8). Nul-terminated. +** +** Followed by one or more changes to the table. +** +** 1 byte: Either SQLITE_INSERT (0x12), UPDATE (0x17) or DELETE (0x09). +** 1 byte: The "indirect-change" flag. +** old.* record: (delete and update only) +** new.* record: (insert and update only) +** +** The "old.*" and "new.*" records, if present, are N field records in the +** format described above under "RECORD FORMAT", where N is the number of +** columns in the table. The i'th field of each record is associated with +** the i'th column of the table, counting from left to right in the order +** in which columns were declared in the CREATE TABLE statement. +** +** The new.* record that is part of each INSERT change contains the values +** that make up the new row. Similarly, the old.* record that is part of each +** DELETE change contains the values that made up the row that was deleted +** from the database. In the changeset format, the records that are part +** of INSERT or DELETE changes never contain any undefined (type byte 0x00) +** fields. +** +** Within the old.* record associated with an UPDATE change, all fields +** associated with table columns that are not PRIMARY KEY columns and are +** not modified by the UPDATE change are set to "undefined". Other fields +** are set to the values that made up the row before the UPDATE that the +** change records took place. Within the new.* record, fields associated +** with table columns modified by the UPDATE change contain the new +** values. Fields associated with table columns that are not modified +** are set to "undefined". +** +** PATCHSET FORMAT: +** +** A patchset is also a collection of changes. It is similar to a changeset, +** but leaves undefined those fields that are not useful if no conflict +** resolution is required when applying the changeset. +** +** Each group of changes begins with a table header: +** +** 1 byte: Constant 0x50 (capital 'P') +** Varint: Number of columns in the table. +** nCol bytes: 0x01 for PK columns, 0x00 otherwise. +** N bytes: Unqualified table name (encoded using UTF-8). Nul-terminated. +** +** Followed by one or more changes to the table. +** +** 1 byte: Either SQLITE_INSERT (0x12), UPDATE (0x17) or DELETE (0x09). +** 1 byte: The "indirect-change" flag. +** single record: (PK fields for DELETE, PK and modified fields for UPDATE, +** full record for INSERT). 
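+**
+** As an illustrative (hypothetical) example, the record for an INSERT
+** change (identical in changesets and patchsets) of the row (1, 'hi')
+** into a two-column table is encoded as:
+**
+**     0x01  00 00 00 00 00 00 00 01      (integer value 1)
+**     0x03  02  'h' 'i'                  (text value 'hi')
+**
+** i.e. a type byte followed by an 8-byte big-endian integer, then a
+** type byte, a varint length and the UTF-8 text with no nul terminator.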
+** +** As in the changeset format, each field of the single record that is part +** of a patchset change is associated with the correspondingly positioned +** table column, counting from left to right within the CREATE TABLE +** statement. +** +** For a DELETE change, all fields within the record except those associated +** with PRIMARY KEY columns are set to "undefined". The PRIMARY KEY fields +** contain the values identifying the row to delete. +** +** For an UPDATE change, all fields except those associated with PRIMARY KEY +** columns and columns that are modified by the UPDATE are set to "undefined". +** PRIMARY KEY fields contain the values identifying the table row to update, +** and fields associated with modified columns contain the new column values. +** +** The records associated with INSERT changes are in the same format as for +** changesets. It is not possible for a record associated with an INSERT +** change to contain a field set to "undefined". +*/ + +/* +** For each row modified during a session, there exists a single instance of +** this structure stored in a SessionTable.aChange[] hash table. +*/ +struct SessionChange { + int op; /* One of UPDATE, DELETE, INSERT */ + int bIndirect; /* True if this change is "indirect" */ + int nRecord; /* Number of bytes in buffer aRecord[] */ + u8 *aRecord; /* Buffer containing old.* record */ + SessionChange *pNext; /* For hash-table collisions */ +}; + +/* +** Write a varint with value iVal into the buffer at aBuf. Return the +** number of bytes written. +*/ +static int sessionVarintPut(u8 *aBuf, int iVal){ + return putVarint32(aBuf, iVal); +} + +/* +** Return the number of bytes required to store value iVal as a varint. +*/ +static int sessionVarintLen(int iVal){ + return sqlite3VarintLen(iVal); +} + +/* +** Read a varint value from aBuf[] into *piVal. Return the number of +** bytes read. +*/ +static int sessionVarintGet(u8 *aBuf, int *piVal){ + return getVarint32(aBuf, *piVal); +} + +/* Load an unaligned and unsigned 32-bit integer */ +#define SESSION_UINT32(x) (((u32)(x)[0]<<24)|((x)[1]<<16)|((x)[2]<<8)|(x)[3]) + +/* +** Read a 64-bit big-endian integer value from buffer aRec[]. Return +** the value read. +*/ +static sqlite3_int64 sessionGetI64(u8 *aRec){ + u64 x = SESSION_UINT32(aRec); + u32 y = SESSION_UINT32(aRec+4); + x = (x<<32) + y; + return (sqlite3_int64)x; +} + +/* +** Write a 64-bit big-endian integer value to the buffer aBuf[]. +*/ +static void sessionPutI64(u8 *aBuf, sqlite3_int64 i){ + aBuf[0] = (i>>56) & 0xFF; + aBuf[1] = (i>>48) & 0xFF; + aBuf[2] = (i>>40) & 0xFF; + aBuf[3] = (i>>32) & 0xFF; + aBuf[4] = (i>>24) & 0xFF; + aBuf[5] = (i>>16) & 0xFF; + aBuf[6] = (i>> 8) & 0xFF; + aBuf[7] = (i>> 0) & 0xFF; +} + +/* +** This function is used to serialize the contents of value pValue (see +** comment titled "RECORD FORMAT" above). +** +** If it is non-NULL, the serialized form of the value is written to +** buffer aBuf. *pnWrite is set to the number of bytes written before +** returning. Or, if aBuf is NULL, the only thing this function does is +** set *pnWrite. +** +** If no error occurs, SQLITE_OK is returned. Or, if an OOM error occurs +** within a call to sqlite3_value_text() (may fail if the db is utf-16)) +** SQLITE_NOMEM is returned. 
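+**
+** For reference, the sizes implied by the format described above are:
+** a single type byte for NULL and undefined values, 1+8 bytes for
+** integer and real values, and 1 + varint(n) + n bytes for a text or
+** blob value of n bytes.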
+*/ +static int sessionSerializeValue( + u8 *aBuf, /* If non-NULL, write serialized value here */ + sqlite3_value *pValue, /* Value to serialize */ + int *pnWrite /* IN/OUT: Increment by bytes written */ +){ + int nByte; /* Size of serialized value in bytes */ + + if( pValue ){ + int eType; /* Value type (SQLITE_NULL, TEXT etc.) */ + + eType = sqlite3_value_type(pValue); + if( aBuf ) aBuf[0] = eType; + + switch( eType ){ + case SQLITE_NULL: + nByte = 1; + break; + + case SQLITE_INTEGER: + case SQLITE_FLOAT: + if( aBuf ){ + /* TODO: SQLite does something special to deal with mixed-endian + ** floating point values (e.g. ARM7). This code probably should + ** too. */ + u64 i; + if( eType==SQLITE_INTEGER ){ + i = (u64)sqlite3_value_int64(pValue); + }else{ + double r; + assert( sizeof(double)==8 && sizeof(u64)==8 ); + r = sqlite3_value_double(pValue); + memcpy(&i, &r, 8); + } + sessionPutI64(&aBuf[1], i); + } + nByte = 9; + break; + + default: { + u8 *z; + int n; + int nVarint; + + assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); + if( eType==SQLITE_TEXT ){ + z = (u8 *)sqlite3_value_text(pValue); + }else{ + z = (u8 *)sqlite3_value_blob(pValue); + } + n = sqlite3_value_bytes(pValue); + if( z==0 && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; + nVarint = sessionVarintLen(n); + + if( aBuf ){ + sessionVarintPut(&aBuf[1], n); + memcpy(&aBuf[nVarint + 1], eType==SQLITE_TEXT ? + sqlite3_value_text(pValue) : sqlite3_value_blob(pValue), n + ); + } + + nByte = 1 + nVarint + n; + break; + } + } + }else{ + nByte = 1; + if( aBuf ) aBuf[0] = '\0'; + } + + if( pnWrite ) *pnWrite += nByte; + return SQLITE_OK; +} + + +/* +** This macro is used to calculate hash key values for data structures. In +** order to use this macro, the entire data structure must be represented +** as a series of unsigned integers. In order to calculate a hash-key value +** for a data structure represented as three such integers, the macro may +** then be used as follows: +** +** int hash_key_value; +** hash_key_value = HASH_APPEND(0, ); +** hash_key_value = HASH_APPEND(hash_key_value, ); +** hash_key_value = HASH_APPEND(hash_key_value, ); +** +** In practice, the data structures this macro is used for are the primary +** key values of modified rows. +*/ +#define HASH_APPEND(hash, add) ((hash) << 3) ^ (hash) ^ (unsigned int)(add) + +/* +** Append the hash of the 64-bit integer passed as the second argument to the +** hash-key value passed as the first. Return the new hash-key value. +*/ +static unsigned int sessionHashAppendI64(unsigned int h, i64 i){ + h = HASH_APPEND(h, i & 0xFFFFFFFF); + return HASH_APPEND(h, (i>>32)&0xFFFFFFFF); +} + +/* +** Append the hash of the blob passed via the second and third arguments to +** the hash-key value passed as the first. Return the new hash-key value. 
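+**
+** The input is folded into the hash using the HASH_APPEND() macro
+** defined above, which for a current hash h and input x evaluates:
+**
+**   ((h) << 3) ^ (h) ^ (unsigned int)(x)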
+*/ +static unsigned int sessionHashAppendBlob(unsigned int h, int n, const u8 *z){ + int i; + for(i=0; inCol==pSession->hook.xCount(pSession->hook.pCtx) ); + for(i=0; inCol; i++){ + if( pTab->abPK[i] ){ + int rc; + int eType; + sqlite3_value *pVal; + + if( bNew ){ + rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); + }else{ + rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); + } + if( rc!=SQLITE_OK ) return rc; + + eType = sqlite3_value_type(pVal); + h = sessionHashAppendType(h, eType); + if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_value_int64(pVal); + }else{ + double rVal = sqlite3_value_double(pVal); + assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); + memcpy(&iVal, &rVal, 8); + } + h = sessionHashAppendI64(h, iVal); + }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ + const u8 *z; + int n; + if( eType==SQLITE_TEXT ){ + z = (const u8 *)sqlite3_value_text(pVal); + }else{ + z = (const u8 *)sqlite3_value_blob(pVal); + } + n = sqlite3_value_bytes(pVal); + if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; + h = sessionHashAppendBlob(h, n, z); + }else{ + assert( eType==SQLITE_NULL ); + *pbNullPK = 1; + } + } + } + + *piHash = (h % pTab->nChange); + return SQLITE_OK; +} + +/* +** The buffer that the argument points to contains a serialized SQL value. +** Return the number of bytes of space occupied by the value (including +** the type byte). +*/ +static int sessionSerialLen(u8 *a){ + int e = *a; + int n; + if( e==0 ) return 1; + if( e==SQLITE_NULL ) return 1; + if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9; + return sessionVarintGet(&a[1], &n) + 1 + n; +} + +/* +** Based on the primary key values stored in change aRecord, calculate a +** hash key. Assume the has table has nBucket buckets. The hash keys +** calculated by this function are compatible with those calculated by +** sessionPreupdateHash(). +** +** The bPkOnly argument is non-zero if the record at aRecord[] is from +** a patchset DELETE. In this case the non-PK fields are omitted entirely. +*/ +static unsigned int sessionChangeHash( + SessionTable *pTab, /* Table handle */ + int bPkOnly, /* Record consists of PK fields only */ + u8 *aRecord, /* Change record */ + int nBucket /* Assume this many buckets in hash table */ +){ + unsigned int h = 0; /* Value to return */ + int i; /* Used to iterate through columns */ + u8 *a = aRecord; /* Used to iterate through change record */ + + for(i=0; inCol; i++){ + int eType = *a; + int isPK = pTab->abPK[i]; + if( bPkOnly && isPK==0 ) continue; + + /* It is not possible for eType to be SQLITE_NULL here. The session + ** module does not record changes for rows with NULL values stored in + ** primary key columns. */ + assert( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT + || eType==SQLITE_TEXT || eType==SQLITE_BLOB + || eType==SQLITE_NULL || eType==0 + ); + assert( !isPK || (eType!=0 && eType!=SQLITE_NULL) ); + + if( isPK ){ + a++; + h = sessionHashAppendType(h, eType); + if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + h = sessionHashAppendI64(h, sessionGetI64(a)); + a += 8; + }else{ + int n; + a += sessionVarintGet(a, &n); + h = sessionHashAppendBlob(h, n, a); + a += n; + } + }else{ + a += sessionSerialLen(a); + } + } + return (h % nBucket); +} + +/* +** Arguments aLeft and aRight are pointers to change records for table pTab. +** This function returns true if the two records apply to the same row (i.e. +** have the same values stored in the primary key columns), or false +** otherwise. 
+*/ +static int sessionChangeEqual( + SessionTable *pTab, /* Table used for PK definition */ + int bLeftPkOnly, /* True if aLeft[] contains PK fields only */ + u8 *aLeft, /* Change record */ + int bRightPkOnly, /* True if aRight[] contains PK fields only */ + u8 *aRight /* Change record */ +){ + u8 *a1 = aLeft; /* Cursor to iterate through aLeft */ + u8 *a2 = aRight; /* Cursor to iterate through aRight */ + int iCol; /* Used to iterate through table columns */ + + for(iCol=0; iColnCol; iCol++){ + if( pTab->abPK[iCol] ){ + int n1 = sessionSerialLen(a1); + int n2 = sessionSerialLen(a2); + + if( pTab->abPK[iCol] && (n1!=n2 || memcmp(a1, a2, n1)) ){ + return 0; + } + a1 += n1; + a2 += n2; + }else{ + if( bLeftPkOnly==0 ) a1 += sessionSerialLen(a1); + if( bRightPkOnly==0 ) a2 += sessionSerialLen(a2); + } + } + + return 1; +} + +/* +** Arguments aLeft and aRight both point to buffers containing change +** records with nCol columns. This function "merges" the two records into +** a single records which is written to the buffer at *paOut. *paOut is +** then set to point to one byte after the last byte written before +** returning. +** +** The merging of records is done as follows: For each column, if the +** aRight record contains a value for the column, copy the value from +** their. Otherwise, if aLeft contains a value, copy it. If neither +** record contains a value for a given column, then neither does the +** output record. +*/ +static void sessionMergeRecord( + u8 **paOut, + int nCol, + u8 *aLeft, + u8 *aRight +){ + u8 *a1 = aLeft; /* Cursor used to iterate through aLeft */ + u8 *a2 = aRight; /* Cursor used to iterate through aRight */ + u8 *aOut = *paOut; /* Output cursor */ + int iCol; /* Used to iterate from 0 to nCol */ + + for(iCol=0; iColnCol; i++){ + int nOld; + u8 *aOld; + int nNew; + u8 *aNew; + + aOld = sessionMergeValue(&aOld1, &aOld2, &nOld); + aNew = sessionMergeValue(&aNew1, &aNew2, &nNew); + if( pTab->abPK[i] || nOld!=nNew || memcmp(aOld, aNew, nNew) ){ + if( pTab->abPK[i]==0 ) bRequired = 1; + memcpy(aOut, aOld, nOld); + aOut += nOld; + }else{ + *(aOut++) = '\0'; + } + } + + if( !bRequired ) return 0; + } + + /* Write the new.* vector */ + aOld1 = aOldRecord1; + aOld2 = aOldRecord2; + aNew1 = aNewRecord1; + aNew2 = aNewRecord2; + for(i=0; inCol; i++){ + int nOld; + u8 *aOld; + int nNew; + u8 *aNew; + + aOld = sessionMergeValue(&aOld1, &aOld2, &nOld); + aNew = sessionMergeValue(&aNew1, &aNew2, &nNew); + if( bPatchset==0 + && (pTab->abPK[i] || (nOld==nNew && 0==memcmp(aOld, aNew, nNew))) + ){ + *(aOut++) = '\0'; + }else{ + memcpy(aOut, aNew, nNew); + aOut += nNew; + } + } + + *paOut = aOut; + return 1; +} + +/* +** This function is only called from within a pre-update-hook callback. +** It determines if the current pre-update-hook change affects the same row +** as the change stored in argument pChange. If so, it returns true. Otherwise +** if the pre-update-hook does not affect the same row as pChange, it returns +** false. 
+*/
+static int sessionPreupdateEqual(
+  sqlite3_session *pSession,      /* Session object that owns SessionTable */
+  SessionTable *pTab,             /* Table associated with change */
+  SessionChange *pChange,         /* Change to compare to */
+  int op                          /* Current pre-update operation */
+){
+  int iCol;                       /* Used to iterate through columns */
+  u8 *a = pChange->aRecord;       /* Cursor used to scan change record */
+
+  assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE );
+  for(iCol=0; iCol<pTab->nCol; iCol++){
+    if( !pTab->abPK[iCol] ){
+      a += sessionSerialLen(a);
+    }else{
+      sqlite3_value *pVal;        /* Value returned by preupdate_new/old */
+      int rc;                     /* Error code from preupdate_new/old */
+      int eType = *a++;           /* Type of value from change record */
+
+      /* The following calls to preupdate_new() and preupdate_old() can not
+      ** fail. This is because they cache their return values, and by the
+      ** time control flows to here they have already been called once from
+      ** within sessionPreupdateHash(). The first two asserts below verify
+      ** this (that the method has already been called). */
+      if( op==SQLITE_INSERT ){
+        /* assert( db->pPreUpdate->pNewUnpacked || db->pPreUpdate->aNew ); */
+        rc = pSession->hook.xNew(pSession->hook.pCtx, iCol, &pVal);
+      }else{
+        /* assert( db->pPreUpdate->pUnpacked ); */
+        rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal);
+      }
+      assert( rc==SQLITE_OK );
+      if( sqlite3_value_type(pVal)!=eType ) return 0;
+
+      /* A SessionChange object never has a NULL value in a PK column */
+      assert( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT
+           || eType==SQLITE_BLOB    || eType==SQLITE_TEXT
+      );
+
+      if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+        i64 iVal = sessionGetI64(a);
+        a += 8;
+        if( eType==SQLITE_INTEGER ){
+          if( sqlite3_value_int64(pVal)!=iVal ) return 0;
+        }else{
+          double rVal;
+          assert( sizeof(iVal)==8 && sizeof(rVal)==8 );
+          memcpy(&rVal, &iVal, 8);
+          if( sqlite3_value_double(pVal)!=rVal ) return 0;
+        }
+      }else{
+        int n;
+        const u8 *z;
+        a += sessionVarintGet(a, &n);
+        if( sqlite3_value_bytes(pVal)!=n ) return 0;
+        if( eType==SQLITE_TEXT ){
+          z = sqlite3_value_text(pVal);
+        }else{
+          z = sqlite3_value_blob(pVal);
+        }
+        if( memcmp(a, z, n) ) return 0;
+        a += n;
+        break;
+      }
+    }
+  }
+
+  return 1;
+}
+
+/*
+** If required, grow the hash table used to store changes on table pTab
+** (part of the session pSession). If a fatal OOM error occurs, set the
+** session object to failed and return SQLITE_ERROR. Otherwise, return
+** SQLITE_OK.
+**
+** It is possible that a non-fatal OOM error occurs in this function. In
+** that case the hash-table does not grow, but SQLITE_OK is returned anyway.
+** Growing the hash table in this case is a performance optimization only,
+** it is not required for correct operation.
+*/
+static int sessionGrowHash(int bPatchset, SessionTable *pTab){
+  if( pTab->nChange==0 || pTab->nEntry>=(pTab->nChange/2) ){
+    int i;
+    SessionChange **apNew;
+    int nNew = (pTab->nChange ? pTab->nChange : 128) * 2;
+
+    apNew = (SessionChange **)sqlite3_malloc(sizeof(SessionChange *) * nNew);
+    if( apNew==0 ){
+      if( pTab->nChange==0 ){
+        return SQLITE_ERROR;
+      }
+      return SQLITE_OK;
+    }
+    memset(apNew, 0, sizeof(SessionChange *) * nNew);
+
+    for(i=0; i<pTab->nChange; i++){
+      SessionChange *p;
+      SessionChange *pNext;
+      for(p=pTab->apChange[i]; p; p=pNext){
+        int bPkOnly = (p->op==SQLITE_DELETE && bPatchset);
+        int iHash = sessionChangeHash(pTab, bPkOnly, p->aRecord, nNew);
+        pNext = p->pNext;
+        p->pNext = apNew[iHash];
+        apNew[iHash] = p;
+      }
+    }
+
+    sqlite3_free(pTab->apChange);
+    pTab->nChange = nNew;
+    pTab->apChange = apNew;
+  }
+
+  return SQLITE_OK;
+}
+
+/*
+** This function queries the database for the names of the columns of table
+** zThis, in schema zDb. It is expected that the table has nCol columns. If
+** not, SQLITE_SCHEMA is returned and none of the output variables are
+** populated.
+**
+** Otherwise, if they are not NULL, variable *pnCol is set to the number
+** of columns in the database table and variable *pzTab is set to point to a
+** nul-terminated copy of the table name. *pazCol (if not NULL) is set to
+** point to an array of pointers to column names. And *pabPK (again, if not
+** NULL) is set to point to an array of booleans - true if the corresponding
+** column is part of the primary key.
+**
+** For example, if the table is declared as:
+**
+**     CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z));
+**
+** Then the four output variables are populated as follows:
+**
+**     *pnCol  = 4
+**     *pzTab  = "tbl1"
+**     *pazCol = {"w", "x", "y", "z"}
+**     *pabPK  = {1, 0, 0, 1}
+**
+** All returned buffers are part of the same single allocation, which must
+** be freed using sqlite3_free() by the caller. If pazCol was not NULL, then
+** pointer *pazCol should be freed to release all memory. Otherwise, pointer
+** *pabPK. It is illegal for both pazCol and pabPK to be NULL.
+*/
+static int sessionTableInfo(
+  sqlite3 *db,                    /* Database connection */
+  const char *zDb,                /* Name of attached database (e.g. "main") */
+  const char *zThis,              /* Table name */
+  int *pnCol,                     /* OUT: number of columns */
+  const char **pzTab,             /* OUT: Copy of zThis */
+  const char ***pazCol,           /* OUT: Array of column names for table */
+  u8 **pabPK                      /* OUT: Array of booleans - true for PK col */
+){
+  char *zPragma;
+  sqlite3_stmt *pStmt;
+  int rc;
+  int nByte;
+  int nDbCol = 0;
+  int nThis;
+  int i;
+  u8 *pAlloc = 0;
+  char **azCol = 0;
+  u8 *abPK = 0;
+
+  assert( pazCol && pabPK );
+
+  nThis = sqlite3Strlen30(zThis);
+  zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis);
+  if( !zPragma ) return SQLITE_NOMEM;
+
+  rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0);
+  sqlite3_free(zPragma);
+  if( rc!=SQLITE_OK ) return rc;
+
+  nByte = nThis + 1;
+  while( SQLITE_ROW==sqlite3_step(pStmt) ){
+    nByte += sqlite3_column_bytes(pStmt, 1);
+    nDbCol++;
+  }
+  rc = sqlite3_reset(pStmt);
+
+  if( rc==SQLITE_OK ){
+    nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1);
+    pAlloc = sqlite3_malloc(nByte);
+    if( pAlloc==0 ){
+      rc = SQLITE_NOMEM;
+    }
+  }
+  if( rc==SQLITE_OK ){
+    azCol = (char **)pAlloc;
+    pAlloc = (u8 *)&azCol[nDbCol];
+    abPK = (u8 *)pAlloc;
+    pAlloc = &abPK[nDbCol];
+    if( pzTab ){
+      memcpy(pAlloc, zThis, nThis+1);
+      *pzTab = (char *)pAlloc;
+      pAlloc += nThis+1;
+    }
+
+    i = 0;
+    while( SQLITE_ROW==sqlite3_step(pStmt) ){
+      int nName = sqlite3_column_bytes(pStmt, 1);
+      const unsigned char *zName = sqlite3_column_text(pStmt, 1);
+      if( zName==0 ) break;
+      memcpy(pAlloc, zName, nName+1);
+      azCol[i] = (char *)pAlloc;
+      pAlloc += nName+1;
+      abPK[i] = sqlite3_column_int(pStmt, 5);
+      i++;
+    }
+    rc = sqlite3_reset(pStmt);
+
+  }
+
+  /* If successful, populate the output variables. Otherwise, zero them and
+  ** free any allocation made. An error code will be returned in this case.
+  */
+  if( rc==SQLITE_OK ){
+    *pazCol = (const char **)azCol;
+    *pabPK = abPK;
+    *pnCol = nDbCol;
+  }else{
+    *pazCol = 0;
+    *pabPK = 0;
+    *pnCol = 0;
+    if( pzTab ) *pzTab = 0;
+    sqlite3_free(azCol);
+  }
+  sqlite3_finalize(pStmt);
+  return rc;
+}
+
+/*
+** This function is only called from within a pre-update handler for a
+** write to table pTab, part of session pSession. If this is the first
+** write to this table, initialize the SessionTable.nCol, azCol[] and
+** abPK[] arrays accordingly.
+**
+** If an error occurs, an error code is stored in sqlite3_session.rc and
+** non-zero returned. Or, if no error occurs but the table has no primary
+** key, sqlite3_session.rc is left set to SQLITE_OK and non-zero returned to
+** indicate that updates on this table should be ignored. SessionTable.abPK
+** is set to NULL in this case.
+*/
+static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){
+  if( pTab->nCol==0 ){
+    u8 *abPK;
+    assert( pTab->azCol==0 || pTab->abPK==0 );
+    pSession->rc = sessionTableInfo(pSession->db, pSession->zDb,
+        pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK
+    );
+    if( pSession->rc==SQLITE_OK ){
+      int i;
+      for(i=0; i<pTab->nCol; i++){
+        if( abPK[i] ){
+          pTab->abPK = abPK;
+          break;
+        }
+      }
+    }
+  }
+  return (pSession->rc || pTab->abPK==0);
+}
+
+/*
+** This function is only called from within a pre-update-hook reporting a
+** change on table pTab (attached to session pSession). The type of change
+** (UPDATE, INSERT, DELETE) is specified by the first argument.
+**
+** Unless one is already present or an error occurs, an entry is added
+** to the changed-rows hash table associated with table pTab.
+*/ +static void sessionPreupdateOneChange( + int op, /* One of SQLITE_UPDATE, INSERT, DELETE */ + sqlite3_session *pSession, /* Session object pTab is attached to */ + SessionTable *pTab /* Table that change applies to */ +){ + int iHash; + int bNull = 0; + int rc = SQLITE_OK; + + if( pSession->rc ) return; + + /* Load table details if required */ + if( sessionInitTable(pSession, pTab) ) return; + + /* Check the number of columns in this xPreUpdate call matches the + ** number of columns in the table. */ + if( pTab->nCol!=pSession->hook.xCount(pSession->hook.pCtx) ){ + pSession->rc = SQLITE_SCHEMA; + return; + } + + /* Grow the hash table if required */ + if( sessionGrowHash(0, pTab) ){ + pSession->rc = SQLITE_NOMEM; + return; + } + + /* Calculate the hash-key for this change. If the primary key of the row + ** includes a NULL value, exit early. Such changes are ignored by the + ** session module. */ + rc = sessionPreupdateHash(pSession, pTab, op==SQLITE_INSERT, &iHash, &bNull); + if( rc!=SQLITE_OK ) goto error_out; + + if( bNull==0 ){ + /* Search the hash table for an existing record for this row. */ + SessionChange *pC; + for(pC=pTab->apChange[iHash]; pC; pC=pC->pNext){ + if( sessionPreupdateEqual(pSession, pTab, pC, op) ) break; + } + + if( pC==0 ){ + /* Create a new change object containing all the old values (if + ** this is an SQLITE_UPDATE or SQLITE_DELETE), or just the PK + ** values (if this is an INSERT). */ + SessionChange *pChange; /* New change object */ + int nByte; /* Number of bytes to allocate */ + int i; /* Used to iterate through columns */ + + assert( rc==SQLITE_OK ); + pTab->nEntry++; + + /* Figure out how large an allocation is required */ + nByte = sizeof(SessionChange); + for(i=0; inCol; i++){ + sqlite3_value *p = 0; + if( op!=SQLITE_INSERT ){ + TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); + assert( trc==SQLITE_OK ); + }else if( pTab->abPK[i] ){ + TESTONLY(int trc = ) pSession->hook.xNew(pSession->hook.pCtx, i, &p); + assert( trc==SQLITE_OK ); + } + + /* This may fail if SQLite value p contains a utf-16 string that must + ** be converted to utf-8 and an OOM error occurs while doing so. */ + rc = sessionSerializeValue(0, p, &nByte); + if( rc!=SQLITE_OK ) goto error_out; + } + + /* Allocate the change object */ + pChange = (SessionChange *)sqlite3_malloc(nByte); + if( !pChange ){ + rc = SQLITE_NOMEM; + goto error_out; + }else{ + memset(pChange, 0, sizeof(SessionChange)); + pChange->aRecord = (u8 *)&pChange[1]; + } + + /* Populate the change object. None of the preupdate_old(), + ** preupdate_new() or SerializeValue() calls below may fail as all + ** required values and encodings have already been cached in memory. + ** It is not possible for an OOM to occur in this block. */ + nByte = 0; + for(i=0; inCol; i++){ + sqlite3_value *p = 0; + if( op!=SQLITE_INSERT ){ + pSession->hook.xOld(pSession->hook.pCtx, i, &p); + }else if( pTab->abPK[i] ){ + pSession->hook.xNew(pSession->hook.pCtx, i, &p); + } + sessionSerializeValue(&pChange->aRecord[nByte], p, &nByte); + } + + /* Add the change to the hash-table */ + if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){ + pChange->bIndirect = 1; + } + pChange->nRecord = nByte; + pChange->op = op; + pChange->pNext = pTab->apChange[iHash]; + pTab->apChange[iHash] = pChange; + + }else if( pC->bIndirect ){ + /* If the existing change is considered "indirect", but this current + ** change is "direct", mark the change object as direct. 
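+      **
+      ** A change counts as "direct" only when the session is not in
+      ** bIndirect mode and xDepth() reports 0 - that is, the write was
+      ** not made from within a trigger program or foreign key action.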
*/ + if( pSession->hook.xDepth(pSession->hook.pCtx)==0 + && pSession->bIndirect==0 + ){ + pC->bIndirect = 0; + } + } + } + + /* If an error has occurred, mark the session object as failed. */ + error_out: + if( rc!=SQLITE_OK ){ + pSession->rc = rc; + } +} + +static int sessionFindTable( + sqlite3_session *pSession, + const char *zName, + SessionTable **ppTab +){ + int rc = SQLITE_OK; + int nName = sqlite3Strlen30(zName); + SessionTable *pRet; + + /* Search for an existing table */ + for(pRet=pSession->pTable; pRet; pRet=pRet->pNext){ + if( 0==sqlite3_strnicmp(pRet->zName, zName, nName+1) ) break; + } + + if( pRet==0 && pSession->bAutoAttach ){ + /* If there is a table-filter configured, invoke it. If it returns 0, + ** do not automatically add the new table. */ + if( pSession->xTableFilter==0 + || pSession->xTableFilter(pSession->pFilterCtx, zName) + ){ + rc = sqlite3session_attach(pSession, zName); + if( rc==SQLITE_OK ){ + for(pRet=pSession->pTable; pRet->pNext; pRet=pRet->pNext); + assert( 0==sqlite3_strnicmp(pRet->zName, zName, nName+1) ); + } + } + } + + assert( rc==SQLITE_OK || pRet==0 ); + *ppTab = pRet; + return rc; +} + +/* +** The 'pre-update' hook registered by this module with SQLite databases. +*/ +static void xPreUpdate( + void *pCtx, /* Copy of third arg to preupdate_hook() */ + sqlite3 *db, /* Database handle */ + int op, /* SQLITE_UPDATE, DELETE or INSERT */ + char const *zDb, /* Database name */ + char const *zName, /* Table name */ + sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */ + sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */ +){ + sqlite3_session *pSession; + int nDb = sqlite3Strlen30(zDb); + + assert( sqlite3_mutex_held(db->mutex) ); + + for(pSession=(sqlite3_session *)pCtx; pSession; pSession=pSession->pNext){ + SessionTable *pTab; + + /* If this session is attached to a different database ("main", "temp" + ** etc.), or if it is not currently enabled, there is nothing to do. Skip + ** to the next session object attached to this database. */ + if( pSession->bEnable==0 ) continue; + if( pSession->rc ) continue; + if( sqlite3_strnicmp(zDb, pSession->zDb, nDb+1) ) continue; + + pSession->rc = sessionFindTable(pSession, zName, &pTab); + if( pTab ){ + assert( pSession->rc==SQLITE_OK ); + sessionPreupdateOneChange(op, pSession, pTab); + if( op==SQLITE_UPDATE ){ + sessionPreupdateOneChange(SQLITE_INSERT, pSession, pTab); + } + } + } +} + +/* +** The pre-update hook implementations. +*/ +static int sessionPreupdateOld(void *pCtx, int iVal, sqlite3_value **ppVal){ + return sqlite3_preupdate_old((sqlite3*)pCtx, iVal, ppVal); +} +static int sessionPreupdateNew(void *pCtx, int iVal, sqlite3_value **ppVal){ + return sqlite3_preupdate_new((sqlite3*)pCtx, iVal, ppVal); +} +static int sessionPreupdateCount(void *pCtx){ + return sqlite3_preupdate_count((sqlite3*)pCtx); +} +static int sessionPreupdateDepth(void *pCtx){ + return sqlite3_preupdate_depth((sqlite3*)pCtx); +} + +/* +** Install the pre-update hooks on the session object passed as the only +** argument. +*/ +static void sessionPreupdateHooks( + sqlite3_session *pSession +){ + pSession->hook.pCtx = (void*)pSession->db; + pSession->hook.xOld = sessionPreupdateOld; + pSession->hook.xNew = sessionPreupdateNew; + pSession->hook.xCount = sessionPreupdateCount; + pSession->hook.xDepth = sessionPreupdateDepth; +} + +typedef struct SessionDiffCtx SessionDiffCtx; +struct SessionDiffCtx { + sqlite3_stmt *pStmt; + int nOldOff; +}; + +/* +** The diff hook implementations. 
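+**
+** These stand in for the pre-update hooks so that the same
+** change-capture code can be driven from an ordinary SELECT:
+** sessionDiffNew() reads the new row from the first columns of the
+** compare statement and, when diffing for UPDATEs, sessionDiffOld()
+** reads the old row starting at column offset nOldOff.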
+*/ +static int sessionDiffOld(void *pCtx, int iVal, sqlite3_value **ppVal){ + SessionDiffCtx *p = (SessionDiffCtx*)pCtx; + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff); + return SQLITE_OK; +} +static int sessionDiffNew(void *pCtx, int iVal, sqlite3_value **ppVal){ + SessionDiffCtx *p = (SessionDiffCtx*)pCtx; + *ppVal = sqlite3_column_value(p->pStmt, iVal); + return SQLITE_OK; +} +static int sessionDiffCount(void *pCtx){ + SessionDiffCtx *p = (SessionDiffCtx*)pCtx; + return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt); +} +static int sessionDiffDepth(void *pCtx){ + return 0; +} + +/* +** Install the diff hooks on the session object passed as the only +** argument. +*/ +static void sessionDiffHooks( + sqlite3_session *pSession, + SessionDiffCtx *pDiffCtx +){ + pSession->hook.pCtx = (void*)pDiffCtx; + pSession->hook.xOld = sessionDiffOld; + pSession->hook.xNew = sessionDiffNew; + pSession->hook.xCount = sessionDiffCount; + pSession->hook.xDepth = sessionDiffDepth; +} + +static char *sessionExprComparePK( + int nCol, + const char *zDb1, const char *zDb2, + const char *zTab, + const char **azCol, u8 *abPK +){ + int i; + const char *zSep = ""; + char *zRet = 0; + + for(i=0; inCol, zDb1, zDb2, pTab->zName,zExpr); + + if( zStmt==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_stmt *pStmt; + rc = sqlite3_prepare(pSession->db, zStmt, -1, &pStmt, 0); + if( rc==SQLITE_OK ){ + SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx; + pDiffCtx->pStmt = pStmt; + pDiffCtx->nOldOff = 0; + while( SQLITE_ROW==sqlite3_step(pStmt) ){ + sessionPreupdateOneChange(op, pSession, pTab); + } + rc = sqlite3_finalize(pStmt); + } + sqlite3_free(zStmt); + } + + return rc; +} + +static int sessionDiffFindModified( + sqlite3_session *pSession, + SessionTable *pTab, + const char *zFrom, + const char *zExpr +){ + int rc = SQLITE_OK; + + char *zExpr2 = sessionExprCompareOther(pTab->nCol, + pSession->zDb, zFrom, pTab->zName, pTab->azCol, pTab->abPK + ); + if( zExpr2==0 ){ + rc = SQLITE_NOMEM; + }else{ + char *zStmt = sqlite3_mprintf( + "SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", + pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 + ); + if( zStmt==0 ){ + rc = SQLITE_NOMEM; + }else{ + sqlite3_stmt *pStmt; + rc = sqlite3_prepare(pSession->db, zStmt, -1, &pStmt, 0); + + if( rc==SQLITE_OK ){ + SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx; + pDiffCtx->pStmt = pStmt; + pDiffCtx->nOldOff = pTab->nCol; + while( SQLITE_ROW==sqlite3_step(pStmt) ){ + sessionPreupdateOneChange(SQLITE_UPDATE, pSession, pTab); + } + rc = sqlite3_finalize(pStmt); + } + sqlite3_free(zStmt); + } + } + + return rc; +} + +SQLITE_API int SQLITE_STDCALL sqlite3session_diff( + sqlite3_session *pSession, + const char *zFrom, + const char *zTbl, + char **pzErrMsg +){ + const char *zDb = pSession->zDb; + int rc = pSession->rc; + SessionDiffCtx d; + + memset(&d, 0, sizeof(d)); + sessionDiffHooks(pSession, &d); + + sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db)); + if( pzErrMsg ) *pzErrMsg = 0; + if( rc==SQLITE_OK ){ + char *zExpr = 0; + sqlite3 *db = pSession->db; + SessionTable *pTo; /* Table zTbl */ + + /* Locate and if necessary initialize the target table object */ + rc = sessionFindTable(pSession, zTbl, &pTo); + if( pTo==0 ) goto diff_out; + if( sessionInitTable(pSession, pTo) ){ + rc = pSession->rc; + goto diff_out; + } + + /* Check the table schemas match */ + if( rc==SQLITE_OK ){ + int bHasPk = 0; + int bMismatch = 0; + int nCol; /* Columns in zFrom.zTbl */ + u8 *abPK; + 
+
+/*
+** Create a session object. This session object will record changes to
+** database zDb attached to connection db.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_create(
+  sqlite3 *db,                    /* Database handle */
+  const char *zDb,                /* Name of db (e.g. "main") */
+  sqlite3_session **ppSession     /* OUT: New session object */
+){
+  sqlite3_session *pNew;          /* Newly allocated session object */
+  sqlite3_session *pOld;          /* Session object already attached to db */
+  int nDb = sqlite3Strlen30(zDb); /* Length of zDb in bytes */
+
+  /* Zero the output value in case an error occurs. */
+  *ppSession = 0;
+
+  /* Allocate and populate the new session object. */
+  pNew = (sqlite3_session *)sqlite3_malloc(sizeof(sqlite3_session) + nDb + 1);
+  if( !pNew ) return SQLITE_NOMEM;
+  memset(pNew, 0, sizeof(sqlite3_session));
+  pNew->db = db;
+  pNew->zDb = (char *)&pNew[1];
+  pNew->bEnable = 1;
+  memcpy(pNew->zDb, zDb, nDb+1);
+  sessionPreupdateHooks(pNew);
+
+  /* Add the new session object to the linked list of session objects
+  ** attached to database handle $db. Do this under the cover of the db
+  ** handle mutex. */
+  sqlite3_mutex_enter(sqlite3_db_mutex(db));
+  pOld = (sqlite3_session*)sqlite3_preupdate_hook(db, xPreUpdate, (void*)pNew);
+  pNew->pNext = pOld;
+  sqlite3_mutex_leave(sqlite3_db_mutex(db));
+
+  *ppSession = pNew;
+  return SQLITE_OK;
+}
+
+/*
+** Free the list of table objects passed as the first argument. The contents
+** of the changed-rows hash tables are also deleted.
+*/
+static void sessionDeleteTable(SessionTable *pList){
+  SessionTable *pNext;
+  SessionTable *pTab;
+
+  for(pTab=pList; pTab; pTab=pNext){
+    int i;
+    pNext = pTab->pNext;
+    for(i=0; i<pTab->nChange; i++){
+      SessionChange *p;
+      SessionChange *pNextChange;
+      for(p=pTab->apChange[i]; p; p=pNextChange){
+        pNextChange = p->pNext;
+        sqlite3_free(p);
+      }
+    }
+    sqlite3_free((char*)pTab->azCol);  /* cast works around VC++ bug */
+    sqlite3_free(pTab->apChange);
+    sqlite3_free(pTab);
+  }
+}
+
+/*
+** Delete a session object previously allocated using sqlite3session_create().
+*/
+SQLITE_API void SQLITE_STDCALL sqlite3session_delete(sqlite3_session *pSession){
+  sqlite3 *db = pSession->db;
+  sqlite3_session *pHead;
+  sqlite3_session **pp;
+
+  /* Unlink the session from the linked list of sessions attached to the
+  ** database handle. Hold the db mutex while doing so. */
+  sqlite3_mutex_enter(sqlite3_db_mutex(db));
+  pHead = (sqlite3_session*)sqlite3_preupdate_hook(db, 0, 0);
+  for(pp=&pHead; ALWAYS((*pp)!=0); pp=&((*pp)->pNext)){
+    if( (*pp)==pSession ){
+      *pp = (*pp)->pNext;
+      if( pHead ) sqlite3_preupdate_hook(db, xPreUpdate, (void*)pHead);
+      break;
+    }
+  }
+  sqlite3_mutex_leave(sqlite3_db_mutex(db));
+
+  /* Delete all attached table objects. And the contents of their
+  ** associated hash-tables. */
+  sessionDeleteTable(pSession->pTable);
+
+  /* Free the session object itself. */
+  sqlite3_free(pSession);
+}
+
+/*
+** Set a table filter on a session object.
+*/
+SQLITE_API void SQLITE_STDCALL sqlite3session_table_filter(
+  sqlite3_session *pSession,
+  int(*xFilter)(void*, const char*),
+  void *pCtx                      /* First argument passed to xFilter */
+){
+  pSession->bAutoAttach = 1;
+  pSession->pFilterCtx = pCtx;
+  pSession->xTableFilter = xFilter;
+}
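sqlite3session_table_filter() pairs with the auto-attach mechanism: the callback is consulted the first time each table is written to. A sketch of a filter (not part of the vendored diff; the prefix chosen is illustrative):

  /* Sketch only: auto-attach every table except internal "sqlite_*" ones. */
  static int xFilterNoInternal(void *pCtx, const char *zName){
    (void)pCtx;                   /* context pointer unused here */
    return sqlite3_strnicmp(zName, "sqlite_", 7)!=0;
  }

  static void enable_auto_attach(sqlite3_session *pSession){
    sqlite3session_table_filter(pSession, xFilterNoInternal, 0);
  }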
+
+/*
+** Attach a table to a session. All subsequent changes made to the table
+** while the session object is enabled will be recorded.
+**
+** Only tables that have a PRIMARY KEY defined may be attached. It does
+** not matter if the PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias)
+** or not.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_attach(
+  sqlite3_session *pSession,      /* Session object */
+  const char *zName               /* Table name */
+){
+  int rc = SQLITE_OK;
+  sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+
+  if( !zName ){
+    pSession->bAutoAttach = 1;
+  }else{
+    SessionTable *pTab;           /* New table object (if required) */
+    int nName;                    /* Number of bytes in string zName */
+
+    /* First search for an existing entry. If one is found, this call is
+    ** a no-op. Return early. */
+    nName = sqlite3Strlen30(zName);
+    for(pTab=pSession->pTable; pTab; pTab=pTab->pNext){
+      if( 0==sqlite3_strnicmp(pTab->zName, zName, nName+1) ) break;
+    }
+
+    if( !pTab ){
+      /* Allocate new SessionTable object. */
+      pTab = (SessionTable *)sqlite3_malloc(sizeof(SessionTable) + nName + 1);
+      if( !pTab ){
+        rc = SQLITE_NOMEM;
+      }else{
+        /* Populate the new SessionTable object and link it into the list.
+        ** The new object must be linked onto the end of the list, not
+        ** simply added to the start of it in order to ensure that tables
+        ** appear in the correct order when a changeset or patchset is
+        ** eventually generated. */
+        SessionTable **ppTab;
+        memset(pTab, 0, sizeof(SessionTable));
+        pTab->zName = (char *)&pTab[1];
+        memcpy(pTab->zName, zName, nName+1);
+        for(ppTab=&pSession->pTable; *ppTab; ppTab=&(*ppTab)->pNext);
+        *ppTab = pTab;
+      }
+    }
+  }
+
+  sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+  return rc;
+}
+
+/*
+** Ensure that there is room in the buffer to append nByte bytes of data.
+** If not, use sqlite3_realloc() to grow the buffer so that there is.
+**
+** If successful, return zero. Otherwise, if an OOM condition is encountered,
+** set *pRc to SQLITE_NOMEM and return non-zero.
+*/
+static int sessionBufferGrow(SessionBuffer *p, int nByte, int *pRc){
+  if( *pRc==SQLITE_OK && p->nAlloc-p->nBuf<nByte ){
+    u8 *aNew;
+    int nNew = p->nAlloc ? p->nAlloc : 128;
+    do {
+      nNew = nNew*2;
+    }while( nNew<(p->nBuf+nByte) );
+
+    aNew = (u8 *)sqlite3_realloc(p->aBuf, nNew);
+    if( 0==aNew ){
+      *pRc = SQLITE_NOMEM;
+    }else{
+      p->aBuf = aNew;
+      p->nAlloc = nNew;
+    }
+  }
+  return (*pRc!=SQLITE_OK);
+}
+
+/*
+** Append the value passed as the second argument to the buffer passed
+** as the first.
+**
+** This function is a no-op if *pRc is non-zero when it is called.
+** Otherwise, if an error occurs, *pRc is set to an SQLite error code
+** before returning.
+*/ +static void sessionAppendValue(SessionBuffer *p, sqlite3_value *pVal, int *pRc){ + int rc = *pRc; + if( rc==SQLITE_OK ){ + int nByte = 0; + rc = sessionSerializeValue(0, pVal, &nByte); + sessionBufferGrow(p, nByte, &rc); + if( rc==SQLITE_OK ){ + rc = sessionSerializeValue(&p->aBuf[p->nBuf], pVal, 0); + p->nBuf += nByte; + }else{ + *pRc = rc; + } + } +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a single byte to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendByte(SessionBuffer *p, u8 v, int *pRc){ + if( 0==sessionBufferGrow(p, 1, pRc) ){ + p->aBuf[p->nBuf++] = v; + } +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a single varint to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendVarint(SessionBuffer *p, int v, int *pRc){ + if( 0==sessionBufferGrow(p, 9, pRc) ){ + p->nBuf += sessionVarintPut(&p->aBuf[p->nBuf], v); + } +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a blob of data to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendBlob( + SessionBuffer *p, + const u8 *aBlob, + int nBlob, + int *pRc +){ + if( 0==sessionBufferGrow(p, nBlob, pRc) ){ + memcpy(&p->aBuf[p->nBuf], aBlob, nBlob); + p->nBuf += nBlob; + } +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a string to the buffer. All bytes in the string +** up to (but not including) the nul-terminator are written to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendStr( + SessionBuffer *p, + const char *zStr, + int *pRc +){ + int nStr = sqlite3Strlen30(zStr); + if( 0==sessionBufferGrow(p, nStr, pRc) ){ + memcpy(&p->aBuf[p->nBuf], zStr, nStr); + p->nBuf += nStr; + } +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append the string representation of integer iVal +** to the buffer. No nul-terminator is written. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. +*/ +static void sessionAppendInteger( + SessionBuffer *p, /* Buffer to append to */ + int iVal, /* Value to write the string rep. of */ + int *pRc /* IN/OUT: Error code */ +){ + char aBuf[24]; + sqlite3_snprintf(sizeof(aBuf)-1, aBuf, "%d", iVal); + sessionAppendStr(p, aBuf, pRc); +} + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append the string zStr enclosed in quotes (") and +** with any embedded quote characters escaped to the buffer. No +** nul-terminator byte is written. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. 
+*/
+static void sessionAppendIdent(
+  SessionBuffer *p,               /* Buffer to append to */
+  const char *zStr,               /* String to quote, escape and append */
+  int *pRc                        /* IN/OUT: Error code */
+){
+  int nStr = sqlite3Strlen30(zStr)*2 + 2 + 1;
+  if( 0==sessionBufferGrow(p, nStr, pRc) ){
+    char *zOut = (char *)&p->aBuf[p->nBuf];
+    const char *zIn = zStr;
+    *zOut++ = '"';
+    while( *zIn ){
+      if( *zIn=='"' ) *zOut++ = '"';
+      *zOut++ = *(zIn++);
+    }
+    *zOut++ = '"';
+    p->nBuf = (int)((u8 *)zOut - p->aBuf);
+  }
+}
+
+/*
+** This function is a no-op if *pRc is other than SQLITE_OK when it is
+** called. Otherwise, it appends to the buffer the serialized version of
+** the value stored in column iCol of the row that SQL statement pStmt
+** currently points to.
+*/
+static void sessionAppendCol(
+  SessionBuffer *p,               /* Buffer to append to */
+  sqlite3_stmt *pStmt,            /* Handle pointing to row containing value */
+  int iCol,                       /* Column to read value from */
+  int *pRc                        /* IN/OUT: Error code */
+){
+  if( *pRc==SQLITE_OK ){
+    int eType = sqlite3_column_type(pStmt, iCol);
+    sessionAppendByte(p, (u8)eType, pRc);
+    if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+      sqlite3_int64 i;
+      u8 aBuf[8];
+      if( eType==SQLITE_INTEGER ){
+        i = sqlite3_column_int64(pStmt, iCol);
+      }else{
+        double r = sqlite3_column_double(pStmt, iCol);
+        memcpy(&i, &r, 8);
+      }
+      sessionPutI64(aBuf, i);
+      sessionAppendBlob(p, aBuf, 8, pRc);
+    }
+    if( eType==SQLITE_BLOB || eType==SQLITE_TEXT ){
+      u8 *z;
+      int nByte;
+      if( eType==SQLITE_BLOB ){
+        z = (u8 *)sqlite3_column_blob(pStmt, iCol);
+      }else{
+        z = (u8 *)sqlite3_column_text(pStmt, iCol);
+      }
+      nByte = sqlite3_column_bytes(pStmt, iCol);
+      if( z || (eType==SQLITE_BLOB && nByte==0) ){
+        sessionAppendVarint(p, nByte, pRc);
+        sessionAppendBlob(p, z, nByte, pRc);
+      }else{
+        *pRc = SQLITE_NOMEM;
+      }
+    }
+  }
+}
+
+/*
+** This function appends an update change to the buffer (see the comments
+** under "CHANGESET FORMAT" at the top of the file). An update change
+** consists of:
+**
+**   1 byte:  SQLITE_UPDATE (0x17)
+**   n bytes: old.* record (see RECORD FORMAT)
+**   m bytes: new.* record (see RECORD FORMAT)
+**
+** The SessionChange object passed as the third argument contains the
+** values that were stored in the row when the session began (the old.*
+** values). The statement handle passed as the second argument points
+** at the current version of the row (the new.* values).
+**
+** If all of the old.* values are equal to their corresponding new.* value
+** (i.e. nothing has changed), then no data at all is appended to the buffer.
+**
+** Otherwise, the old.* record contains all primary key values and the
+** original values of any fields that have been modified. The new.* record
+** contains the new values of only those fields that have been modified.
+*/
+static int sessionAppendUpdate(
+  SessionBuffer *pBuf,            /* Buffer to append to */
+  int bPatchset,                  /* True for "patchset", 0 for "changeset" */
+  sqlite3_stmt *pStmt,            /* Statement handle pointing at new row */
+  SessionChange *p,               /* Object containing old values */
+  u8 *abPK                        /* Boolean array - true for PK columns */
+){
+  int rc = SQLITE_OK;
+  SessionBuffer buf2 = {0,0,0};   /* Buffer to accumulate new.* record in */
+  int bNoop = 1;                  /* Set to zero if any values are modified */
+  int nRewind = pBuf->nBuf;       /* Initial size of pBuf, to rewind to */
+  int i;                          /* Used to iterate through columns */
+  u8 *pCsr = p->aRecord;          /* Used to iterate through old.* values */
+
+  sessionAppendByte(pBuf, SQLITE_UPDATE, &rc);
+  sessionAppendByte(pBuf, p->bIndirect, &rc);
+  for(i=0; i<sqlite3_column_count(pStmt); i++){
+    int bChanged = 0;
+    int nAdvance;
+    int eType = *pCsr;
+    switch( eType ){
+      case SQLITE_NULL:
+        nAdvance = 1;
+        if( sqlite3_column_type(pStmt, i)!=SQLITE_NULL ){
+          bChanged = 1;
+        }
+        break;
+
+      case SQLITE_FLOAT:
+      case SQLITE_INTEGER: {
+        nAdvance = 9;
+        if( eType==sqlite3_column_type(pStmt, i) ){
+          sqlite3_int64 iVal = sessionGetI64(&pCsr[1]);
+          if( eType==SQLITE_INTEGER ){
+            if( iVal==sqlite3_column_int64(pStmt, i) ) break;
+          }else{
+            double dVal;
+            memcpy(&dVal, &iVal, 8);
+            if( dVal==sqlite3_column_double(pStmt, i) ) break;
+          }
+        }
+        bChanged = 1;
+        break;
+      }
+
+      default: {
+        int n;
+        int nHdr = 1 + sessionVarintGet(&pCsr[1], &n);
+        assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
+        nAdvance = nHdr + n;
+        if( eType==sqlite3_column_type(pStmt, i)
+         && n==sqlite3_column_bytes(pStmt, i)
+         && (n==0 || 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), n))
+        ){
+          break;
+        }
+        bChanged = 1;
+      }
+    }
+
+    /* If at least one field has been modified, this is not a no-op. */
+    if( bChanged ) bNoop = 0;
+
+    /* Add a field to the old.* record. This is omitted if this module is
+    ** currently generating a patchset. */
+    if( bPatchset==0 ){
+      if( bChanged || abPK[i] ){
+        sessionAppendBlob(pBuf, pCsr, nAdvance, &rc);
+      }else{
+        sessionAppendByte(pBuf, 0, &rc);
+      }
+    }
+
+    /* Add a field to the new.* record. Or the only record if currently
+    ** generating a patchset. */
+    if( bChanged || (bPatchset && abPK[i]) ){
+      sessionAppendCol(&buf2, pStmt, i, &rc);
+    }
+
+    pCsr += nAdvance;
+  }
+
+  if( bNoop ){
+    pBuf->nBuf = nRewind;
+  }else{
+    sessionAppendBlob(pBuf, buf2.aBuf, buf2.nBuf, &rc);
+  }
+  sqlite3_free(buf2.aBuf);
+
+  return rc;
+}
+
+/*
+** Append a DELETE change to the buffer passed as the first argument. Use
+** the changeset format if argument bPatchset is zero, or the patchset
+** format otherwise.
+*/
+static int sessionAppendDelete(
+  SessionBuffer *pBuf,            /* Buffer to append to */
+  int bPatchset,                  /* True for "patchset", 0 for "changeset" */
+  SessionChange *p,               /* Object containing old values */
+  int nCol,                       /* Number of columns in table */
+  u8 *abPK                        /* Boolean array - true for PK columns */
+){
+  int rc = SQLITE_OK;
+
+  sessionAppendByte(pBuf, SQLITE_DELETE, &rc);
+  sessionAppendByte(pBuf, p->bIndirect, &rc);
+
+  if( bPatchset==0 ){
+    sessionAppendBlob(pBuf, p->aRecord, p->nRecord, &rc);
+  }else{
+    int i;
+    u8 *a = p->aRecord;
+    for(i=0; i<nCol; i++){
+      u8 *pStart = a;
+      int eType = *a++;
+
+      switch( eType ){
+        case 0:
+        case SQLITE_NULL:
+          assert( abPK[i]==0 );
+          break;
+
+        case SQLITE_FLOAT:
+        case SQLITE_INTEGER:
+          a += 8;
+          break;
+
+        default: {
+          int n;
+          a += sessionVarintGet(a, &n);
+          a += n;
+          break;
+        }
+      }
+      if( abPK[i] ){
+        sessionAppendBlob(pBuf, pStart, (int)(a-pStart), &rc);
+      }
+    }
+    assert( (a - p->aRecord)==p->nRecord );
+  }
+
+  return rc;
+}
+
+/*
+** Formulate and prepare a SELECT statement to retrieve a row from table
+** zTab in database zDb based on its primary key. i.e.
+**
+**   SELECT * FROM zDb.zTab WHERE pk1 = ? AND pk2 = ? AND ...
+*/
+static int sessionSelectStmt(
+  sqlite3 *db,                    /* Database handle */
+  const char *zDb,                /* Database name */
+  const char *zTab,               /* Table name */
+  int nCol,                       /* Number of columns in table */
+  const char **azCol,             /* Names of table columns */
+  u8 *abPK,                       /* PRIMARY KEY array */
+  sqlite3_stmt **ppStmt           /* OUT: Prepared SELECT statement */
+){
+  int rc = SQLITE_OK;
+  int i;
+  const char *zSep = "";
+  SessionBuffer buf = {0, 0, 0};
+
+  sessionAppendStr(&buf, "SELECT * FROM ", &rc);
+  sessionAppendIdent(&buf, zDb, &rc);
+  sessionAppendStr(&buf, ".", &rc);
+  sessionAppendIdent(&buf, zTab, &rc);
+  sessionAppendStr(&buf, " WHERE ", &rc);
+  for(i=0; i<nCol; i++){
+    if( abPK[i] ){
+      sessionAppendStr(&buf, zSep, &rc);
+      sessionAppendIdent(&buf, azCol[i], &rc);
+      sessionAppendStr(&buf, " = ?", &rc);
+      sessionAppendInteger(&buf, i+1, &rc);
+      zSep = " AND ";
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, ppStmt, 0);
+  }
+  sqlite3_free(buf.aBuf);
+  return rc;
+}
+
+/*
+** Bind the PRIMARY KEY values from the change passed in argument pChange
+** to the SELECT statement passed as the first argument. The SELECT statement
+** is as prepared by function sessionSelectStmt().
+**
+** Return SQLITE_OK if all PK values are successfully bound, or an SQLite
+** error code (e.g. SQLITE_NOMEM) otherwise.
+*/
+static int sessionSelectBind(
+  sqlite3_stmt *pSelect,          /* SELECT from sessionSelectStmt() */
+  int nCol,                       /* Number of columns in table */
+  u8 *abPK,                       /* PRIMARY KEY array */
+  SessionChange *pChange          /* Change structure */
+){
+  int i;
+  int rc = SQLITE_OK;
+  u8 *a = pChange->aRecord;
+
+  for(i=0; i<nCol && rc==SQLITE_OK; i++){
+    int eType = *a++;
+
+    switch( eType ){
+      case 0:
+      case SQLITE_NULL:
+        assert( abPK[i]==0 );
+        break;
+
+      case SQLITE_INTEGER: {
+        if( abPK[i] ){
+          i64 iVal = sessionGetI64(a);
+          rc = sqlite3_bind_int64(pSelect, i+1, iVal);
+        }
+        a += 8;
+        break;
+      }
+
+      case SQLITE_FLOAT: {
+        if( abPK[i] ){
+          double rVal;
+          i64 iVal = sessionGetI64(a);
+          memcpy(&rVal, &iVal, 8);
+          rc = sqlite3_bind_double(pSelect, i+1, rVal);
+        }
+        a += 8;
+        break;
+      }
+
+      case SQLITE_TEXT: {
+        int n;
+        a += sessionVarintGet(a, &n);
+        if( abPK[i] ){
+          rc = sqlite3_bind_text(pSelect, i+1, (char *)a, n, SQLITE_TRANSIENT);
+        }
+        a += n;
+        break;
+      }
+
+      default: {
+        int n;
+        assert( eType==SQLITE_BLOB );
+        a += sessionVarintGet(a, &n);
+        if( abPK[i] ){
+          rc = sqlite3_bind_blob(pSelect, i+1, a, n, SQLITE_TRANSIENT);
+        }
+        a += n;
+        break;
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** This function is a no-op if *pRc is set to other than SQLITE_OK when it
+** is called. Otherwise, append a serialized table header (part of the binary
+** changeset format) to buffer pBuf. If an error occurs, set *pRc to an
+** SQLite error code before returning.
+*/
+static void sessionAppendTableHdr(
+  SessionBuffer *pBuf,            /* Append header to this buffer */
+  int bPatchset,                  /* Use the patchset format if true */
+  SessionTable *pTab,             /* Table object to append header for */
+  int *pRc                        /* IN/OUT: Error code */
+){
+  /* Write a table header */
+  sessionAppendByte(pBuf, (bPatchset ? 'P' : 'T'), pRc);
+  sessionAppendVarint(pBuf, pTab->nCol, pRc);
+  sessionAppendBlob(pBuf, pTab->abPK, pTab->nCol, pRc);
+  sessionAppendBlob(pBuf, (u8 *)pTab->zName, (int)strlen(pTab->zName)+1, pRc);
+}
+
+/*
+** Generate either a changeset (if argument bPatchset is zero) or a patchset
+** (if it is non-zero) based on the current contents of the session object
+** passed as the first argument.
+**
+** If no error occurs, SQLITE_OK is returned and the new changeset/patchset
+** stored in output variables *pnChangeset and *ppChangeset. Or, if an error
+** occurs, an SQLite error code is returned and both output variables set
+** to 0.
+*/
+static int sessionGenerateChangeset(
+  sqlite3_session *pSession,      /* Session object */
+  int bPatchset,                  /* True for patchset, false for changeset */
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut,                     /* First argument for xOutput */
+  int *pnChangeset,               /* OUT: Size of buffer at *ppChangeset */
+  void **ppChangeset              /* OUT: Buffer containing changeset */
+){
+  sqlite3 *db = pSession->db;     /* Source database handle */
+  SessionTable *pTab;             /* Used to iterate through attached tables */
+  SessionBuffer buf = {0,0,0};    /* Buffer in which to accumulate changeset */
+  int rc;                         /* Return code */
+
+  assert( xOutput==0 || (pnChangeset==0 && ppChangeset==0) );
+
+  /* Zero the output variables in case an error occurs. If this session
+  ** object is already in the error state (sqlite3_session.rc != SQLITE_OK),
+  ** this call will be a no-op. */
+  if( xOutput==0 ){
+    *pnChangeset = 0;
+    *ppChangeset = 0;
+  }
+
+  if( pSession->rc ) return pSession->rc;
+  rc = sqlite3_exec(pSession->db, "SAVEPOINT changeset", 0, 0, 0);
+  if( rc!=SQLITE_OK ) return rc;
+
+  sqlite3_mutex_enter(sqlite3_db_mutex(db));
+
+  for(pTab=pSession->pTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
+    if( pTab->nEntry ){
+      const char *zName = pTab->zName;
+      int nCol;                   /* Number of columns in table */
+      u8 *abPK;                   /* Primary key array */
+      const char **azCol = 0;     /* Table columns */
+      int i;                      /* Used to iterate through hash buckets */
+      sqlite3_stmt *pSel = 0;     /* SELECT statement to query table pTab */
+      int nRewind = buf.nBuf;     /* Initial size of write buffer */
+      int nNoop;                  /* Size of buffer after writing tbl header */
+
+      /* Check the table schema is still Ok. */
+      rc = sessionTableInfo(db, pSession->zDb, zName, &nCol, 0, &azCol, &abPK);
+      if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){
+        rc = SQLITE_SCHEMA;
+      }
+
+      /* Write a table header */
+      sessionAppendTableHdr(&buf, bPatchset, pTab, &rc);
+
+      /* Build and compile a statement to execute: */
+      if( rc==SQLITE_OK ){
+        rc = sessionSelectStmt(
+            db, pSession->zDb, zName, nCol, azCol, abPK, &pSel);
+      }
+
+      nNoop = buf.nBuf;
+      for(i=0; i<pTab->nChange && rc==SQLITE_OK; i++){
+        SessionChange *p;         /* Used to iterate through changes */
+
+        for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){
+          rc = sessionSelectBind(pSel, nCol, abPK, p);
+          if( rc!=SQLITE_OK ) continue;
+          if( sqlite3_step(pSel)==SQLITE_ROW ){
+            if( p->op==SQLITE_INSERT ){
+              int iCol;
+              sessionAppendByte(&buf, SQLITE_INSERT, &rc);
+              sessionAppendByte(&buf, p->bIndirect, &rc);
+              for(iCol=0; iCol<nCol; iCol++){
+                sessionAppendCol(&buf, pSel, iCol, &rc);
+              }
+            }else{
+              assert( abPK!=0 );
+              rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK);
+            }
+          }else if( p->op!=SQLITE_INSERT ){
+            rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK);
+          }
+          if( rc==SQLITE_OK ){
+            rc = sqlite3_reset(pSel);
+          }
+
+          /* If the buffer is now larger than SESSIONS_STRM_CHUNK_SIZE, pass
+          ** its contents to the xOutput() callback. */
+          if( xOutput
+           && rc==SQLITE_OK
+           && buf.nBuf>nNoop
+           && buf.nBuf>SESSIONS_STRM_CHUNK_SIZE
+          ){
+            rc = xOutput(pOut, (void*)buf.aBuf, buf.nBuf);
+            nNoop = -1;
+            buf.nBuf = 0;
+          }
+
+        }
+      }
+
+      sqlite3_finalize(pSel);
+      if( buf.nBuf==nNoop ){
+        buf.nBuf = nRewind;
+      }
+      sqlite3_free((char*)azCol);  /* cast works around VC++ bug */
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    if( xOutput==0 ){
+      *pnChangeset = buf.nBuf;
+      *ppChangeset = buf.aBuf;
+      buf.aBuf = 0;
+    }else if( buf.nBuf>0 ){
+      rc = xOutput(pOut, (void*)buf.aBuf, buf.nBuf);
+    }
+  }
+
+  sqlite3_free(buf.aBuf);
+  sqlite3_exec(db, "RELEASE changeset", 0, 0, 0);
+  sqlite3_mutex_leave(sqlite3_db_mutex(db));
+  return rc;
+}
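Because sessionGenerateChangeset() only ever hands complete chunks to xOutput(), a streaming consumer can be as simple as an fwrite() wrapper. A sketch (not part of the vendored diff; the FILE*-based context is illustrative):

  #include <stdio.h>

  /* Sketch only: xOutput callback that appends each chunk to a FILE*. */
  static int xOutputToFile(void *pOut, const void *pData, int nData){
    size_t n = fwrite(pData, 1, (size_t)nData, (FILE*)pOut);
    return (n==(size_t)nData) ? SQLITE_OK : SQLITE_IOERR;
  }

  /* Usage: rc = sqlite3session_changeset_strm(pSession, xOutputToFile, fp); */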
+
+/*
+** Obtain a changeset object containing all changes recorded by the
+** session object passed as the first argument.
+**
+** It is the responsibility of the caller to eventually free the buffer
+** using sqlite3_free().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_changeset(
+  sqlite3_session *pSession,      /* Session object */
+  int *pnChangeset,               /* OUT: Size of buffer at *ppChangeset */
+  void **ppChangeset              /* OUT: Buffer containing changeset */
+){
+  return sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset);
+}
+
+/*
+** Streaming version of sqlite3session_changeset().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_changeset_strm(
+  sqlite3_session *pSession,
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut
+){
+  return sessionGenerateChangeset(pSession, 0, xOutput, pOut, 0, 0);
+}
+
+/*
+** Streaming version of sqlite3session_patchset().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_patchset_strm(
+  sqlite3_session *pSession,
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut
+){
+  return sessionGenerateChangeset(pSession, 1, xOutput, pOut, 0, 0);
+}
+
+/*
+** Obtain a patchset object containing all changes recorded by the
+** session object passed as the first argument.
+**
+** It is the responsibility of the caller to eventually free the buffer
+** using sqlite3_free().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_patchset(
+  sqlite3_session *pSession,      /* Session object */
+  int *pnPatchset,                /* OUT: Size of buffer at *ppPatchset */
+  void **ppPatchset               /* OUT: Buffer containing patchset */
+){
+  return sessionGenerateChangeset(pSession, 1, 0, 0, pnPatchset, ppPatchset);
+}
+
+/*
+** Enable or disable the session object passed as the first argument.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_enable(sqlite3_session *pSession, int bEnable){
+  int ret;
+  sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+  if( bEnable>=0 ){
+    pSession->bEnable = bEnable;
+  }
+  ret = pSession->bEnable;
+  sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+  return ret;
+}
+
+/*
+** Set or clear the indirect-change flag on the session object passed as
+** the first argument.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3session_indirect(sqlite3_session *pSession, int bIndirect){
+  int ret;
+  sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db));
+  if( bIndirect>=0 ){
+    pSession->bIndirect = bIndirect;
+  }
+  ret = pSession->bIndirect;
+  sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db));
+  return ret;
+}
+
+/*
+** Return true if there have been no changes to monitored tables recorded
+** by the session object passed as the only argument.
+*/ +SQLITE_API int SQLITE_STDCALL sqlite3session_isempty(sqlite3_session *pSession){ + int ret = 0; + SessionTable *pTab; + + sqlite3_mutex_enter(sqlite3_db_mutex(pSession->db)); + for(pTab=pSession->pTable; pTab && ret==0; pTab=pTab->pNext){ + ret = (pTab->nEntry>0); + } + sqlite3_mutex_leave(sqlite3_db_mutex(pSession->db)); + + return (ret==0); +} + +/* +** Do the work for either sqlite3changeset_start() or start_strm(). +*/ +static int sessionChangesetStart( + sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int nChangeset, /* Size of buffer pChangeset in bytes */ + void *pChangeset /* Pointer to buffer containing changeset */ +){ + sqlite3_changeset_iter *pRet; /* Iterator to return */ + int nByte; /* Number of bytes to allocate for iterator */ + + assert( xInput==0 || (pChangeset==0 && nChangeset==0) ); + + /* Zero the output variable in case an error occurs. */ + *pp = 0; + + /* Allocate and initialize the iterator structure. */ + nByte = sizeof(sqlite3_changeset_iter); + pRet = (sqlite3_changeset_iter *)sqlite3_malloc(nByte); + if( !pRet ) return SQLITE_NOMEM; + memset(pRet, 0, sizeof(sqlite3_changeset_iter)); + pRet->in.aData = (u8 *)pChangeset; + pRet->in.nData = nChangeset; + pRet->in.xInput = xInput; + pRet->in.pIn = pIn; + pRet->in.bEof = (xInput ? 0 : 1); + + /* Populate the output variable and return success. */ + *pp = pRet; + return SQLITE_OK; +} + +/* +** Create an iterator used to iterate through the contents of a changeset. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_start( + sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ + int nChangeset, /* Size of buffer pChangeset in bytes */ + void *pChangeset /* Pointer to buffer containing changeset */ +){ + return sessionChangesetStart(pp, 0, 0, nChangeset, pChangeset); +} + +/* +** Streaming version of sqlite3changeset_start(). +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_start_strm( + sqlite3_changeset_iter **pp, /* OUT: Changeset iterator handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +){ + return sessionChangesetStart(pp, xInput, pIn, 0, 0); +} + +/* +** If the SessionInput object passed as the only argument is a streaming +** object and the buffer is full, discard some data to free up space. +*/ +static void sessionDiscardData(SessionInput *pIn){ + if( pIn->bEof && pIn->xInput && pIn->iNext>=SESSIONS_STRM_CHUNK_SIZE ){ + int nMove = pIn->buf.nBuf - pIn->iNext; + assert( nMove>=0 ); + if( nMove>0 ){ + memmove(pIn->buf.aBuf, &pIn->buf.aBuf[pIn->iNext], nMove); + } + pIn->buf.nBuf -= pIn->iNext; + pIn->iNext = 0; + pIn->nData = pIn->buf.nBuf; + } +} + +/* +** Ensure that there are at least nByte bytes available in the buffer. Or, +** if there are not nByte bytes remaining in the input, that all available +** data is in the buffer. +** +** Return an SQLite error code if an error occurs, or SQLITE_OK otherwise. 
+*/
+static int sessionInputBuffer(SessionInput *pIn, int nByte){
+  int rc = SQLITE_OK;
+  if( pIn->xInput ){
+    while( !pIn->bEof && (pIn->iNext+nByte)>=pIn->nData && rc==SQLITE_OK ){
+      int nNew = SESSIONS_STRM_CHUNK_SIZE;
+
+      if( pIn->bNoDiscard==0 ) sessionDiscardData(pIn);
+      if( SQLITE_OK==sessionBufferGrow(&pIn->buf, nNew, &rc) ){
+        rc = pIn->xInput(pIn->pIn, &pIn->buf.aBuf[pIn->buf.nBuf], &nNew);
+        if( nNew==0 ){
+          pIn->bEof = 1;
+        }else{
+          pIn->buf.nBuf += nNew;
+        }
+      }
+
+      pIn->aData = pIn->buf.aBuf;
+      pIn->nData = pIn->buf.nBuf;
+    }
+  }
+  return rc;
+}
+
+/*
+** When this function is called, *ppRec points to the start of a record
+** that contains nCol values. This function advances the pointer *ppRec
+** until it points to the byte immediately following that record.
+*/
+static void sessionSkipRecord(
+  u8 **ppRec,                     /* IN/OUT: Record pointer */
+  int nCol                        /* Number of values in record */
+){
+  u8 *aRec = *ppRec;
+  int i;
+  for(i=0; i<nCol; i++){
+    u8 eType = *aRec++;
+    if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+      int nByte;
+      aRec += sessionVarintGet((u8*)aRec, &nByte);
+      aRec += nByte;
+    }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+      aRec += 8;
+    }
+  }
+
+  *ppRec = aRec;
+}
+
+/*
+** This function sets the value of the sqlite3_value object passed as the
+** first argument to a copy of the string or blob held in the aData[]
+** buffer. SQLITE_OK is returned if successful, or SQLITE_NOMEM if an OOM
+** error occurs.
+*/
+static int sessionValueSetStr(
+  sqlite3_value *pVal,            /* Set the value of this object */
+  u8 *aData,                      /* Buffer containing string or blob data */
+  int nData,                      /* Size of buffer aData[] in bytes */
+  u8 enc                          /* String encoding (0 for blobs) */
+){
+  /* In theory this code could just pass SQLITE_TRANSIENT as the final
+  ** argument to sqlite3ValueSetStr() and have the copy created
+  ** automatically. But doing so makes it difficult to test the error
+  ** code paths inside this file (well, difficult to test without
+  ** modifying the code in vdbemem.c). */
+  char *zCopy = sqlite3_malloc(nData+1);
+  if( zCopy==0 ) return SQLITE_NOMEM;
+  memcpy(zCopy, aData, nData);
+  sqlite3ValueSetStr(pVal, nData, (char*)zCopy, enc, sqlite3_free);
+  return SQLITE_OK;
+}
+
+/*
+** Deserialize a single record from a buffer in memory. See "RECORD FORMAT"
+** above for details.
+**
+** When this function is called, the input pointer points to the start of
+** the record to deserialize. Assuming no error occurs, it is advanced to
+** one byte past the end of the same record before this function returns.
+** If the argument abPK is NULL, then the record contains nCol values. Or,
+** if abPK is other than NULL, then the record contains only the PK fields
+** (in other words, it is a patchset DELETE record).
+**
+** If successful, each element of the apOut[] array (allocated by the caller)
+** is set to point to an sqlite3_value object containing the value read
+** from the corresponding position in the record. If that value is not
+** included in the record (i.e. because the record is part of an UPDATE change
+** and the field was not modified), the corresponding element of apOut[] is
+** set to NULL.
+**
+** It is the responsibility of the caller to free all sqlite3_value structures
+** using sqlite3_free().
+**
+** If an error occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+** The apOut[] array may have been partially populated in this case.
+*/
+static int sessionReadRecord(
+  SessionInput *pIn,              /* Input data */
+  int nCol,                       /* Number of values in record */
+  u8 *abPK,                       /* Array of primary key flags, or NULL */
+  sqlite3_value **apOut           /* Write values to this array */
+){
+  int i;                          /* Used to iterate through columns */
+  int rc = SQLITE_OK;
+
+  for(i=0; i<nCol && rc==SQLITE_OK; i++){
+    int eType = 0;                /* Type of value (SQLITE_NULL, TEXT etc.) */
+    if( abPK && abPK[i]==0 ) continue;
+    rc = sessionInputBuffer(pIn, 9);
+    if( rc==SQLITE_OK ){
+      eType = pIn->aData[pIn->iNext++];
+    }
+
+    assert( apOut[i]==0 );
+    if( eType ){
+      apOut[i] = sqlite3ValueNew(0);
+      if( !apOut[i] ) rc = SQLITE_NOMEM;
+    }
+
+    if( rc==SQLITE_OK ){
+      u8 *aVal = &pIn->aData[pIn->iNext];
+      if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+        int nByte;
+        pIn->iNext += sessionVarintGet(aVal, &nByte);
+        rc = sessionInputBuffer(pIn, nByte);
+        if( rc==SQLITE_OK ){
+          u8 enc = (eType==SQLITE_TEXT ? SQLITE_UTF8 : 0);
+          rc = sessionValueSetStr(apOut[i],&pIn->aData[pIn->iNext],nByte,enc);
+        }
+        pIn->iNext += nByte;
+      }
+      if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+        sqlite3_int64 v = sessionGetI64(aVal);
+        if( eType==SQLITE_INTEGER ){
+          sqlite3VdbeMemSetInt64(apOut[i], v);
+        }else{
+          double d;
+          memcpy(&d, &v, 8);
+          sqlite3VdbeMemSetDouble(apOut[i], d);
+        }
+        pIn->iNext += 8;
+      }
+    }
+  }
+
+  return rc;
+}
+
+/*
+** The input pointer currently points to the second byte of a table-header.
+** Specifically, to the following:
+**
+**   + number of columns in table (varint)
+**   + array of PK flags (1 byte per column),
+**   + table name (nul terminated).
+**
+** This function ensures that all of the above is present in the input
+** buffer (i.e. that it can be accessed without any calls to xInput()).
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code.
+** The input pointer is not moved.
+*/
+static int sessionChangesetBufferTblhdr(SessionInput *pIn, int *pnByte){
+  int rc = SQLITE_OK;
+  int nCol = 0;
+  int nRead = 0;
+
+  rc = sessionInputBuffer(pIn, 9);
+  if( rc==SQLITE_OK ){
+    nRead += sessionVarintGet(&pIn->aData[pIn->iNext + nRead], &nCol);
+    rc = sessionInputBuffer(pIn, nRead+nCol+100);
+    nRead += nCol;
+  }
+
+  while( rc==SQLITE_OK ){
+    while( (pIn->iNext + nRead)<pIn->nData && pIn->aData[pIn->iNext + nRead] ){
+      nRead++;
+    }
+    if( (pIn->iNext + nRead)<pIn->nData ) break;
+    rc = sessionInputBuffer(pIn, nRead + 100);
+  }
+  *pnByte = nRead+1;
+  return rc;
+}
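sessionInputBuffer() treats an xInput() call that returns zero bytes as end-of-input, so a file-backed reader needs no explicit EOF flag. A sketch (not part of the vendored diff; the FILE*-based context is illustrative):

  #include <stdio.h>

  /* Sketch only: xInput callback reading from a FILE*. Setting *pnData
  ** to zero signals EOF to the session module. */
  static int xInputFromFile(void *pIn, void *pData, int *pnData){
    size_t n = fread(pData, 1, (size_t)*pnData, (FILE*)pIn);
    if( n<(size_t)*pnData && ferror((FILE*)pIn) ) return SQLITE_IOERR;
    *pnData = (int)n;
    return SQLITE_OK;
  }

  /* Usage: rc = sqlite3changeset_start_strm(&pIter, xInputFromFile, fp); */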
+
+/*
+** The input pointer currently points to the first byte of the first field
+** of a record consisting of nCol columns. This function ensures the entire
+** record is buffered. It does not move the input pointer.
+**
+** If successful, SQLITE_OK is returned and *pnByte is set to the size of
+** the record in bytes. Otherwise, an SQLite error code is returned. The
+** final value of *pnByte is undefined in this case.
+*/
+static int sessionChangesetBufferRecord(
+  SessionInput *pIn,              /* Input data */
+  int nCol,                       /* Number of columns in record */
+  int *pnByte                     /* OUT: Size of record in bytes */
+){
+  int rc = SQLITE_OK;
+  int nByte = 0;
+  int i;
+  for(i=0; rc==SQLITE_OK && i<nCol; i++){
+    int eType;
+    rc = sessionInputBuffer(pIn, nByte + 10);
+    if( rc==SQLITE_OK ){
+      eType = pIn->aData[pIn->iNext + nByte++];
+      if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){
+        int n;
+        nByte += sessionVarintGet(&pIn->aData[pIn->iNext+nByte], &n);
+        nByte += n;
+        rc = sessionInputBuffer(pIn, nByte);
+      }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+        nByte += 8;
+      }
+    }
+  }
+  *pnByte = nByte;
+  return rc;
+}
+
+/*
+** The input pointer currently points to the second byte of a table-header.
+** Specifically, to the following:
+**
+**   + number of columns in table (varint)
+**   + array of PK flags (1 byte per column),
+**   + table name (nul terminated).
+**
+** This function decodes the table-header and populates the p->nCol,
+** p->zTab and p->abPK[] variables accordingly. The p->apValue[] array is
+** also allocated or resized according to the new value of p->nCol. The
+** input pointer is left pointing to the byte following the table header.
+**
+** If successful, SQLITE_OK is returned. Otherwise, an SQLite error code
+** is returned and the final values of the various fields enumerated above
+** are undefined.
+*/
+static int sessionChangesetReadTblhdr(sqlite3_changeset_iter *p){
+  int rc;
+  int nCopy;
+  assert( p->rc==SQLITE_OK );
+
+  rc = sessionChangesetBufferTblhdr(&p->in, &nCopy);
+  if( rc==SQLITE_OK ){
+    int nByte;
+    int nVarint;
+    nVarint = sessionVarintGet(&p->in.aData[p->in.iNext], &p->nCol);
+    nCopy -= nVarint;
+    p->in.iNext += nVarint;
+    nByte = p->nCol * sizeof(sqlite3_value*) * 2 + nCopy;
+    p->tblhdr.nBuf = 0;
+    sessionBufferGrow(&p->tblhdr, nByte, &rc);
+  }
+
+  if( rc==SQLITE_OK ){
+    int iPK = sizeof(sqlite3_value*)*p->nCol*2;
+    memset(p->tblhdr.aBuf, 0, iPK);
+    memcpy(&p->tblhdr.aBuf[iPK], &p->in.aData[p->in.iNext], nCopy);
+    p->in.iNext += nCopy;
+  }
+
+  p->apValue = (sqlite3_value**)p->tblhdr.aBuf;
+  p->abPK = (u8*)&p->apValue[p->nCol*2];
+  p->zTab = (char*)&p->abPK[p->nCol];
+  return (p->rc = rc);
+}
+
+/*
+** Advance the changeset iterator to the next change.
+**
+** If both paRec and pnRec are NULL, then this function works like the public
+** API sqlite3changeset_next(). If SQLITE_ROW is returned, then the
+** sqlite3changeset_new() and old() APIs may be used to query for values.
+**
+** Otherwise, if paRec and pnRec are not NULL, then a pointer to the change
+** record is written to *paRec before returning and the number of bytes in
+** the record to *pnRec.
+**
+** Either way, this function returns SQLITE_ROW if the iterator is
+** successfully advanced to the next change in the changeset, an SQLite
+** error code if an error occurs, or SQLITE_DONE if there are no further
+** changes in the changeset.
+*/
+static int sessionChangesetNext(
+  sqlite3_changeset_iter *p,      /* Changeset iterator */
+  u8 **paRec,                     /* If non-NULL, store record pointer here */
+  int *pnRec                      /* If non-NULL, store size of record here */
+){
+  int i;
+  u8 op;
+
+  assert( (paRec==0 && pnRec==0) || (paRec && pnRec) );
+
+  /* If the iterator is in the error-state, return immediately. */
+  if( p->rc!=SQLITE_OK ) return p->rc;
+
+  /* Free the current contents of p->apValue[], if any.
+  */
+  if( p->apValue ){
+    for(i=0; i<p->nCol*2; i++){
+      sqlite3ValueFree(p->apValue[i]);
+    }
+    memset(p->apValue, 0, sizeof(sqlite3_value*)*p->nCol*2);
+  }
+
+  /* Make sure the buffer contains at least 10 bytes of input data, or all
+  ** remaining data if there are less than 10 bytes available. This is
+  ** sufficient either for the 'T' or 'P' byte and the varint that follows
+  ** it, or for the two single byte values otherwise. */
+  p->rc = sessionInputBuffer(&p->in, 2);
+  if( p->rc!=SQLITE_OK ) return p->rc;
+
+  /* If the iterator is already at the end of the changeset, return DONE. */
+  if( p->in.iNext>=p->in.nData ){
+    return SQLITE_DONE;
+  }
+
+  sessionDiscardData(&p->in);
+  p->in.iCurrent = p->in.iNext;
+
+  op = p->in.aData[p->in.iNext++];
+  if( op=='T' || op=='P' ){
+    p->bPatchset = (op=='P');
+    if( sessionChangesetReadTblhdr(p) ) return p->rc;
+    if( (p->rc = sessionInputBuffer(&p->in, 2)) ) return p->rc;
+    p->in.iCurrent = p->in.iNext;
+    op = p->in.aData[p->in.iNext++];
+  }
+
+  p->op = op;
+  p->bIndirect = p->in.aData[p->in.iNext++];
+  if( p->op!=SQLITE_UPDATE && p->op!=SQLITE_DELETE && p->op!=SQLITE_INSERT ){
+    return (p->rc = SQLITE_CORRUPT_BKPT);
+  }
+
+  if( paRec ){
+    int nVal;                     /* Number of values to buffer */
+    if( p->bPatchset==0 && op==SQLITE_UPDATE ){
+      nVal = p->nCol * 2;
+    }else if( p->bPatchset && op==SQLITE_DELETE ){
+      nVal = 0;
+      for(i=0; i<p->nCol; i++) if( p->abPK[i] ) nVal++;
+    }else{
+      nVal = p->nCol;
+    }
+    p->rc = sessionChangesetBufferRecord(&p->in, nVal, pnRec);
+    if( p->rc!=SQLITE_OK ) return p->rc;
+    *paRec = &p->in.aData[p->in.iNext];
+    p->in.iNext += *pnRec;
+  }else{
+
+    /* If this is an UPDATE or DELETE, read the old.* record. */
+    if( p->op!=SQLITE_INSERT && (p->bPatchset==0 || p->op==SQLITE_DELETE) ){
+      u8 *abPK = p->bPatchset ? p->abPK : 0;
+      p->rc = sessionReadRecord(&p->in, p->nCol, abPK, p->apValue);
+      if( p->rc!=SQLITE_OK ) return p->rc;
+    }
+
+    /* If this is an INSERT or UPDATE, read the new.* record. */
+    if( p->op!=SQLITE_DELETE ){
+      p->rc = sessionReadRecord(&p->in, p->nCol, 0, &p->apValue[p->nCol]);
+      if( p->rc!=SQLITE_OK ) return p->rc;
+    }
+
+    if( p->bPatchset && p->op==SQLITE_UPDATE ){
+      /* If this is an UPDATE that is part of a patchset, then all PK and
+      ** modified fields are present in the new.* record. The old.* record
+      ** is currently completely empty. This block shifts the PK fields from
+      ** new.* to old.*, to accommodate the code that reads these arrays. */
+      for(i=0; i<p->nCol; i++){
+        assert( p->apValue[i]==0 );
+        assert( p->abPK[i]==0 || p->apValue[i+p->nCol] );
+        if( p->abPK[i] ){
+          p->apValue[i] = p->apValue[i+p->nCol];
+          p->apValue[i+p->nCol] = 0;
+        }
+      }
+    }
+  }
+
+  return SQLITE_ROW;
+}
+
+/*
+** Advance an iterator created by sqlite3changeset_start() to the next
+** change in the changeset. This function may return SQLITE_ROW, SQLITE_DONE
+** or SQLITE_CORRUPT.
+**
+** This function may not be called on iterators passed to a conflict handler
+** callback by changeset_apply().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_next(sqlite3_changeset_iter *p){
+  return sessionChangesetNext(p, 0, 0);
+}
+
+/*
+** The following function extracts information on the current change
+** from a changeset iterator. It may only be called after changeset_next()
+** has returned SQLITE_ROW.
+*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_op( + sqlite3_changeset_iter *pIter, /* Iterator handle */ + const char **pzTab, /* OUT: Pointer to table name */ + int *pnCol, /* OUT: Number of columns in table */ + int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */ + int *pbIndirect /* OUT: True if change is indirect */ +){ + *pOp = pIter->op; + *pnCol = pIter->nCol; + *pzTab = pIter->zTab; + if( pbIndirect ) *pbIndirect = pIter->bIndirect; + return SQLITE_OK; +} + +/* +** Return information regarding the PRIMARY KEY and number of columns in +** the database table affected by the change that pIter currently points +** to. This function may only be called after changeset_next() returns +** SQLITE_ROW. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_pk( + sqlite3_changeset_iter *pIter, /* Iterator object */ + unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */ + int *pnCol /* OUT: Number of entries in output array */ +){ + *pabPK = pIter->abPK; + if( pnCol ) *pnCol = pIter->nCol; + return SQLITE_OK; +} + +/* +** This function may only be called while the iterator is pointing to an +** SQLITE_UPDATE or SQLITE_DELETE change (see sqlite3changeset_op()). +** Otherwise, SQLITE_MISUSE is returned. +** +** It sets *ppValue to point to an sqlite3_value structure containing the +** iVal'th value in the old.* record. Or, if that particular value is not +** included in the record (because the change is an UPDATE and the field +** was not modified and is not a PK column), set *ppValue to NULL. +** +** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is +** not modified. Otherwise, SQLITE_OK. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_old( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Index of old.* value to retrieve */ + sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */ +){ + if( pIter->op!=SQLITE_UPDATE && pIter->op!=SQLITE_DELETE ){ + return SQLITE_MISUSE; + } + if( iVal<0 || iVal>=pIter->nCol ){ + return SQLITE_RANGE; + } + *ppValue = pIter->apValue[iVal]; + return SQLITE_OK; +} + +/* +** This function may only be called while the iterator is pointing to an +** SQLITE_UPDATE or SQLITE_INSERT change (see sqlite3changeset_op()). +** Otherwise, SQLITE_MISUSE is returned. +** +** It sets *ppValue to point to an sqlite3_value structure containing the +** iVal'th value in the new.* record. Or, if that particular value is not +** included in the record (because the change is an UPDATE and the field +** was not modified), set *ppValue to NULL. +** +** If value iVal is out-of-range, SQLITE_RANGE is returned and *ppValue is +** not modified. Otherwise, SQLITE_OK. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_new( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Index of new.* value to retrieve */ + sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ +){ + if( pIter->op!=SQLITE_UPDATE && pIter->op!=SQLITE_INSERT ){ + return SQLITE_MISUSE; + } + if( iVal<0 || iVal>=pIter->nCol ){ + return SQLITE_RANGE; + } + *ppValue = pIter->apValue[pIter->nCol+iVal]; + return SQLITE_OK; +} + +/* +** The following two macros are used internally. They are similar to the +** sqlite3changeset_new() and sqlite3changeset_old() functions, except that +** they omit all error checking and return a pointer to the requested value. 
+*/
+#define sessionChangesetNew(pIter, iVal) (pIter)->apValue[(pIter)->nCol+(iVal)]
+#define sessionChangesetOld(pIter, iVal) (pIter)->apValue[(iVal)]
+
+/*
+** This function may only be called with a changeset iterator that has been
+** passed to an SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT
+** conflict-handler function. Otherwise, SQLITE_MISUSE is returned.
+**
+** If successful, *ppValue is set to point to an sqlite3_value structure
+** containing the iVal'th value of the conflicting record.
+**
+** If value iVal is out-of-range or some other error occurs, an SQLite error
+** code is returned. Otherwise, SQLITE_OK.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_conflict(
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator */
+  int iVal,                       /* Index of conflict record value to fetch */
+  sqlite3_value **ppValue         /* OUT: Value from conflicting row */
+){
+  if( !pIter->pConflict ){
+    return SQLITE_MISUSE;
+  }
+  if( iVal<0 || iVal>=sqlite3_column_count(pIter->pConflict) ){
+    return SQLITE_RANGE;
+  }
+  *ppValue = sqlite3_column_value(pIter->pConflict, iVal);
+  return SQLITE_OK;
+}
+
+/*
+** This function may only be called with an iterator passed to an
+** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
+** it sets the output variable to the total number of known foreign key
+** violations in the destination database and returns SQLITE_OK.
+**
+** In all other cases this function returns SQLITE_MISUSE.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_fk_conflicts(
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator */
+  int *pnOut                      /* OUT: Number of FK violations */
+){
+  if( pIter->pConflict || pIter->apValue ){
+    return SQLITE_MISUSE;
+  }
+  *pnOut = pIter->nCol;
+  return SQLITE_OK;
+}
+
+/*
+** Finalize an iterator allocated with sqlite3changeset_start().
+**
+** This function may not be called on iterators passed to a conflict handler
+** callback by changeset_apply().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_finalize(sqlite3_changeset_iter *p){
+  int rc = SQLITE_OK;
+  if( p ){
+    int i;                        /* Used to iterate through p->apValue[] */
+    rc = p->rc;
+    if( p->apValue ){
+      for(i=0; i<p->nCol*2; i++) sqlite3ValueFree(p->apValue[i]);
+    }
+    sqlite3_free(p->tblhdr.aBuf);
+    sqlite3_free(p->in.buf.aBuf);
+    sqlite3_free(p);
+  }
+  return rc;
+}
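Taken together, sqlite3changeset_start()/next()/op()/finalize() support a straightforward read loop. A sketch that counts INSERT changes in a changeset blob (not part of the vendored diff):

  /* Sketch only: count the INSERT changes in a changeset. */
  static int count_inserts(int nChangeset, void *pChangeset, int *pnInsert){
    sqlite3_changeset_iter *pIter = 0;
    int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
    *pnInsert = 0;
    if( rc!=SQLITE_OK ) return rc;
    while( SQLITE_ROW==sqlite3changeset_next(pIter) ){
      const char *zTab;
      int nCol, op;
      sqlite3changeset_op(pIter, &zTab, &nCol, &op, 0);
      if( op==SQLITE_INSERT ) (*pnInsert)++;
    }
    /* finalize() reports any error hit by next() */
    return sqlite3changeset_finalize(pIter);
  }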
+
+static int sessionChangesetInvert(
+  SessionInput *pInput,           /* Input changeset */
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut,
+  int *pnInverted,                /* OUT: Number of bytes in output changeset */
+  void **ppInverted               /* OUT: Inverse of pChangeset */
+){
+  int rc = SQLITE_OK;             /* Return value */
+  SessionBuffer sOut;             /* Output buffer */
+  int nCol = 0;                   /* Number of cols in current table */
+  u8 *abPK = 0;                   /* PK array for current table */
+  sqlite3_value **apVal = 0;      /* Space for values for UPDATE inversion */
+  SessionBuffer sPK = {0, 0, 0};  /* PK array for current table */
+
+  /* Initialize the output buffer */
+  memset(&sOut, 0, sizeof(SessionBuffer));
+
+  /* Zero the output variables in case an error occurs. */
+  if( ppInverted ){
+    *ppInverted = 0;
+    *pnInverted = 0;
+  }
+
+  while( 1 ){
+    u8 eType;
+
+    /* Test for EOF. */
+    if( (rc = sessionInputBuffer(pInput, 2)) ) goto finished_invert;
+    if( pInput->iNext>=pInput->nData ) break;
+    eType = pInput->aData[pInput->iNext];
+
+    switch( eType ){
+      case 'T': {
+        /* A 'table' record consists of:
+        **
+        **   * A constant 'T' character,
+        **   * Number of columns in said table (a varint),
+        **   * An array of nCol bytes (sPK),
+        **   * A nul-terminated table name.
+        */
+        int nByte;
+        int nVar;
+        pInput->iNext++;
+        if( (rc = sessionChangesetBufferTblhdr(pInput, &nByte)) ){
+          goto finished_invert;
+        }
+        nVar = sessionVarintGet(&pInput->aData[pInput->iNext], &nCol);
+        sPK.nBuf = 0;
+        sessionAppendBlob(&sPK, &pInput->aData[pInput->iNext+nVar], nCol, &rc);
+        sessionAppendByte(&sOut, eType, &rc);
+        sessionAppendBlob(&sOut, &pInput->aData[pInput->iNext], nByte, &rc);
+        if( rc ) goto finished_invert;
+
+        pInput->iNext += nByte;
+        sqlite3_free(apVal);
+        apVal = 0;
+        abPK = sPK.aBuf;
+        break;
+      }
+
+      case SQLITE_INSERT:
+      case SQLITE_DELETE: {
+        int nByte;
+        int bIndirect = pInput->aData[pInput->iNext+1];
+        int eType2 = (eType==SQLITE_DELETE ? SQLITE_INSERT : SQLITE_DELETE);
+        pInput->iNext += 2;
+        assert( rc==SQLITE_OK );
+        rc = sessionChangesetBufferRecord(pInput, nCol, &nByte);
+        sessionAppendByte(&sOut, eType2, &rc);
+        sessionAppendByte(&sOut, bIndirect, &rc);
+        sessionAppendBlob(&sOut, &pInput->aData[pInput->iNext], nByte, &rc);
+        pInput->iNext += nByte;
+        if( rc ) goto finished_invert;
+        break;
+      }
+
+      case SQLITE_UPDATE: {
+        int iCol;
+
+        if( 0==apVal ){
+          apVal = (sqlite3_value **)sqlite3_malloc(sizeof(apVal[0])*nCol*2);
+          if( 0==apVal ){
+            rc = SQLITE_NOMEM;
+            goto finished_invert;
+          }
+          memset(apVal, 0, sizeof(apVal[0])*nCol*2);
+        }
+
+        /* Write the header for the new UPDATE change. Same as the original. */
+        sessionAppendByte(&sOut, eType, &rc);
+        sessionAppendByte(&sOut, pInput->aData[pInput->iNext+1], &rc);
+
+        /* Read the old.* and new.* records for the update change. */
+        pInput->iNext += 2;
+        rc = sessionReadRecord(pInput, nCol, 0, &apVal[0]);
+        if( rc==SQLITE_OK ){
+          rc = sessionReadRecord(pInput, nCol, 0, &apVal[nCol]);
+        }
+
+        /* Write the new old.* record. Consists of the PK columns from the
+        ** original old.* record, and the other values from the original
+        ** new.* record. */
+        for(iCol=0; iCol<nCol; iCol++){
+          sqlite3_value *pVal = apVal[iCol + (abPK[iCol] ? 0 : nCol)];
+          sessionAppendValue(&sOut, pVal, &rc);
+        }
+
+        /* Write the new new.* record. Consists of a copy of all values
+        ** from the original old.* record, except for the PK columns, which
+        ** are set to "undefined". */
+        for(iCol=0; iCol<nCol; iCol++){
+          sqlite3_value *pVal = (abPK[iCol] ? 0 : apVal[iCol]);
+          sessionAppendValue(&sOut, pVal, &rc);
+        }
+
+        for(iCol=0; iCol<nCol*2; iCol++){
+          sqlite3ValueFree(apVal[iCol]);
+        }
+        memset(apVal, 0, sizeof(apVal[0])*nCol*2);
+        if( rc!=SQLITE_OK ){
+          goto finished_invert;
+        }
+
+        break;
+      }
+
+      default:
+        rc = SQLITE_CORRUPT_BKPT;
+        goto finished_invert;
+    }
+
+    assert( rc==SQLITE_OK );
+    if( xOutput && sOut.nBuf>=SESSIONS_STRM_CHUNK_SIZE ){
+      rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+      sOut.nBuf = 0;
+      if( rc!=SQLITE_OK ) goto finished_invert;
+    }
+  }
+
+  assert( rc==SQLITE_OK );
+  if( pnInverted ){
+    *pnInverted = sOut.nBuf;
+    *ppInverted = sOut.aBuf;
+    sOut.aBuf = 0;
+  }else if( sOut.nBuf>0 ){
+    rc = xOutput(pOut, sOut.aBuf, sOut.nBuf);
+  }
+
+ finished_invert:
+  sqlite3_free(sOut.aBuf);
+  sqlite3_free(apVal);
+  sqlite3_free(sPK.aBuf);
+  return rc;
+}
+
+/*
+** Invert a changeset object.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_invert(
+  int nChangeset,                 /* Number of bytes in input */
+  const void *pChangeset,         /* Input changeset */
+  int *pnInverted,                /* OUT: Number of bytes in output changeset */
+  void **ppInverted               /* OUT: Inverse of pChangeset */
+){
+  SessionInput sInput;
+
+  /* Set up the input stream */
+  memset(&sInput, 0, sizeof(SessionInput));
+  sInput.nData = nChangeset;
+  sInput.aData = (u8*)pChangeset;
+
+  return sessionChangesetInvert(&sInput, 0, 0, pnInverted, ppInverted);
+}
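A round-trip sketch for sqlite3changeset_invert() (not part of the vendored diff); the inverse buffer is allocated with sqlite3_malloc() and must be freed by the caller:

  /* Sketch only: compute the inverse of a changeset, then discard it. */
  static int invert_example(int nChangeset, const void *pChangeset){
    int nInv = 0;
    void *pInv = 0;
    int rc = sqlite3changeset_invert(nChangeset, pChangeset, &nInv, &pInv);
    if( rc==SQLITE_OK ){
      /* Applying pInv with sqlite3changeset_apply() would undo the
      ** effects of applying pChangeset. */
      sqlite3_free(pInv);
    }
    return rc;
  }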
+
+/*
+** Streaming version of sqlite3changeset_invert().
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_invert_strm(
+  int (*xInput)(void *pIn, void *pData, int *pnData),
+  void *pIn,
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut
+){
+  SessionInput sInput;
+  int rc;
+
+  /* Set up the input stream */
+  memset(&sInput, 0, sizeof(SessionInput));
+  sInput.xInput = xInput;
+  sInput.pIn = pIn;
+
+  rc = sessionChangesetInvert(&sInput, xOutput, pOut, 0, 0);
+  sqlite3_free(sInput.buf.aBuf);
+  return rc;
+}
+
+typedef struct SessionApplyCtx SessionApplyCtx;
+struct SessionApplyCtx {
+  sqlite3 *db;
+  sqlite3_stmt *pDelete;          /* DELETE statement */
+  sqlite3_stmt *pUpdate;          /* UPDATE statement */
+  sqlite3_stmt *pInsert;          /* INSERT statement */
+  sqlite3_stmt *pSelect;          /* SELECT statement */
+  int nCol;                       /* Size of azCol[] and abPK[] arrays */
+  const char **azCol;             /* Array of column names */
+  u8 *abPK;                       /* Boolean array - true if column is in PK */
+
+  int bDeferConstraints;          /* True to defer constraints */
+  SessionBuffer constraints;      /* Deferred constraints are stored here */
+};
+
+/*
+** Formulate a statement to DELETE a row from database db. Assuming a table
+** structure like this:
+**
+**   CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The DELETE statement looks like this:
+**
+**   DELETE FROM x WHERE a = :1 AND c = :3 AND (:5 OR b IS :2 AND d IS :4)
+**
+** Variable :5 (nCol+1) is a boolean. It should be set to 0 if we require
+** matching b and d values, or 1 otherwise. The second case comes up if the
+** conflict handler is invoked with NOTFOUND and returns CHANGESET_REPLACE.
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pDelete is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionDeleteRow(
+  sqlite3 *db,                    /* Database handle */
+  const char *zTab,               /* Table name */
+  SessionApplyCtx *p              /* Session changeset-apply context */
+){
+  int i;
+  const char *zSep = "";
+  int rc = SQLITE_OK;
+  SessionBuffer buf = {0, 0, 0};
+  int nPk = 0;
+
+  sessionAppendStr(&buf, "DELETE FROM ", &rc);
+  sessionAppendIdent(&buf, zTab, &rc);
+  sessionAppendStr(&buf, " WHERE ", &rc);
+
+  for(i=0; i<p->nCol; i++){
+    if( p->abPK[i] ){
+      nPk++;
+      sessionAppendStr(&buf, zSep, &rc);
+      sessionAppendIdent(&buf, p->azCol[i], &rc);
+      sessionAppendStr(&buf, " = ?", &rc);
+      sessionAppendInteger(&buf, i+1, &rc);
+      zSep = " AND ";
+    }
+  }
+
+  if( nPk<p->nCol ){
+    sessionAppendStr(&buf, " AND (?", &rc);
+    sessionAppendInteger(&buf, p->nCol+1, &rc);
+    sessionAppendStr(&buf, " OR ", &rc);
+
+    zSep = "";
+    for(i=0; i<p->nCol; i++){
+      if( !p->abPK[i] ){
+        sessionAppendStr(&buf, zSep, &rc);
+        sessionAppendIdent(&buf, p->azCol[i], &rc);
+        sessionAppendStr(&buf, " IS ?", &rc);
+        sessionAppendInteger(&buf, i+1, &rc);
+        zSep = "AND ";
+      }
+    }
+    sessionAppendStr(&buf, ")", &rc);
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pDelete, 0);
+  }
+  sqlite3_free(buf.aBuf);
+
+  return rc;
+}
+
+/*
+** Formulate and prepare a statement to UPDATE a row from database db.
+** Assuming a table structure like this:
+**
+**   CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The UPDATE statement looks like this:
+**
+**   UPDATE x SET
+**     a = CASE WHEN ?2  THEN ?3  ELSE a END,
+**     b = CASE WHEN ?5  THEN ?6  ELSE b END,
+**     c = CASE WHEN ?8  THEN ?9  ELSE c END,
+**     d = CASE WHEN ?11 THEN ?12 ELSE d END
+**   WHERE a = ?1 AND c = ?7 AND (?13 OR 1
+**     AND (?5==0 OR b IS ?4) AND (?11==0 OR d IS ?10)
+**   )
+**
+** For each column in the table, there are three variables to bind:
+**
+**   ?(i*3+1)    The old.* value of the column, if any.
+**   ?(i*3+2)    A boolean flag indicating that the value is being modified.
+**   ?(i*3+3)    The new.* value of the column, if any.
+**
+** Also, a boolean flag that, if set to true, causes the statement to update
+** a row even if the non-PK values do not match. This is required if the
+** conflict-handler is invoked with CHANGESET_DATA and returns
+** CHANGESET_REPLACE. This is variable "?(nCol*3+1)".
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pUpdate is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionUpdateRow(
+  sqlite3 *db,                    /* Database handle */
+  const char *zTab,               /* Table name */
+  SessionApplyCtx *p              /* Session changeset-apply context */
+){
+  int rc = SQLITE_OK;
+  int i;
+  const char *zSep = "";
+  SessionBuffer buf = {0, 0, 0};
+
+  /* Append "UPDATE tbl SET " */
+  sessionAppendStr(&buf, "UPDATE ", &rc);
+  sessionAppendIdent(&buf, zTab, &rc);
+  sessionAppendStr(&buf, " SET ", &rc);
+
+  /* Append the assignments */
+  for(i=0; i<p->nCol; i++){
+    sessionAppendStr(&buf, zSep, &rc);
+    sessionAppendIdent(&buf, p->azCol[i], &rc);
+    sessionAppendStr(&buf, " = CASE WHEN ?", &rc);
+    sessionAppendInteger(&buf, i*3+2, &rc);
+    sessionAppendStr(&buf, " THEN ?", &rc);
+    sessionAppendInteger(&buf, i*3+3, &rc);
+    sessionAppendStr(&buf, " ELSE ", &rc);
+    sessionAppendIdent(&buf, p->azCol[i], &rc);
+    sessionAppendStr(&buf, " END", &rc);
+    zSep = ", ";
+  }
+
+  /* Append the PK part of the WHERE clause */
+  sessionAppendStr(&buf, " WHERE ", &rc);
+  for(i=0; i<p->nCol; i++){
+    if( p->abPK[i] ){
+      sessionAppendIdent(&buf, p->azCol[i], &rc);
+      sessionAppendStr(&buf, " = ?", &rc);
+      sessionAppendInteger(&buf, i*3+1, &rc);
+      sessionAppendStr(&buf, " AND ", &rc);
+    }
+  }
+
+  /* Append the non-PK part of the WHERE clause */
+  sessionAppendStr(&buf, " (?", &rc);
+  sessionAppendInteger(&buf, p->nCol*3+1, &rc);
+  sessionAppendStr(&buf, " OR 1", &rc);
+  for(i=0; i<p->nCol; i++){
+    if( !p->abPK[i] ){
+      sessionAppendStr(&buf, " AND (?", &rc);
+      sessionAppendInteger(&buf, i*3+2, &rc);
+      sessionAppendStr(&buf, "=0 OR ", &rc);
+      sessionAppendIdent(&buf, p->azCol[i], &rc);
+      sessionAppendStr(&buf, " IS ?", &rc);
+      sessionAppendInteger(&buf, i*3+1, &rc);
+      sessionAppendStr(&buf, ")", &rc);
+    }
+  }
+  sessionAppendStr(&buf, ")", &rc);
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pUpdate, 0);
+  }
+  sqlite3_free(buf.aBuf);
+
+  return rc;
+}
+
+/*
+** Formulate and prepare an SQL statement to query table zTab by primary
+** key. Assuming the following table structure:
+**
+**   CREATE TABLE x(a, b, c, d, PRIMARY KEY(a, c));
+**
+** The SELECT statement looks like this:
+**
+**   SELECT * FROM x WHERE a = ?1 AND c = ?3
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pSelect is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionSelectRow(
+  sqlite3 *db,                    /* Database handle */
+  const char *zTab,               /* Table name */
+  SessionApplyCtx *p              /* Session changeset-apply context */
+){
+  return sessionSelectStmt(
+      db, "main", zTab, p->nCol, p->azCol, p->abPK, &p->pSelect);
+}
+
+/*
+** Formulate and prepare an INSERT statement to add a record to table zTab.
+** For example:
+**
+**   INSERT INTO main."zTab" VALUES(?1, ?2, ?3 ...);
+**
+** If successful, SQLITE_OK is returned and SessionApplyCtx.pInsert is left
+** pointing to the prepared version of the SQL statement.
+*/
+static int sessionInsertRow(
+  sqlite3 *db,                    /* Database handle */
+  const char *zTab,               /* Table name */
+  SessionApplyCtx *p              /* Session changeset-apply context */
+){
+  int rc = SQLITE_OK;
+  int i;
+  SessionBuffer buf = {0, 0, 0};
+
+  sessionAppendStr(&buf, "INSERT INTO main.", &rc);
+  sessionAppendIdent(&buf, zTab, &rc);
+  sessionAppendStr(&buf, " VALUES(?", &rc);
+  for(i=1; i<p->nCol; i++){
+    sessionAppendStr(&buf, ", ?", &rc);
+  }
+  sessionAppendStr(&buf, ")", &rc);
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_prepare_v2(db, (char *)buf.aBuf, buf.nBuf, &p->pInsert, 0);
+  }
+  sqlite3_free(buf.aBuf);
+  return rc;
+}
+
+/*
+** A wrapper around sqlite3_bind_value() that detects an extra problem.
+** See comments in the body of this function for details.
+*/
+static int sessionBindValue(
+  sqlite3_stmt *pStmt,            /* Statement to bind value to */
+  int i,                          /* Parameter number to bind to */
+  sqlite3_value *pVal             /* Value to bind */
+){
+  int eType = sqlite3_value_type(pVal);
+  /* COVERAGE: The (pVal->z==0) branch is never true using current versions
+  ** of SQLite. If a malloc fails in an sqlite3_value_xxx() function, either
+  ** the (pVal->z) variable remains as it was or the type of the value is
+  ** set to SQLITE_NULL. */
+  if( (eType==SQLITE_TEXT || eType==SQLITE_BLOB) && pVal->z==0 ){
+    /* This condition occurs when an earlier OOM in a call to
+    ** sqlite3_value_text() or sqlite3_value_blob() (perhaps from within
+    ** a conflict-handler) has zeroed the pVal->z pointer. Return NOMEM. */
+    return SQLITE_NOMEM;
+  }
+  return sqlite3_bind_value(pStmt, i, pVal);
+}
+
+/*
+** Iterator pIter must point to an SQLITE_INSERT entry. This function
+** transfers new.* values from the current iterator entry to statement
+** pStmt. The table being inserted into has nCol columns.
+**
+** New.* value $i from the iterator is bound to variable ($i+1) of
+** statement pStmt. If parameter abPK is NULL, all values from 0 to (nCol-1)
+** are transferred to the statement. Otherwise, if abPK is not NULL, it points
+** to an array nCol elements in size. In this case only those values for
+** which abPK[$i] is true are read from the iterator and bound to the
+** statement.
+**
+** An SQLite error code is returned if an error occurs. Otherwise, SQLITE_OK.
+*/
+static int sessionBindRow(
+  sqlite3_changeset_iter *pIter,  /* Iterator to read values from */
+  int(*xValue)(sqlite3_changeset_iter *, int, sqlite3_value **),
+  int nCol,                       /* Number of columns */
+  u8 *abPK,                       /* If not NULL, bind only if true */
+  sqlite3_stmt *pStmt             /* Bind values to this statement */
+){
+  int i;
+  int rc = SQLITE_OK;
+
+  /* Neither sqlite3changeset_old() nor sqlite3changeset_new() can fail if
+  ** the argument iterator points to a suitable entry. Make sure that xValue
+  ** is one of these to guarantee that it is safe to ignore the return
+  ** value in the code below.
+  */
+  assert( xValue==sqlite3changeset_old || xValue==sqlite3changeset_new );
+
+  for(i=0; rc==SQLITE_OK && i<nCol; i++){
+    if( !abPK || abPK[i] ){
+      sqlite3_value *pVal;
+      (void)xValue(pIter, i, &pVal);
+      rc = sessionBindValue(pStmt, i+1, pVal);
+    }
+  }
+  return rc;
+}
+
+/*
+** SQL statement pSelect is as generated by the sessionSelectRow() function.
+** This function binds to it the primary key values from the change that
+** changeset iterator pIter currently points to, then attempts to seek to
+** the corresponding table entry. If a row is found, the SELECT statement is
+** left pointing at the row and SQLITE_ROW is returned. Otherwise, if no row
+** is found and no error occurs, the statement is reset and SQLITE_OK is
+** returned. If an error occurs, the statement is reset and an SQLite error
+** code is returned.
+**
+** If this function returns SQLITE_ROW, the caller must eventually reset()
+** statement pSelect. If any other value is returned, the statement does
+** not require a reset().
+**
+** If the iterator currently points to an INSERT record, bind values from the
+** new.* record to the SELECT statement. Or, if it points to a DELETE or
+** UPDATE, bind values from the old.* record.
+*/
+static int sessionSeekToRow(
+  sqlite3 *db,                    /* Database handle */
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator */
+  u8 *abPK,                       /* Primary key flags array */
+  sqlite3_stmt *pSelect           /* SELECT statement from sessionSelectRow() */
+){
+  int rc;                         /* Return code */
+  int nCol;                       /* Number of columns in table */
+  int op;                         /* Changeset operation (SQLITE_UPDATE etc.) */
+  const char *zDummy;             /* Unused */
+
+  sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0);
+  rc = sessionBindRow(pIter,
+      op==SQLITE_DELETE ? sqlite3changeset_old : sqlite3changeset_new,
+      nCol, abPK, pSelect
+  );
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_step(pSelect);
+    if( rc!=SQLITE_ROW ) rc = sqlite3_reset(pSelect);
+  }
+
+  return rc;
+}
+
+/*
+** Invoke the conflict handler for the change that the changeset iterator
+** passed as the third argument currently points to. Argument eType must
+** be either SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT.
+**
+** If a row with the PRIMARY KEY of the current change is found in the
+** database, the conflict handler is invoked with type eType. Otherwise,
+** it is invoked with type (eType+1) - i.e. SQLITE_CHANGESET_NOTFOUND or
+** SQLITE_CHANGESET_CONSTRAINT. As a special case, if constraint
+** processing is currently deferred, a CONFLICT change is appended to the
+** SessionApplyCtx.constraints buffer to be retried later instead of
+** invoking the handler.
+**
+** If the conflict handler returns SQLITE_CHANGESET_REPLACE, *pbReplace is
+** set to non-zero before returning SQLITE_OK. If an error occurs, or if
+** the conflict handler returns an invalid value, an SQLite error code is
+** returned.
+*/
+static int sessionConflictHandler(
+  int eType,                      /* Either CHANGESET_DATA or CONFLICT */
+  SessionApplyCtx *p,             /* changeset_apply() handler context */
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator */
+  int(*xConflict)(void *, int, sqlite3_changeset_iter *),
+  void *pCtx,                     /* First argument for conflict handler */
+  int *pbReplace                  /* OUT: Set to true if PK row is found */
+){
+  int res = 0;                    /* Value returned by conflict handler */
+  int rc;
+  int nCol;
+  int op;
+  const char *zDummy;
+
+  sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0);
+
+  assert( eType==SQLITE_CHANGESET_CONFLICT || eType==SQLITE_CHANGESET_DATA );
+  assert( SQLITE_CHANGESET_CONFLICT+1==SQLITE_CHANGESET_CONSTRAINT );
+  assert( SQLITE_CHANGESET_DATA+1==SQLITE_CHANGESET_NOTFOUND );
+
+  /* Bind the new.* PRIMARY KEY values to the SELECT statement. */
+  if( pbReplace ){
+    rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect);
+  }else{
+    rc = SQLITE_OK;
+  }
+
+  if( rc==SQLITE_ROW ){
+    /* There exists another row with the new.* primary key. */
+    pIter->pConflict = p->pSelect;
+    res = xConflict(pCtx, eType, pIter);
+    pIter->pConflict = 0;
+    rc = sqlite3_reset(p->pSelect);
+  }else if( rc==SQLITE_OK ){
+    if( p->bDeferConstraints && eType==SQLITE_CHANGESET_CONFLICT ){
+      /* Instead of invoking the conflict handler, append the change blob
+      ** to the SessionApplyCtx.constraints buffer. */
+      u8 *aBlob = &pIter->in.aData[pIter->in.iCurrent];
+      int nBlob = pIter->in.iNext - pIter->in.iCurrent;
+      sessionAppendBlob(&p->constraints, aBlob, nBlob, &rc);
+      res = SQLITE_CHANGESET_OMIT;
+    }else{
+      /* No other row with the new.* primary key. */
+      res = xConflict(pCtx, eType+1, pIter);
+      if( res==SQLITE_CHANGESET_REPLACE ) rc = SQLITE_MISUSE;
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    switch( res ){
+      case SQLITE_CHANGESET_REPLACE:
+        assert( pbReplace );
+        *pbReplace = 1;
+        break;
+
+      case SQLITE_CHANGESET_OMIT:
+        break;
+
+      case SQLITE_CHANGESET_ABORT:
+        rc = SQLITE_ABORT;
+        break;
+
+      default:
+        rc = SQLITE_MISUSE;
+        break;
+    }
+  }
+
+  return rc;
+}
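The valid return values for an application conflict handler follow from the switch at the end of sessionConflictHandler(). A sketch of a "remote wins" handler (not part of the vendored diff); note that SQLITE_CHANGESET_REPLACE is only legal for the DATA and CONFLICT cases, where a conflicting row was actually found:

  /* Sketch only: prefer the incoming change when a conflicting row
  ** exists, and skip changes that cannot be applied. */
  static int xConflictRemoteWins(
    void *pCtx,                   /* Unused */
    int eConflict,                /* Type of conflict */
    sqlite3_changeset_iter *pIter /* Points to the conflicting change */
  ){
    (void)pCtx;
    (void)pIter;
    if( eConflict==SQLITE_CHANGESET_DATA
     || eConflict==SQLITE_CHANGESET_CONFLICT
    ){
      return SQLITE_CHANGESET_REPLACE;  /* overwrite the existing row */
    }
    return SQLITE_CHANGESET_OMIT;       /* NOTFOUND, CONSTRAINT, FK */
  }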
Conflict handling
+    ** is not required if:
+    **
+    **   * this is a patchset, or
+    **   * (pbRetry==0), or
+    **   * all columns of the table are PK columns (in this case there is
+    **     no (nCol+1) variable to bind to).
+    */
+    u8 *abPK = (pIter->bPatchset ? p->abPK : 0);
+    rc = sessionBindRow(pIter, sqlite3changeset_old, nCol, abPK, p->pDelete);
+    if( rc==SQLITE_OK && sqlite3_bind_parameter_count(p->pDelete)>nCol ){
+      rc = sqlite3_bind_int(p->pDelete, nCol+1, (pbRetry==0 || abPK));
+    }
+    if( rc!=SQLITE_OK ) return rc;
+
+    sqlite3_step(p->pDelete);
+    rc = sqlite3_reset(p->pDelete);
+    if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){
+      rc = sessionConflictHandler(
+          SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry
+      );
+    }else if( (rc&0xff)==SQLITE_CONSTRAINT ){
+      rc = sessionConflictHandler(
+          SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, 0
+      );
+    }
+
+  }else if( op==SQLITE_UPDATE ){
+    int i;
+
+    /* Bind values to the UPDATE statement. */
+    for(i=0; rc==SQLITE_OK && i<nCol; i++){
+      sqlite3_value *pOld = sessionChangesetOld(pIter, i);
+      sqlite3_value *pNew = sessionChangesetNew(pIter, i);
+
+      sqlite3_bind_int(p->pUpdate, i*3+2, !!pNew);
+      if( pOld ){
+        rc = sessionBindValue(p->pUpdate, i*3+1, pOld);
+      }
+      if( rc==SQLITE_OK && pNew ){
+        rc = sessionBindValue(p->pUpdate, i*3+3, pNew);
+      }
+    }
+    if( rc==SQLITE_OK ){
+      sqlite3_bind_int(p->pUpdate, nCol*3+1, pbRetry==0 || pIter->bPatchset);
+    }
+    if( rc!=SQLITE_OK ) return rc;
+
+    /* Attempt the UPDATE. In the case of a NOTFOUND or DATA conflict,
+    ** the result will be SQLITE_OK with 0 rows modified. */
+    sqlite3_step(p->pUpdate);
+    rc = sqlite3_reset(p->pUpdate);
+
+    if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){
+      /* A NOTFOUND or DATA error. Search the table to see if it contains
+      ** a row with a matching primary key. If so, this is a DATA conflict.
+      ** Otherwise, if there is no primary key match, it is a NOTFOUND. */
+
+      rc = sessionConflictHandler(
+          SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry
+      );
+
+    }else if( (rc&0xff)==SQLITE_CONSTRAINT ){
+      /* This is always a CONSTRAINT conflict. */
+      rc = sessionConflictHandler(
+          SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, 0
+      );
+    }
+
+  }else{
+    assert( op==SQLITE_INSERT );
+    rc = sessionBindRow(pIter, sqlite3changeset_new, nCol, 0, p->pInsert);
+    if( rc!=SQLITE_OK ) return rc;
+
+    sqlite3_step(p->pInsert);
+    rc = sqlite3_reset(p->pInsert);
+    if( (rc&0xff)==SQLITE_CONSTRAINT ){
+      rc = sessionConflictHandler(
+          SQLITE_CHANGESET_CONFLICT, p, pIter, xConflict, pCtx, pbReplace
+      );
+    }
+  }
+
+  return rc;
+}
+
+/*
+** Attempt to apply the change that the iterator passed as the first argument
+** currently points to to the database. If a conflict is encountered, invoke
+** the conflict handler callback.
+**
+** The difference between this function and sessionApplyOne() is that this
+** function handles the case where the conflict-handler is invoked and
+** returns SQLITE_CHANGESET_REPLACE - indicating that the change should be
+** retried in some manner.
+*/
+static int sessionApplyOneWithRetry(
+  sqlite3 *db,                    /* Apply change to "main" db of this handle */
+  sqlite3_changeset_iter *pIter,  /* Changeset iterator to read change from */
+  SessionApplyCtx *pApply,        /* Apply context */
+  int(*xConflict)(void*, int, sqlite3_changeset_iter*),
+  void *pCtx                      /* First argument passed to xConflict */
+){
+  int bReplace = 0;
+  int bRetry = 0;
+  int rc;
+
+  rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, &bReplace, &bRetry);
+  assert( rc==SQLITE_OK || (bRetry==0 && bReplace==0) );
+
+  /* If the bRetry flag is set, the change has not been applied due to an
+  ** SQLITE_CHANGESET_DATA problem (i.e.
this is an UPDATE or DELETE and + ** a row with the correct PK is present in the db, but one or more other + ** fields do not contain the expected values) and the conflict handler + ** returned SQLITE_CHANGESET_REPLACE. In this case retry the operation, + ** but pass NULL as the final argument so that sessionApplyOneOp() ignores + ** the SQLITE_CHANGESET_DATA problem. */ + if( bRetry ){ + assert( pIter->op==SQLITE_UPDATE || pIter->op==SQLITE_DELETE ); + rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0); + } + + /* If the bReplace flag is set, the change is an INSERT that has not + ** been performed because the database already contains a row with the + ** specified primary key and the conflict handler returned + ** SQLITE_CHANGESET_REPLACE. In this case remove the conflicting row + ** before reattempting the INSERT. */ + else if( bReplace ){ + assert( pIter->op==SQLITE_INSERT ); + rc = sqlite3_exec(db, "SAVEPOINT replace_op", 0, 0, 0); + if( rc==SQLITE_OK ){ + rc = sessionBindRow(pIter, + sqlite3changeset_new, pApply->nCol, pApply->abPK, pApply->pDelete); + sqlite3_bind_int(pApply->pDelete, pApply->nCol+1, 1); + } + if( rc==SQLITE_OK ){ + sqlite3_step(pApply->pDelete); + rc = sqlite3_reset(pApply->pDelete); + } + if( rc==SQLITE_OK ){ + rc = sessionApplyOneOp(pIter, pApply, xConflict, pCtx, 0, 0); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_exec(db, "RELEASE replace_op", 0, 0, 0); + } + } + + return rc; +} + +/* +** Retry the changes accumulated in the pApply->constraints buffer. +*/ +static int sessionRetryConstraints( + sqlite3 *db, + int bPatchset, + const char *zTab, + SessionApplyCtx *pApply, + int(*xConflict)(void*, int, sqlite3_changeset_iter*), + void *pCtx /* First argument passed to xConflict */ +){ + int rc = SQLITE_OK; + + while( pApply->constraints.nBuf ){ + sqlite3_changeset_iter *pIter2 = 0; + SessionBuffer cons = pApply->constraints; + memset(&pApply->constraints, 0, sizeof(SessionBuffer)); + + rc = sessionChangesetStart(&pIter2, 0, 0, cons.nBuf, cons.aBuf); + if( rc==SQLITE_OK ){ + int nByte = 2*pApply->nCol*sizeof(sqlite3_value*); + int rc2; + pIter2->bPatchset = bPatchset; + pIter2->zTab = (char*)zTab; + pIter2->nCol = pApply->nCol; + pIter2->abPK = pApply->abPK; + sessionBufferGrow(&pIter2->tblhdr, nByte, &rc); + pIter2->apValue = (sqlite3_value**)pIter2->tblhdr.aBuf; + if( rc==SQLITE_OK ) memset(pIter2->apValue, 0, nByte); + + while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter2) ){ + rc = sessionApplyOneWithRetry(db, pIter2, pApply, xConflict, pCtx); + } + + rc2 = sqlite3changeset_finalize(pIter2); + if( rc==SQLITE_OK ) rc = rc2; + } + assert( pApply->bDeferConstraints || pApply->constraints.nBuf==0 ); + + sqlite3_free(cons.aBuf); + if( rc!=SQLITE_OK ) break; + if( pApply->constraints.nBuf>=cons.nBuf ){ + /* No progress was made on the last round. */ + pApply->bDeferConstraints = 0; + } + } + + return rc; +} + +/* +** Argument pIter is a changeset iterator that has been initialized, but +** not yet passed to sqlite3changeset_next(). This function applies the +** changeset to the main database attached to handle "db". The supplied +** conflict handler callback is invoked to resolve any conflicts encountered +** while applying the change. 
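+**
+** As an illustrative sketch only (an editor's example, not part of the
+** SQLite sources): a minimal conflict handler passed to this function
+** might take the incoming change whenever the local row differs, and
+** skip everything else:
+**
+**   static int xConflict(void *pCtx, int eConflict,
+**                        sqlite3_changeset_iter *pIter){
+**     (void)pCtx; (void)pIter;
+**     if( eConflict==SQLITE_CHANGESET_DATA ) return SQLITE_CHANGESET_REPLACE;
+**     return SQLITE_CHANGESET_OMIT;
+**   }
+**
+** Returning SQLITE_CHANGESET_ABORT instead would cause the whole apply
+** operation to be rolled back, as described above.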
+*/ +static int sessionChangesetApply( + sqlite3 *db, /* Apply change to "main" db of this handle */ + sqlite3_changeset_iter *pIter, /* Changeset to apply */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of fifth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +){ + int schemaMismatch = 0; + int rc; /* Return code */ + const char *zTab = 0; /* Name of current table */ + int nTab = 0; /* Result of sqlite3Strlen30(zTab) */ + SessionApplyCtx sApply; /* changeset_apply() context object */ + int bPatchset; + + assert( xConflict!=0 ); + + pIter->in.bNoDiscard = 1; + memset(&sApply, 0, sizeof(sApply)); + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); + if( rc==SQLITE_OK ){ + rc = sqlite3_exec(db, "PRAGMA defer_foreign_keys = 1", 0, 0, 0); + } + while( rc==SQLITE_OK && SQLITE_ROW==sqlite3changeset_next(pIter) ){ + int nCol; + int op; + const char *zNew; + + sqlite3changeset_op(pIter, &zNew, &nCol, &op, 0); + + if( zTab==0 || sqlite3_strnicmp(zNew, zTab, nTab+1) ){ + u8 *abPK; + + rc = sessionRetryConstraints( + db, pIter->bPatchset, zTab, &sApply, xConflict, pCtx + ); + if( rc!=SQLITE_OK ) break; + + sqlite3_free((char*)sApply.azCol); /* cast works around VC++ bug */ + sqlite3_finalize(sApply.pDelete); + sqlite3_finalize(sApply.pUpdate); + sqlite3_finalize(sApply.pInsert); + sqlite3_finalize(sApply.pSelect); + memset(&sApply, 0, sizeof(sApply)); + sApply.db = db; + sApply.bDeferConstraints = 1; + + /* If an xFilter() callback was specified, invoke it now. If the + ** xFilter callback returns zero, skip this table. If it returns + ** non-zero, proceed. */ + schemaMismatch = (xFilter && (0==xFilter(pCtx, zNew))); + if( schemaMismatch ){ + zTab = sqlite3_mprintf("%s", zNew); + if( zTab==0 ){ + rc = SQLITE_NOMEM; + break; + } + nTab = (int)strlen(zTab); + sApply.azCol = (const char **)zTab; + }else{ + sqlite3changeset_pk(pIter, &abPK, 0); + rc = sessionTableInfo( + db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK + ); + if( rc!=SQLITE_OK ) break; + + if( sApply.nCol==0 ){ + schemaMismatch = 1; + sqlite3_log(SQLITE_SCHEMA, + "sqlite3changeset_apply(): no such table: %s", zTab + ); + } + else if( sApply.nCol!=nCol ){ + schemaMismatch = 1; + sqlite3_log(SQLITE_SCHEMA, + "sqlite3changeset_apply(): table %s has %d columns, expected %d", + zTab, sApply.nCol, nCol + ); + } + else if( memcmp(sApply.abPK, abPK, nCol)!=0 ){ + schemaMismatch = 1; + sqlite3_log(SQLITE_SCHEMA, "sqlite3changeset_apply(): " + "primary key mismatch for table %s", zTab + ); + } + else if( + (rc = sessionSelectRow(db, zTab, &sApply)) + || (rc = sessionUpdateRow(db, zTab, &sApply)) + || (rc = sessionDeleteRow(db, zTab, &sApply)) + || (rc = sessionInsertRow(db, zTab, &sApply)) + ){ + break; + } + nTab = sqlite3Strlen30(zTab); + } + } + + /* If there is a schema mismatch on the current table, proceed to the + ** next change. A log message has already been issued. 
*/
+    if( schemaMismatch ) continue;
+
+    rc = sessionApplyOneWithRetry(db, pIter, &sApply, xConflict, pCtx);
+  }
+
+  bPatchset = pIter->bPatchset;
+  if( rc==SQLITE_OK ){
+    rc = sqlite3changeset_finalize(pIter);
+  }else{
+    sqlite3changeset_finalize(pIter);
+  }
+
+  if( rc==SQLITE_OK ){
+    rc = sessionRetryConstraints(db, bPatchset, zTab, &sApply, xConflict, pCtx);
+  }
+
+  if( rc==SQLITE_OK ){
+    int nFk, notUsed;
+    sqlite3_db_status(db, SQLITE_DBSTATUS_DEFERRED_FKS, &nFk, &notUsed, 0);
+    if( nFk!=0 ){
+      int res = SQLITE_CHANGESET_ABORT;
+      sqlite3_changeset_iter sIter;
+      memset(&sIter, 0, sizeof(sIter));
+      sIter.nCol = nFk;
+      res = xConflict(pCtx, SQLITE_CHANGESET_FOREIGN_KEY, &sIter);
+      if( res!=SQLITE_CHANGESET_OMIT ){
+        rc = SQLITE_CONSTRAINT;
+      }
+    }
+  }
+  sqlite3_exec(db, "PRAGMA defer_foreign_keys = 0", 0, 0, 0);
+
+  if( rc==SQLITE_OK ){
+    rc = sqlite3_exec(db, "RELEASE changeset_apply", 0, 0, 0);
+  }else{
+    sqlite3_exec(db, "ROLLBACK TO changeset_apply", 0, 0, 0);
+    sqlite3_exec(db, "RELEASE changeset_apply", 0, 0, 0);
+  }
+
+  sqlite3_finalize(sApply.pInsert);
+  sqlite3_finalize(sApply.pDelete);
+  sqlite3_finalize(sApply.pUpdate);
+  sqlite3_finalize(sApply.pSelect);
+  sqlite3_free((char*)sApply.azCol);        /* cast works around VC++ bug */
+  sqlite3_free((char*)sApply.constraints.aBuf);
+  sqlite3_mutex_leave(sqlite3_db_mutex(db));
+  return rc;
+}
+
+/*
+** Apply the changeset passed via pChangeset/nChangeset to the main database
+** attached to handle "db". Invoke the supplied conflict handler callback
+** to resolve any conflicts encountered while applying the change.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_apply(
+  sqlite3 *db,                    /* Apply change to "main" db of this handle */
+  int nChangeset,                 /* Size of changeset in bytes */
+  void *pChangeset,               /* Changeset blob */
+  int(*xFilter)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+    const char *zTab              /* Table name */
+  ),
+  int(*xConflict)(
+    void *pCtx,                   /* Copy of fifth arg to _apply() */
+    int eConflict,                /* DATA, MISSING, CONFLICT, CONSTRAINT */
+    sqlite3_changeset_iter *p     /* Handle describing change and conflict */
+  ),
+  void *pCtx                      /* First argument passed to xConflict */
+){
+  sqlite3_changeset_iter *pIter;  /* Iterator to skip through changeset */
+  int rc = sqlite3changeset_start(&pIter, nChangeset, pChangeset);
+  if( rc==SQLITE_OK ){
+    rc = sessionChangesetApply(db, pIter, xFilter, xConflict, pCtx);
+  }
+  return rc;
+}
+
+/*
+** Apply the changeset passed via xInput/pIn to the main database
+** attached to handle "db". Invoke the supplied conflict handler callback
+** to resolve any conflicts encountered while applying the change.
+*/
+SQLITE_API int SQLITE_STDCALL sqlite3changeset_apply_strm(
+  sqlite3 *db,                    /* Apply change to "main" db of this handle */
+  int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
+  void *pIn,                      /* First arg for xInput */
+  int(*xFilter)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+    const char *zTab              /* Table name */
+  ),
+  int(*xConflict)(
+    void *pCtx,                   /* Copy of sixth arg to _apply() */
+    int eConflict,                /* DATA, MISSING, CONFLICT, CONSTRAINT */
+    sqlite3_changeset_iter *p     /* Handle describing change and conflict */
+  ),
+  void *pCtx                      /* First argument passed to xConflict */
+){
+  sqlite3_changeset_iter *pIter;  /* Iterator to skip through changeset */
+  int rc = sqlite3changeset_start_strm(&pIter, xInput, pIn);
+  if( rc==SQLITE_OK ){
+    rc = sessionChangesetApply(db, pIter, xFilter, xConflict, pCtx);
+  }
+  return rc;
+}
+
+/*
+** sqlite3_changegroup handle.
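+**
+** A typical usage pattern, sketched here for illustration only (an
+** editor's example, not part of the SQLite sources; error handling
+** elided):
+**
+**   sqlite3_changegroup *pGrp;
+**   int nOut = 0; void *pOut = 0;
+**   sqlite3changegroup_new(&pGrp);
+**   sqlite3changegroup_add(pGrp, nA, pA);
+**   sqlite3changegroup_add(pGrp, nB, pB);
+**   sqlite3changegroup_output(pGrp, &nOut, &pOut);
+**   sqlite3changegroup_delete(pGrp);
+**
+** where (nA,pA) and (nB,pB) are two existing changeset buffers. The output
+** buffer pOut must eventually be released using sqlite3_free().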
+*/ +struct sqlite3_changegroup { + int rc; /* Error code */ + int bPatch; /* True to accumulate patchsets */ + SessionTable *pList; /* List of tables in current patch */ +}; + +/* +** This function is called to merge two changes to the same row together as +** part of an sqlite3changeset_concat() operation. A new change object is +** allocated and a pointer to it stored in *ppNew. +*/ +static int sessionChangeMerge( + SessionTable *pTab, /* Table structure */ + int bPatchset, /* True for patchsets */ + SessionChange *pExist, /* Existing change */ + int op2, /* Second change operation */ + int bIndirect, /* True if second change is indirect */ + u8 *aRec, /* Second change record */ + int nRec, /* Number of bytes in aRec */ + SessionChange **ppNew /* OUT: Merged change */ +){ + SessionChange *pNew = 0; + + if( !pExist ){ + pNew = (SessionChange *)sqlite3_malloc(sizeof(SessionChange) + nRec); + if( !pNew ){ + return SQLITE_NOMEM; + } + memset(pNew, 0, sizeof(SessionChange)); + pNew->op = op2; + pNew->bIndirect = bIndirect; + pNew->nRecord = nRec; + pNew->aRecord = (u8*)&pNew[1]; + memcpy(pNew->aRecord, aRec, nRec); + }else{ + int op1 = pExist->op; + + /* + ** op1=INSERT, op2=INSERT -> Unsupported. Discard op2. + ** op1=INSERT, op2=UPDATE -> INSERT. + ** op1=INSERT, op2=DELETE -> (none) + ** + ** op1=UPDATE, op2=INSERT -> Unsupported. Discard op2. + ** op1=UPDATE, op2=UPDATE -> UPDATE. + ** op1=UPDATE, op2=DELETE -> DELETE. + ** + ** op1=DELETE, op2=INSERT -> UPDATE. + ** op1=DELETE, op2=UPDATE -> Unsupported. Discard op2. + ** op1=DELETE, op2=DELETE -> Unsupported. Discard op2. + */ + if( (op1==SQLITE_INSERT && op2==SQLITE_INSERT) + || (op1==SQLITE_UPDATE && op2==SQLITE_INSERT) + || (op1==SQLITE_DELETE && op2==SQLITE_UPDATE) + || (op1==SQLITE_DELETE && op2==SQLITE_DELETE) + ){ + pNew = pExist; + }else if( op1==SQLITE_INSERT && op2==SQLITE_DELETE ){ + sqlite3_free(pExist); + assert( pNew==0 ); + }else{ + u8 *aExist = pExist->aRecord; + int nByte; + u8 *aCsr; + + /* Allocate a new SessionChange object. Ensure that the aRecord[] + ** buffer of the new object is large enough to hold any record that + ** may be generated by combining the input records. 
*/ + nByte = sizeof(SessionChange) + pExist->nRecord + nRec; + pNew = (SessionChange *)sqlite3_malloc(nByte); + if( !pNew ){ + sqlite3_free(pExist); + return SQLITE_NOMEM; + } + memset(pNew, 0, sizeof(SessionChange)); + pNew->bIndirect = (bIndirect && pExist->bIndirect); + aCsr = pNew->aRecord = (u8 *)&pNew[1]; + + if( op1==SQLITE_INSERT ){ /* INSERT + UPDATE */ + u8 *a1 = aRec; + assert( op2==SQLITE_UPDATE ); + pNew->op = SQLITE_INSERT; + if( bPatchset==0 ) sessionSkipRecord(&a1, pTab->nCol); + sessionMergeRecord(&aCsr, pTab->nCol, aExist, a1); + }else if( op1==SQLITE_DELETE ){ /* DELETE + INSERT */ + assert( op2==SQLITE_INSERT ); + pNew->op = SQLITE_UPDATE; + if( bPatchset ){ + memcpy(aCsr, aRec, nRec); + aCsr += nRec; + }else{ + if( 0==sessionMergeUpdate(&aCsr, pTab, bPatchset, aExist, 0,aRec,0) ){ + sqlite3_free(pNew); + pNew = 0; + } + } + }else if( op2==SQLITE_UPDATE ){ /* UPDATE + UPDATE */ + u8 *a1 = aExist; + u8 *a2 = aRec; + assert( op1==SQLITE_UPDATE ); + if( bPatchset==0 ){ + sessionSkipRecord(&a1, pTab->nCol); + sessionSkipRecord(&a2, pTab->nCol); + } + pNew->op = SQLITE_UPDATE; + if( 0==sessionMergeUpdate(&aCsr, pTab, bPatchset, aRec, aExist,a1,a2) ){ + sqlite3_free(pNew); + pNew = 0; + } + }else{ /* UPDATE + DELETE */ + assert( op1==SQLITE_UPDATE && op2==SQLITE_DELETE ); + pNew->op = SQLITE_DELETE; + if( bPatchset ){ + memcpy(aCsr, aRec, nRec); + aCsr += nRec; + }else{ + sessionMergeRecord(&aCsr, pTab->nCol, aRec, aExist); + } + } + + if( pNew ){ + pNew->nRecord = (int)(aCsr - pNew->aRecord); + } + sqlite3_free(pExist); + } + } + + *ppNew = pNew; + return SQLITE_OK; +} + +/* +** Add all changes in the changeset traversed by the iterator passed as +** the first argument to the changegroup hash tables. +*/ +static int sessionChangesetToHash( + sqlite3_changeset_iter *pIter, /* Iterator to read from */ + sqlite3_changegroup *pGrp /* Changegroup object to add changeset to */ +){ + u8 *aRec; + int nRec; + int rc = SQLITE_OK; + SessionTable *pTab = 0; + + + while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec) ){ + const char *zNew; + int nCol; + int op; + int iHash; + int bIndirect; + SessionChange *pChange; + SessionChange *pExist = 0; + SessionChange **pp; + + if( pGrp->pList==0 ){ + pGrp->bPatch = pIter->bPatchset; + }else if( pIter->bPatchset!=pGrp->bPatch ){ + rc = SQLITE_ERROR; + break; + } + + sqlite3changeset_op(pIter, &zNew, &nCol, &op, &bIndirect); + if( !pTab || sqlite3_stricmp(zNew, pTab->zName) ){ + /* Search the list for a matching table */ + int nNew = (int)strlen(zNew); + u8 *abPK; + + sqlite3changeset_pk(pIter, &abPK, 0); + for(pTab = pGrp->pList; pTab; pTab=pTab->pNext){ + if( 0==sqlite3_strnicmp(pTab->zName, zNew, nNew+1) ) break; + } + if( !pTab ){ + SessionTable **ppTab; + + pTab = sqlite3_malloc(sizeof(SessionTable) + nCol + nNew+1); + if( !pTab ){ + rc = SQLITE_NOMEM; + break; + } + memset(pTab, 0, sizeof(SessionTable)); + pTab->nCol = nCol; + pTab->abPK = (u8*)&pTab[1]; + memcpy(pTab->abPK, abPK, nCol); + pTab->zName = (char*)&pTab->abPK[nCol]; + memcpy(pTab->zName, zNew, nNew+1); + + /* The new object must be linked on to the end of the list, not + ** simply added to the start of it. This is to ensure that the + ** tables within the output of sqlite3changegroup_output() are in + ** the right order. 
*/
+        for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext);
+        *ppTab = pTab;
+      }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){
+        rc = SQLITE_SCHEMA;
+        break;
+      }
+    }
+
+    if( sessionGrowHash(pIter->bPatchset, pTab) ){
+      rc = SQLITE_NOMEM;
+      break;
+    }
+    iHash = sessionChangeHash(
+        pTab, (pIter->bPatchset && op==SQLITE_DELETE), aRec, pTab->nChange
+    );
+
+    /* Search for existing entry. If found, remove it from the hash table.
+    ** Code below may link it back in.
+    */
+    for(pp=&pTab->apChange[iHash]; *pp; pp=&(*pp)->pNext){
+      int bPkOnly1 = 0;
+      int bPkOnly2 = 0;
+      if( pIter->bPatchset ){
+        bPkOnly1 = (*pp)->op==SQLITE_DELETE;
+        bPkOnly2 = op==SQLITE_DELETE;
+      }
+      if( sessionChangeEqual(pTab, bPkOnly1, (*pp)->aRecord, bPkOnly2, aRec) ){
+        pExist = *pp;
+        *pp = (*pp)->pNext;
+        pTab->nEntry--;
+        break;
+      }
+    }
+
+    rc = sessionChangeMerge(pTab,
+        pIter->bPatchset, pExist, op, bIndirect, aRec, nRec, &pChange
+    );
+    if( rc ) break;
+    if( pChange ){
+      pChange->pNext = pTab->apChange[iHash];
+      pTab->apChange[iHash] = pChange;
+      pTab->nEntry++;
+    }
+  }
+
+  if( rc==SQLITE_OK ) rc = pIter->rc;
+  return rc;
+}
+
+/*
+** Serialize a changeset (or patchset) based on all changesets (or patchsets)
+** added to the changegroup object passed as the first argument.
+**
+** If xOutput is not NULL, then the changeset/patchset is returned to the
+** user via one or more calls to xOutput, as with the other streaming
+** interfaces.
+**
+** Or, if xOutput is NULL, then (*ppOut) is populated with a pointer to a
+** buffer containing the output changeset before this function returns. In
+** this case (*pnOut) is set to the size of the output buffer in bytes. It
+** is the responsibility of the caller to free the output buffer using
+** sqlite3_free() when it is no longer required.
+**
+** If successful, SQLITE_OK is returned. Or, if an error occurs, an SQLite
+** error code. If an error occurs and xOutput is NULL, (*ppOut) and (*pnOut)
+** are both set to 0 before returning.
+*/
+static int sessionChangegroupOutput(
+  sqlite3_changegroup *pGrp,
+  int (*xOutput)(void *pOut, const void *pData, int nData),
+  void *pOut,
+  int *pnOut,
+  void **ppOut
+){
+  int rc = SQLITE_OK;
+  SessionBuffer buf = {0, 0, 0};
+  SessionTable *pTab;
+  assert( xOutput==0 || (ppOut==0 && pnOut==0) );
+
+  /* Create the serialized output changeset based on the contents of the
+  ** hash tables attached to the SessionTable objects in list p->pList.
+  */
+  for(pTab=pGrp->pList; rc==SQLITE_OK && pTab; pTab=pTab->pNext){
+    int i;
+    if( pTab->nEntry==0 ) continue;
+
+    sessionAppendTableHdr(&buf, pGrp->bPatch, pTab, &rc);
+    for(i=0; i<pTab->nChange; i++){
+      SessionChange *p;
+      for(p=pTab->apChange[i]; p; p=p->pNext){
+        sessionAppendByte(&buf, p->op, &rc);
+        sessionAppendByte(&buf, p->bIndirect, &rc);
+        sessionAppendBlob(&buf, p->aRecord, p->nRecord, &rc);
+      }
+    }
+
+    if( rc==SQLITE_OK && xOutput && buf.nBuf>=SESSIONS_STRM_CHUNK_SIZE ){
+      rc = xOutput(pOut, buf.aBuf, buf.nBuf);
+      buf.nBuf = 0;
+    }
+  }
+
+  if( rc==SQLITE_OK ){
+    if( xOutput ){
+      if( buf.nBuf>0 ) rc = xOutput(pOut, buf.aBuf, buf.nBuf);
+    }else{
+      *ppOut = buf.aBuf;
+      *pnOut = buf.nBuf;
+      buf.aBuf = 0;
+    }
+  }
+  sqlite3_free(buf.aBuf);
+
+  return rc;
+}
+
+/*
+** Allocate a new, empty, sqlite3_changegroup.
+*/ +SQLITE_API int SQLITE_STDCALL sqlite3changegroup_new(sqlite3_changegroup **pp){ + int rc = SQLITE_OK; /* Return code */ + sqlite3_changegroup *p; /* New object */ + p = (sqlite3_changegroup*)sqlite3_malloc(sizeof(sqlite3_changegroup)); + if( p==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(p, 0, sizeof(sqlite3_changegroup)); + } + *pp = p; + return rc; +} + +/* +** Add the changeset currently stored in buffer pData, size nData bytes, +** to changeset-group p. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add(sqlite3_changegroup *pGrp, int nData, void *pData){ + sqlite3_changeset_iter *pIter; /* Iterator opened on pData/nData */ + int rc; /* Return code */ + + rc = sqlite3changeset_start(&pIter, nData, pData); + if( rc==SQLITE_OK ){ + rc = sessionChangesetToHash(pIter, pGrp); + } + sqlite3changeset_finalize(pIter); + return rc; +} + +/* +** Obtain a buffer containing a changeset representing the concatenation +** of all changesets added to the group so far. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output( + sqlite3_changegroup *pGrp, + int *pnData, + void **ppData +){ + return sessionChangegroupOutput(pGrp, 0, 0, pnData, ppData); +} + +/* +** Streaming versions of changegroup_add(). +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changegroup_add_strm( + sqlite3_changegroup *pGrp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +){ + sqlite3_changeset_iter *pIter; /* Iterator opened on pData/nData */ + int rc; /* Return code */ + + rc = sqlite3changeset_start_strm(&pIter, xInput, pIn); + if( rc==SQLITE_OK ){ + rc = sessionChangesetToHash(pIter, pGrp); + } + sqlite3changeset_finalize(pIter); + return rc; +} + +/* +** Streaming versions of changegroup_output(). +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changegroup_output_strm( + sqlite3_changegroup *pGrp, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +){ + return sessionChangegroupOutput(pGrp, xOutput, pOut, 0, 0); +} + +/* +** Delete a changegroup object. +*/ +SQLITE_API void SQLITE_STDCALL sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ + if( pGrp ){ + sessionDeleteTable(pGrp->pList); + sqlite3_free(pGrp); + } +} + +/* +** Combine two changesets together. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_concat( + int nLeft, /* Number of bytes in lhs input */ + void *pLeft, /* Lhs input changeset */ + int nRight /* Number of bytes in rhs input */, + void *pRight, /* Rhs input changeset */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: changeset (left right) */ +){ + sqlite3_changegroup *pGrp; + int rc; + + rc = sqlite3changegroup_new(&pGrp); + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_add(pGrp, nLeft, pLeft); + } + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_add(pGrp, nRight, pRight); + } + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_output(pGrp, pnOut, ppOut); + } + sqlite3changegroup_delete(pGrp); + + return rc; +} + +/* +** Streaming version of sqlite3changeset_concat(). 
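+**
+** The xInput and xOutput callbacks follow the same convention as the other
+** streaming interfaces in this file. As a sketch only (an editor's example,
+** not part of the SQLite sources, assuming the caller supplies an open
+** FILE* via pOut and includes <stdio.h>), a file-backed xOutput might be:
+**
+**   static int xOutput(void *pOut, const void *pData, int nData){
+**     FILE *pFile = (FILE*)pOut;
+**     return fwrite(pData, 1, nData, pFile)==(size_t)nData
+**          ? SQLITE_OK : SQLITE_IOERR;
+**   }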
+*/ +SQLITE_API int SQLITE_STDCALL sqlite3changeset_concat_strm( + int (*xInputA)(void *pIn, void *pData, int *pnData), + void *pInA, + int (*xInputB)(void *pIn, void *pData, int *pnData), + void *pInB, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +){ + sqlite3_changegroup *pGrp; + int rc; + + rc = sqlite3changegroup_new(&pGrp); + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_add_strm(pGrp, xInputA, pInA); + } + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_add_strm(pGrp, xInputB, pInB); + } + if( rc==SQLITE_OK ){ + rc = sqlite3changegroup_output_strm(pGrp, xOutput, pOut); + } + sqlite3changegroup_delete(pGrp); + + return rc; +} + +#endif /* SQLITE_ENABLE_SESSION && SQLITE_ENABLE_PREUPDATE_HOOK */ + +/************** End of sqlite3session.c **************************************/ /************** Begin file json1.c *******************************************/ /* ** 2015-08-12 @@ -167688,6 +176179,26 @@ static void jsonTest1Func( ** Scalar SQL function implementations ****************************************************************************/ +/* +** Implementation of the json_QUOTE(VALUE) function. Return a JSON value +** corresponding to the SQL value input. Mostly this means putting +** double-quotes around strings and returning the unquoted string "null" +** when given a NULL input. +*/ +static void jsonQuoteFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + JsonString jx; + UNUSED_PARAM(argc); + + jsonInit(&jx, ctx); + jsonAppendValue(&jx, argv[0]); + jsonResult(&jx); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); +} + /* ** Implementation of the json_array(VALUE,...) function. Return a JSON ** array that contains all values given in arguments. Or if any argument @@ -168601,6 +177112,7 @@ SQLITE_PRIVATE int sqlite3Json1Init(sqlite3 *db){ { "json_extract", -1, 0, jsonExtractFunc }, { "json_insert", -1, 0, jsonSetFunc }, { "json_object", -1, 0, jsonObjectFunc }, + { "json_quote", 1, 0, jsonQuoteFunc }, { "json_remove", -1, 0, jsonRemoveFunc }, { "json_replace", -1, 0, jsonReplaceFunc }, { "json_set", -1, 1, jsonSetFunc }, @@ -168826,11 +177338,13 @@ struct Fts5PhraseIter { ** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid ** ** with $p set to a phrase equivalent to the phrase iPhrase of the -** current query is executed. For each row visited, the callback function -** passed as the fourth argument is invoked. The context and API objects -** passed to the callback function may be used to access the properties of -** each matched row. Invoking Api.xUserData() returns a copy of the pointer -** passed as the third argument to pUserData. +** current query is executed. Any column filter that applies to +** phrase iPhrase of the current query is included in $p. For each +** row visited, the callback function passed as the fourth argument +** is invoked. The context and API objects passed to the callback +** function may be used to access the properties of each matched row. +** Invoking Api.xUserData() returns a copy of the pointer passed as +** the third argument to pUserData. ** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. @@ -168999,7 +177513,7 @@ struct Fts5ExtensionApi { ** behaviour. The structure methods are expected to function as follows: ** ** xCreate: -** This function is used to allocate and inititalize a tokenizer instance. +** This function is used to allocate and initialize a tokenizer instance. 
** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) @@ -169259,7 +177773,6 @@ struct fts5_api { #endif /* _FTS5_H */ - /* ** 2014 May 31 ** @@ -169948,7 +178461,6 @@ static int sqlite3Fts5ExprPopulatePoslists( Fts5Config*, Fts5Expr*, Fts5PoslistPopulator*, int, const char*, int ); static void sqlite3Fts5ExprCheckPoslists(Fts5Expr*, i64); -static void sqlite3Fts5ExprClearEof(Fts5Expr*); static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**); @@ -170364,9 +178876,9 @@ typedef struct fts5yyStackEntry fts5yyStackEntry; /* The state of the parser is completely contained in an instance of ** the following structure */ struct fts5yyParser { - int fts5yyidx; /* Index of top element in stack */ + fts5yyStackEntry *fts5yytos; /* Pointer to top element of the stack */ #ifdef fts5YYTRACKMAXSTACKDEPTH - int fts5yyidxMax; /* Maximum value of fts5yyidx */ + int fts5yyhwm; /* High-water mark of the stack */ #endif #ifndef fts5YYNOERRORRECOVERY int fts5yyerrcnt; /* Shifts left before out of the error */ @@ -170375,6 +178887,7 @@ struct fts5yyParser { #if fts5YYSTACKDEPTH<=0 int fts5yystksz; /* Current side of the stack */ fts5yyStackEntry *fts5yystack; /* The parser's stack */ + fts5yyStackEntry fts5yystk0; /* First stack entry */ #else fts5yyStackEntry fts5yystack[fts5YYSTACKDEPTH]; /* The parser's stack */ #endif @@ -170461,24 +178974,34 @@ static const char *const fts5yyRuleName[] = { #if fts5YYSTACKDEPTH<=0 /* -** Try to increase the size of the parser stack. +** Try to increase the size of the parser stack. Return the number +** of errors. Return 0 on success. */ -static void fts5yyGrowStack(fts5yyParser *p){ +static int fts5yyGrowStack(fts5yyParser *p){ int newSize; + int idx; fts5yyStackEntry *pNew; newSize = p->fts5yystksz*2 + 100; - pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0])); + idx = p->fts5yytos ? 
(int)(p->fts5yytos - p->fts5yystack) : 0; + if( p->fts5yystack==&p->fts5yystk0 ){ + pNew = malloc(newSize*sizeof(pNew[0])); + if( pNew ) pNew[0] = p->fts5yystk0; + }else{ + pNew = realloc(p->fts5yystack, newSize*sizeof(pNew[0])); + } if( pNew ){ p->fts5yystack = pNew; - p->fts5yystksz = newSize; + p->fts5yytos = &p->fts5yystack[idx]; #ifndef NDEBUG if( fts5yyTraceFILE ){ - fprintf(fts5yyTraceFILE,"%sStack grows to %d entries!\n", - fts5yyTracePrompt, p->fts5yystksz); + fprintf(fts5yyTraceFILE,"%sStack grows from %d to %d entries.\n", + fts5yyTracePrompt, p->fts5yystksz, newSize); } #endif + p->fts5yystksz = newSize; } + return pNew==0; } #endif @@ -170507,15 +179030,24 @@ static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(fts5YYMALLOCARGTYPE)){ fts5yyParser *pParser; pParser = (fts5yyParser*)(*mallocProc)( (fts5YYMALLOCARGTYPE)sizeof(fts5yyParser) ); if( pParser ){ - pParser->fts5yyidx = -1; #ifdef fts5YYTRACKMAXSTACKDEPTH - pParser->fts5yyidxMax = 0; + pParser->fts5yyhwm = 0; #endif #if fts5YYSTACKDEPTH<=0 + pParser->fts5yytos = NULL; pParser->fts5yystack = NULL; pParser->fts5yystksz = 0; - fts5yyGrowStack(pParser); + if( fts5yyGrowStack(pParser) ){ + pParser->fts5yystack = &pParser->fts5yystk0; + pParser->fts5yystksz = 1; + } +#endif +#ifndef fts5YYNOERRORRECOVERY + pParser->fts5yyerrcnt = -1; #endif + pParser->fts5yytos = pParser->fts5yystack; + pParser->fts5yystack[0].stateno = 0; + pParser->fts5yystack[0].major = 0; } return pParser; } @@ -170587,8 +179119,9 @@ static void fts5yy_destructor( */ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){ fts5yyStackEntry *fts5yytos; - assert( pParser->fts5yyidx>=0 ); - fts5yytos = &pParser->fts5yystack[pParser->fts5yyidx--]; + assert( pParser->fts5yytos!=0 ); + assert( pParser->fts5yytos > pParser->fts5yystack ); + fts5yytos = pParser->fts5yytos--; #ifndef NDEBUG if( fts5yyTraceFILE ){ fprintf(fts5yyTraceFILE,"%sPopping %s\n", @@ -170615,9 +179148,9 @@ static void sqlite3Fts5ParserFree( #ifndef fts5YYPARSEFREENEVERNULL if( pParser==0 ) return; #endif - while( pParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(pParser); + while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser); #if fts5YYSTACKDEPTH<=0 - free(pParser->fts5yystack); + if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack); #endif (*freeProc)((void*)pParser); } @@ -170628,7 +179161,7 @@ static void sqlite3Fts5ParserFree( #ifdef fts5YYTRACKMAXSTACKDEPTH static int sqlite3Fts5ParserStackPeak(void *p){ fts5yyParser *pParser = (fts5yyParser*)p; - return pParser->fts5yyidxMax; + return pParser->fts5yyhwm; } #endif @@ -170641,7 +179174,7 @@ static unsigned int fts5yy_find_shift_action( fts5YYCODETYPE iLookAhead /* The look-ahead token */ ){ int i; - int stateno = pParser->fts5yystack[pParser->fts5yyidx].stateno; + int stateno = pParser->fts5yytos->stateno; if( stateno>=fts5YY_MIN_REDUCE ) return stateno; assert( stateno <= fts5YY_SHIFT_COUNT ); @@ -170734,13 +179267,13 @@ static int fts5yy_find_reduce_action( */ static void fts5yyStackOverflow(fts5yyParser *fts5yypParser){ sqlite3Fts5ParserARG_FETCH; - fts5yypParser->fts5yyidx--; + fts5yypParser->fts5yytos--; #ifndef NDEBUG if( fts5yyTraceFILE ){ fprintf(fts5yyTraceFILE,"%sStack Overflow!\n",fts5yyTracePrompt); } #endif - while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser); + while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ) fts5yy_pop_parser_stack(fts5yypParser); /* Here code is inserted which will execute if the parser ** stack every overflows 
*/ /******** Begin %stack_overflow code ******************************************/ @@ -170758,11 +179291,11 @@ static void fts5yyTraceShift(fts5yyParser *fts5yypParser, int fts5yyNewState){ if( fts5yyTraceFILE ){ if( fts5yyNewStatefts5yystack[fts5yypParser->fts5yyidx].major], + fts5yyTracePrompt,fts5yyTokenName[fts5yypParser->fts5yytos->major], fts5yyNewState); }else{ fprintf(fts5yyTraceFILE,"%sShift '%s'\n", - fts5yyTracePrompt,fts5yyTokenName[fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx].major]); + fts5yyTracePrompt,fts5yyTokenName[fts5yypParser->fts5yytos->major]); } } } @@ -170780,27 +179313,30 @@ static void fts5yy_shift( sqlite3Fts5ParserFTS5TOKENTYPE fts5yyMinor /* The minor token to shift in */ ){ fts5yyStackEntry *fts5yytos; - fts5yypParser->fts5yyidx++; + fts5yypParser->fts5yytos++; #ifdef fts5YYTRACKMAXSTACKDEPTH - if( fts5yypParser->fts5yyidx>fts5yypParser->fts5yyidxMax ){ - fts5yypParser->fts5yyidxMax = fts5yypParser->fts5yyidx; + if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ + fts5yypParser->fts5yyhwm++; + assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack) ); } #endif #if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yyidx>=fts5YYSTACKDEPTH ){ + if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5YYSTACKDEPTH] ){ fts5yyStackOverflow(fts5yypParser); return; } #else - if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz ){ - fts5yyGrowStack(fts5yypParser); - if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz ){ + if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){ + if( fts5yyGrowStack(fts5yypParser) ){ fts5yyStackOverflow(fts5yypParser); return; } } #endif - fts5yytos = &fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx]; + if( fts5yyNewState > fts5YY_MAX_SHIFT ){ + fts5yyNewState += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; + } + fts5yytos = fts5yypParser->fts5yytos; fts5yytos->stateno = (fts5YYACTIONTYPE)fts5yyNewState; fts5yytos->major = (fts5YYCODETYPE)fts5yyMajor; fts5yytos->minor.fts5yy0 = fts5yyMinor; @@ -170855,7 +179391,7 @@ static void fts5yy_reduce( fts5yyStackEntry *fts5yymsp; /* The top of the parser's stack */ int fts5yysize; /* Amount to pop the stack */ sqlite3Fts5ParserARG_FETCH; - fts5yymsp = &fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx]; + fts5yymsp = fts5yypParser->fts5yytos; #ifndef NDEBUG if( fts5yyTraceFILE && fts5yyruleno<(int)(sizeof(fts5yyRuleName)/sizeof(fts5yyRuleName[0])) ){ fts5yysize = fts5yyRuleInfo[fts5yyruleno].nrhs; @@ -170869,22 +179405,23 @@ static void fts5yy_reduce( ** enough on the stack to push the LHS value */ if( fts5yyRuleInfo[fts5yyruleno].nrhs==0 ){ #ifdef fts5YYTRACKMAXSTACKDEPTH - if( fts5yypParser->fts5yyidx>fts5yypParser->fts5yyidxMax ){ - fts5yypParser->fts5yyidxMax = fts5yypParser->fts5yyidx; + if( (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)>fts5yypParser->fts5yyhwm ){ + fts5yypParser->fts5yyhwm++; + assert( fts5yypParser->fts5yyhwm == (int)(fts5yypParser->fts5yytos - fts5yypParser->fts5yystack)); } #endif #if fts5YYSTACKDEPTH>0 - if( fts5yypParser->fts5yyidx>=fts5YYSTACKDEPTH-1 ){ + if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5YYSTACKDEPTH-1] ){ fts5yyStackOverflow(fts5yypParser); return; } #else - if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz-1 ){ - fts5yyGrowStack(fts5yypParser); - if( fts5yypParser->fts5yyidx>=fts5yypParser->fts5yystksz-1 ){ + if( 
fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz-1] ){ + if( fts5yyGrowStack(fts5yypParser) ){ fts5yyStackOverflow(fts5yypParser); return; } + fts5yymsp = fts5yypParser->fts5yytos; } #endif } @@ -171025,15 +179562,17 @@ static void fts5yy_reduce( fts5yysize = fts5yyRuleInfo[fts5yyruleno].nrhs; fts5yyact = fts5yy_find_reduce_action(fts5yymsp[-fts5yysize].stateno,(fts5YYCODETYPE)fts5yygoto); if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ - if( fts5yyact>fts5YY_MAX_SHIFT ) fts5yyact += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; - fts5yypParser->fts5yyidx -= fts5yysize - 1; + if( fts5yyact>fts5YY_MAX_SHIFT ){ + fts5yyact += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; + } fts5yymsp -= fts5yysize-1; + fts5yypParser->fts5yytos = fts5yymsp; fts5yymsp->stateno = (fts5YYACTIONTYPE)fts5yyact; fts5yymsp->major = (fts5YYCODETYPE)fts5yygoto; fts5yyTraceShift(fts5yypParser, fts5yyact); }else{ assert( fts5yyact == fts5YY_ACCEPT_ACTION ); - fts5yypParser->fts5yyidx -= fts5yysize; + fts5yypParser->fts5yytos -= fts5yysize; fts5yy_accept(fts5yypParser); } } @@ -171051,7 +179590,7 @@ static void fts5yy_parse_failed( fprintf(fts5yyTraceFILE,"%sFail!\n",fts5yyTracePrompt); } #endif - while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser); + while( fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ) fts5yy_pop_parser_stack(fts5yypParser); /* Here code is inserted which will be executed whenever the ** parser fails */ /************ Begin %parse_failure code ***************************************/ @@ -171092,7 +179631,10 @@ static void fts5yy_accept( fprintf(fts5yyTraceFILE,"%sAccept!\n",fts5yyTracePrompt); } #endif - while( fts5yypParser->fts5yyidx>=0 ) fts5yy_pop_parser_stack(fts5yypParser); +#ifndef fts5YYNOERRORRECOVERY + fts5yypParser->fts5yyerrcnt = -1; +#endif + assert( fts5yypParser->fts5yytos==fts5yypParser->fts5yystack ); /* Here code is inserted which will be executed whenever the ** parser accepts */ /*********** Begin %parse_accept code *****************************************/ @@ -171135,28 +179677,8 @@ static void sqlite3Fts5Parser( #endif fts5yyParser *fts5yypParser; /* The parser */ - /* (re)initialize the parser, if necessary */ fts5yypParser = (fts5yyParser*)fts5yyp; - if( fts5yypParser->fts5yyidx<0 ){ -#if fts5YYSTACKDEPTH<=0 - if( fts5yypParser->fts5yystksz <=0 ){ - fts5yyStackOverflow(fts5yypParser); - return; - } -#endif - fts5yypParser->fts5yyidx = 0; -#ifndef fts5YYNOERRORRECOVERY - fts5yypParser->fts5yyerrcnt = -1; -#endif - fts5yypParser->fts5yystack[0].stateno = 0; - fts5yypParser->fts5yystack[0].major = 0; -#ifndef NDEBUG - if( fts5yyTraceFILE ){ - fprintf(fts5yyTraceFILE,"%sInitialize. Empty stack. 
State 0\n", - fts5yyTracePrompt); - } -#endif - } + assert( fts5yypParser->fts5yytos!=0 ); #if !defined(fts5YYERRORSYMBOL) && !defined(fts5YYNOERRORRECOVERY) fts5yyendofinput = (fts5yymajor==0); #endif @@ -171171,7 +179693,6 @@ static void sqlite3Fts5Parser( do{ fts5yyact = fts5yy_find_shift_action(fts5yypParser,(fts5YYCODETYPE)fts5yymajor); if( fts5yyact <= fts5YY_MAX_SHIFTREDUCE ){ - if( fts5yyact > fts5YY_MAX_SHIFT ) fts5yyact += fts5YY_MIN_REDUCE - fts5YY_MIN_SHIFTREDUCE; fts5yy_shift(fts5yypParser,fts5yyact,fts5yymajor,fts5yyminor); #ifndef fts5YYNOERRORRECOVERY fts5yypParser->fts5yyerrcnt--; @@ -171213,7 +179734,7 @@ static void sqlite3Fts5Parser( if( fts5yypParser->fts5yyerrcnt<0 ){ fts5yy_syntax_error(fts5yypParser,fts5yymajor,fts5yyminor); } - fts5yymx = fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx].major; + fts5yymx = fts5yypParser->fts5yytos->major; if( fts5yymx==fts5YYERRORSYMBOL || fts5yyerrorhit ){ #ifndef NDEBUG if( fts5yyTraceFILE ){ @@ -171224,18 +179745,20 @@ static void sqlite3Fts5Parser( fts5yy_destructor(fts5yypParser, (fts5YYCODETYPE)fts5yymajor, &fts5yyminorunion); fts5yymajor = fts5YYNOCODE; }else{ - while( - fts5yypParser->fts5yyidx >= 0 && - fts5yymx != fts5YYERRORSYMBOL && - (fts5yyact = fts5yy_find_reduce_action( - fts5yypParser->fts5yystack[fts5yypParser->fts5yyidx].stateno, + while( fts5yypParser->fts5yytos >= &fts5yypParser->fts5yystack + && fts5yymx != fts5YYERRORSYMBOL + && (fts5yyact = fts5yy_find_reduce_action( + fts5yypParser->fts5yytos->stateno, fts5YYERRORSYMBOL)) >= fts5YY_MIN_REDUCE ){ fts5yy_pop_parser_stack(fts5yypParser); } - if( fts5yypParser->fts5yyidx < 0 || fts5yymajor==0 ){ + if( fts5yypParser->fts5yytos < fts5yypParser->fts5yystack || fts5yymajor==0 ){ fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion); fts5yy_parse_failed(fts5yypParser); +#ifndef fts5YYNOERRORRECOVERY + fts5yypParser->fts5yyerrcnt = -1; +#endif fts5yymajor = fts5YYNOCODE; }else if( fts5yymx!=fts5YYERRORSYMBOL ){ fts5yy_shift(fts5yypParser,fts5yyact,fts5YYERRORSYMBOL,fts5yyminor); @@ -171272,18 +179795,23 @@ static void sqlite3Fts5Parser( fts5yy_destructor(fts5yypParser,(fts5YYCODETYPE)fts5yymajor,&fts5yyminorunion); if( fts5yyendofinput ){ fts5yy_parse_failed(fts5yypParser); +#ifndef fts5YYNOERRORRECOVERY + fts5yypParser->fts5yyerrcnt = -1; +#endif } fts5yymajor = fts5YYNOCODE; #endif } - }while( fts5yymajor!=fts5YYNOCODE && fts5yypParser->fts5yyidx>=0 ); + }while( fts5yymajor!=fts5YYNOCODE && fts5yypParser->fts5yytos>fts5yypParser->fts5yystack ); #ifndef NDEBUG if( fts5yyTraceFILE ){ - int i; + fts5yyStackEntry *i; + char cDiv = '['; fprintf(fts5yyTraceFILE,"%sReturn. Stack=",fts5yyTracePrompt); - for(i=1; i<=fts5yypParser->fts5yyidx; i++) - fprintf(fts5yyTraceFILE,"%c%s", i==1 ? 
'[' : ' ', - fts5yyTokenName[fts5yypParser->fts5yystack[i].major]); + for(i=&fts5yypParser->fts5yystack[1]; i<=fts5yypParser->fts5yytos; i++){ + fprintf(fts5yyTraceFILE,"%c%s", cDiv, fts5yyTokenName[i->major]); + cDiv = ' '; + } fprintf(fts5yyTraceFILE,"]\n"); } #endif @@ -174865,6 +183393,17 @@ static int sqlite3Fts5ExprClonePhrase( pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); } + if( rc==SQLITE_OK ){ + Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; + if( pColsetOrig ){ + int nByte = sizeof(Fts5Colset) + pColsetOrig->nCol * sizeof(int); + Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte); + if( pColset ){ + memcpy(pColset, pColsetOrig, nByte); + } + pNew->pRoot->pNear->pColset = pColset; + } + } for(i=0; rc==SQLITE_OK && inTerm; i++){ int tflags = 0; @@ -175816,17 +184355,6 @@ static void sqlite3Fts5ExprCheckPoslists(Fts5Expr *pExpr, i64 iRowid){ fts5ExprCheckPoslists(pExpr->pRoot, iRowid); } -static void fts5ExprClearEof(Fts5ExprNode *pNode){ - int i; - for(i=0; inChild; i++){ - fts5ExprClearEof(pNode->apChild[i]); - } - pNode->bEof = 0; -} -static void sqlite3Fts5ExprClearEof(Fts5Expr *pExpr){ - fts5ExprClearEof(pExpr->pRoot); -} - /* ** This function is only called for detail=columns tables. */ @@ -176213,11 +184741,11 @@ static int sqlite3Fts5HashWrite( if( pHash->eDetail==FTS5_DETAIL_FULL ){ pPtr[p->nData++] = 0x01; p->nData += sqlite3Fts5PutVarint(&pPtr[p->nData], iCol); - p->iCol = iCol; + p->iCol = (i16)iCol; p->iPos = 0; }else{ bNew = 1; - p->iCol = iPos = iCol; + p->iCol = (i16)(iPos = iCol); } } @@ -179640,7 +188168,7 @@ static void fts5IterSetOutputs_Col100(Fts5Iter *pIter, Fts5SegIter *pSeg){ if( aiCol==aiColEnd ) goto setoutputs_col_out; } if( *aiCol==iPrev ){ - *aOut++ = (iPrev - iPrevOut) + 2; + *aOut++ = (u8)((iPrev - iPrevOut) + 2); iPrevOut = iPrev; } } @@ -184044,7 +192572,6 @@ static int fts5FilterMethod( pCsr->ePlan = FTS5_PLAN_SOURCE; pCsr->pExpr = pTab->pSortCsr->pExpr; rc = fts5CursorFirst(pTab, pCsr, bDesc); - sqlite3Fts5ExprClearEof(pCsr->pExpr); }else if( pMatch ){ const char *zExpr = (const char*)sqlite3_value_text(apVal[0]); if( zExpr==0 ) zExpr = ""; @@ -185473,7 +194000,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2016-04-18 17:30:31 92dc59fd5ad66f646666042eb04195e3a61a9e8e", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de", -1, SQLITE_TRANSIENT); } static int fts5Init(sqlite3 *db){ @@ -185838,7 +194365,11 @@ static int sqlite3Fts5CreateTable( char *zErr = 0; rc = fts5ExecPrintf(pConfig->db, &zErr, "CREATE TABLE %Q.'%q_%q'(%s)%s", - pConfig->zDb, pConfig->zName, zPost, zDefn, bWithout?" WITHOUT ROWID":"" + pConfig->zDb, pConfig->zName, zPost, zDefn, +#ifndef SQLITE_FTS5_NO_WITHOUT_ROWID + bWithout?" WITHOUT ROWID": +#endif + "" ); if( zErr ){ *pzErr = sqlite3_mprintf( diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h index cd0217e..4e2df5e 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h @@ -30,8 +30,8 @@ ** the version number) and changes its name to "sqlite3.h" as ** part of the build process. 
*/ -#ifndef _SQLITE3_H_ -#define _SQLITE3_H_ +#ifndef SQLITE3_H +#define SQLITE3_H #include /* Needed for the definition of va_list */ /* @@ -54,8 +54,17 @@ extern "C" { #ifndef SQLITE_CDECL # define SQLITE_CDECL #endif +#ifndef SQLITE_APICALL +# define SQLITE_APICALL +#endif #ifndef SQLITE_STDCALL -# define SQLITE_STDCALL +# define SQLITE_STDCALL SQLITE_APICALL +#endif +#ifndef SQLITE_CALLBACK +# define SQLITE_CALLBACK +#endif +#ifndef SQLITE_SYSAPI +# define SQLITE_SYSAPI #endif /* @@ -111,9 +120,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.12.2" -#define SQLITE_VERSION_NUMBER 3012002 -#define SQLITE_SOURCE_ID "2016-04-18 17:30:31 92dc59fd5ad66f646666042eb04195e3a61a9e8e" +#define SQLITE_VERSION "3.14.0" +#define SQLITE_VERSION_NUMBER 3014000 +#define SQLITE_SOURCE_ID "2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -506,6 +515,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_exec( #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) +#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) /* ** CAPI3REF: Flags For File Open Operations @@ -1035,6 +1045,16 @@ struct sqlite3_io_methods { */ typedef struct sqlite3_mutex sqlite3_mutex; +/* +** CAPI3REF: Loadable Extension Thunk +** +** A pointer to the opaque sqlite3_api_routines structure is passed as +** the third parameter to entry points of [loadable extensions]. This +** structure must be typedefed in order to work around compiler warnings +** on some platforms. +*/ +typedef struct sqlite3_api_routines sqlite3_api_routines; + /* ** CAPI3REF: OS Interface Object ** @@ -1932,12 +1952,30 @@ struct sqlite3_mem_methods { ** following this call. The second parameter may be a NULL pointer, in ** which case the new setting is not reported back.
**
+** <dt>SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION</dt>
+** <dd>
    ^This option is used to enable or disable the [sqlite3_load_extension()] +** interface independently of the [load_extension()] SQL function. +** The [sqlite3_enable_load_extension()] API enables or disables both the +** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. +** There should be two additional arguments. +** When the first argument to this interface is 1, then only the C-API is +** enabled and the SQL function remains disabled. If the first argument to +** this interface is 0, then both the C-API and the SQL function are disabled. +** If the first argument is -1, then no changes are made to state of either the +** C-API or the SQL function. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface +** is disabled or enabled following this call. The second parameter may +** be a NULL pointer, in which case the new setting is not reported back. +**
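+**
+** For example (an editor's sketch, not part of the SQLite sources), to
+** enable only the C-API on an open database handle db:
+**
+**   int bEnabled = 0;
+**   sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION,
+**                     1, &bEnabled);
+**
+** After this call bEnabled is set to 1, sqlite3_load_extension() is
+** available, and the load_extension() SQL function remains disabled.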
    +** ** */ #define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ #define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ #define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ /* @@ -2214,7 +2252,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql); ** A busy handler must not close the database connection ** or [prepared statement] that invoked the busy handler. */ -SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*, int(*)(void*,int), void*); +SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); /* ** CAPI3REF: Set A Busy Timeout @@ -2736,6 +2774,9 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( ** CAPI3REF: Tracing And Profiling Functions ** METHOD: sqlite3 ** +** These routines are deprecated. Use the [sqlite3_trace_v2()] interface +** instead of the routines described here. +** ** These routines register callback functions that can be used for ** tracing and profiling the execution of SQL statements. ** @@ -2761,10 +2802,104 @@ SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer( ** sqlite3_profile() function is considered experimental and is ** subject to change in future versions of SQLite. */ -SQLITE_API void *SQLITE_STDCALL sqlite3_trace(sqlite3*, void(*xTrace)(void*,const char*), void*); -SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_profile(sqlite3*, +SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_trace(sqlite3*, + void(*xTrace)(void*,const char*), void*); +SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*, void(*xProfile)(void*,const char*,sqlite3_uint64), void*); +/* +** CAPI3REF: SQL Trace Event Codes +** KEYWORDS: SQLITE_TRACE +** +** These constants identify classes of events that can be monitored +** using the [sqlite3_trace_v2()] tracing logic. The third argument +** to [sqlite3_trace_v2()] is an OR-ed combination of one or more of +** the following constants. ^The first argument to the trace callback +** is one of the following constants. +** +** New tracing constants may be added in future releases. +** +** ^A trace callback has four arguments: xCallback(T,C,P,X). +** ^The T argument is one of the integer type codes above. +** ^The C argument is a copy of the context pointer passed in as the +** fourth argument to [sqlite3_trace_v2()]. +** The P and X arguments are pointers whose meanings depend on T. +** +**
+** [[SQLITE_TRACE_STMT]] <dt>SQLITE_TRACE_STMT</dt>
+** <dd>
    ^An SQLITE_TRACE_STMT callback is invoked when a prepared statement +** first begins running and possibly at other times during the +** execution of the prepared statement, such as at the start of each +** trigger subprogram. ^The P argument is a pointer to the +** [prepared statement]. ^The X argument is a pointer to a string which +** is the unexpanded SQL text of the prepared statement or an SQL comment +** that indicates the invocation of a trigger. ^The callback can compute +** the same text that would have been returned by the legacy [sqlite3_trace()] +** interface by using the X argument when X begins with "--" and invoking +** [sqlite3_expanded_sql(P)] otherwise. +** +** [[SQLITE_TRACE_PROFILE]]
<dt>SQLITE_TRACE_PROFILE</dt>
+** <dd>
    ^An SQLITE_TRACE_PROFILE callback provides approximately the same +** information as is provided by the [sqlite3_profile()] callback. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument points to a 64-bit integer which is the estimated of +** the number of nanosecond that the prepared statement took to run. +** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. +** +** [[SQLITE_TRACE_ROW]]
<dt>SQLITE_TRACE_ROW</dt>
+** <dd>
    ^An SQLITE_TRACE_ROW callback is invoked whenever a prepared +** statement generates a single row of result. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument is unused. +** +** [[SQLITE_TRACE_CLOSE]]
<dt>SQLITE_TRACE_CLOSE</dt>
+** <dd>
    ^An SQLITE_TRACE_CLOSE callback is invoked when a database +** connection closes. +** ^The P argument is a pointer to the [database connection] object +** and the X argument is unused. +**
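+**
+** As an illustrative sketch (an editor's example, not part of the SQLite
+** sources, assuming <stdio.h> is included), a callback registered for
+** SQLITE_TRACE_STMT events only might be:
+**
+**   static int xTrace(unsigned T, void *C, void *P, void *X){
+**     (void)C;
+**     if( T==SQLITE_TRACE_STMT ){
+**       printf("stmt %p: %s\n", P, (const char*)X);
+**     }
+**     return 0;
+**   }
+**
+** It would be registered with a mask of SQLITE_TRACE_STMT via the
+** sqlite3_trace_v2() interface described below.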
    +*/ +#define SQLITE_TRACE_STMT 0x01 +#define SQLITE_TRACE_PROFILE 0x02 +#define SQLITE_TRACE_ROW 0x04 +#define SQLITE_TRACE_CLOSE 0x08 + +/* +** CAPI3REF: SQL Trace Hook +** METHOD: sqlite3 +** +** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback +** function X against [database connection] D, using property mask M +** and context pointer P. ^If the X callback is +** NULL or if the M mask is zero, then tracing is disabled. The +** M argument should be the bitwise OR-ed combination of +** zero or more [SQLITE_TRACE] constants. +** +** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides +** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** +** ^The X callback is invoked whenever any of the events identified by +** mask M occur. ^The integer return value from the callback is currently +** ignored, though this may change in future releases. Callback +** implementations should return zero to ensure future compatibility. +** +** ^A trace callback is invoked with four arguments: callback(T,C,P,X). +** ^The T argument is one of the [SQLITE_TRACE] +** constants to indicate why the callback was invoked. +** ^The C argument is a copy of the context pointer. +** The P and X arguments are pointers whose meanings depend on T. +** +** The sqlite3_trace_v2() interface is intended to replace the legacy +** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which +** are deprecated. +*/ +SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2( + sqlite3*, + unsigned uMask, + int(*xCallback)(unsigned,void*,void*,void*), + void *pCtx +); + /* ** CAPI3REF: Query Progress Callbacks ** METHOD: sqlite3 @@ -3383,11 +3518,35 @@ SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2( ** CAPI3REF: Retrieving Statement SQL ** METHOD: sqlite3_stmt ** -** ^This interface can be used to retrieve a saved copy of the original -** SQL text used to create a [prepared statement] if that statement was -** compiled using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. +** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8 +** SQL text used to create [prepared statement] P if P was +** created by either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()]. +** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8 +** string containing the SQL text of prepared statement P with +** [bound parameters] expanded. +** +** ^(For example, if a prepared statement is created using the SQL +** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345 +** and parameter :xyz is unbound, then sqlite3_sql() will return +** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql() +** will return "SELECT 2345,NULL".)^ +** +** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory +** is available to hold the result, or if the result would exceed the +** the maximum string length determined by the [SQLITE_LIMIT_LENGTH]. +** +** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of +** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time +** option causes sqlite3_expanded_sql() to always return NULL. +** +** ^The string returned by sqlite3_sql(P) is managed by SQLite and is +** automatically freed when the prepared statement is finalized. +** ^The string returned by sqlite3_expanded_sql(P), on the other hand, +** is obtained from [sqlite3_malloc()] and must be free by the application +** by passing it to [sqlite3_free()]. 
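+**
+** For example (an editor's sketch, not part of the SQLite sources,
+** assuming pStmt is a valid prepared statement and <stdio.h> is
+** included):
+**
+**   char *zSql = sqlite3_expanded_sql(pStmt);
+**   if( zSql ){
+**     printf("running: %s\n", zSql);
+**     sqlite3_free(zSql);
+**   }
+**
+** Note that, unlike sqlite3_sql(), the returned buffer is owned by the
+** caller and must be released with sqlite3_free().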
*/ SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt); +SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt); /* ** CAPI3REF: Determine If An SQL Statement Writes The Database @@ -4545,12 +4704,13 @@ SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*); ** SQLite will invoke the destructor function X with parameter P exactly ** once, when the metadata is discarded. ** SQLite is free to discard the metadata at any time, including:
      -**
    • when the corresponding function parameter changes, or -**
    • when [sqlite3_reset()] or [sqlite3_finalize()] is called for the -** SQL statement, or -**
    • when sqlite3_set_auxdata() is invoked again on the same parameter, or -**
    • during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.
    )^ +**
  • ^(when the corresponding function parameter changes)^, or +**
  • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the +** SQL statement)^, or +**
  • ^(when sqlite3_set_auxdata() is invoked again on the same +** parameter)^, or +**
  • ^(during the original sqlite3_set_auxdata() call when a memory +** allocation error occurs.)^ ** ** Note the last bullet in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the @@ -5187,7 +5347,7 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), ** ^The sqlite3_update_hook() interface registers a callback function ** with the [database connection] identified by the first argument ** to be invoked whenever a row is updated, inserted or deleted in -** a rowid table. +** a [rowid table]. ** ^Any callback set by a previous call to this function ** for the same database connection is overridden. ** @@ -5226,8 +5386,8 @@ SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), ** on the same [database connection] D, or NULL for ** the first call on D. ** -** See also the [sqlite3_commit_hook()] and [sqlite3_rollback_hook()] -** interfaces. +** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()], +** and [sqlite3_preupdate_hook()] interfaces. */ SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook( sqlite3*, @@ -5377,7 +5537,7 @@ SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N); ** column exists. ^The sqlite3_table_column_metadata() interface returns ** SQLITE_ERROR and if the specified column does not exist. ** ^If the column-name parameter to sqlite3_table_column_metadata() is a -** NULL pointer, then this routine simply checks for the existance of the +** NULL pointer, then this routine simply checks for the existence of the ** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it ** does not. ** @@ -5474,9 +5634,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata( ** should free this memory by calling [sqlite3_free()]. ** ** ^Extension loading must be enabled using -** [sqlite3_enable_load_extension()] prior to calling this API, +** [sqlite3_enable_load_extension()] or +** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL) +** prior to calling this API, ** otherwise an error will be returned. ** +** Security warning: It is recommended that the +** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this +** interface. The use of the [sqlite3_enable_load_extension()] interface +** should be avoided. This will keep the SQL function [load_extension()] +** disabled and prevent SQL injections from giving attackers +** access to extension loading capabilities. +** ** See also the [load_extension() SQL function]. */ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( @@ -5499,6 +5668,17 @@ SQLITE_API int SQLITE_STDCALL sqlite3_load_extension( ** ^Call the sqlite3_enable_load_extension() routine with onoff==1 ** to turn extension loading on and call it with onoff==0 to turn ** it back off again. +** +** ^This interface enables or disables both the C-API +** [sqlite3_load_extension()] and the SQL function [load_extension()]. +** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..) +** to enable or disable only the C-API.)^ +** +** Security warning: It is recommended that extension loading +** be disabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method +** rather than this interface, so the [load_extension()] SQL function +** remains disabled. This will prevent SQL injections from giving attackers +** access to extension loading capabilities. 
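A minimal sketch of the recommended pattern described above: enable only the C-API, load one extension, then disable loading again. The extension path is a placeholder:

#include <sqlite3.h>

static int load_one_extension(sqlite3 *db){
  char *zErrMsg = 0;
  int rc;
  /* 1 = enable the C-API only; the SQL load_extension() stays disabled */
  sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 1, (int*)0);
  rc = sqlite3_load_extension(db, "/path/to/extension.so", 0, &zErrMsg);
  sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, 0, (int*)0);
  sqlite3_free(zErrMsg);   /* error message, if any, is sqlite3_malloc()ed */
  return rc;
}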
*/
SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
@@ -5512,7 +5692,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
 **
 ** ^(Even though the function prototype shows that xEntryPoint() takes
 ** no arguments and returns void, SQLite invokes xEntryPoint() with three
-** arguments and expects and integer result as if the signature of the
+** arguments and expects an integer result as if the signature of the
 ** entry point were as follows:
 **
 **
    @@ -5538,7 +5718,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int ono
     ** See also: [sqlite3_reset_auto_extension()]
     ** and [sqlite3_cancel_auto_extension()]
     */
    -SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
    +SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
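A minimal sketch of an auto-extension entry point; the function name and body are illustrative assumptions. Although registered through a void(*)(void) pointer, SQLite calls it with the three-argument signature described above and expects an integer result:

#include <sqlite3.h>

struct sqlite3_api_routines;   /* defined in sqlite3ext.h */

static int my_extension_init(
  sqlite3 *db,
  const char **pzErrMsg,
  const struct sqlite3_api_routines *pApi
){
  (void)db; (void)pzErrMsg; (void)pApi;
  /* ... register application-defined SQL functions on db here ... */
  return SQLITE_OK;
}

/* Registration:  sqlite3_auto_extension((void(*)(void))my_extension_init); */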
     
     /*
     ** CAPI3REF: Cancel Automatic Extension Loading
    @@ -5550,7 +5730,7 @@ SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void (*xEntryPoint)(void));
     ** unregistered and it returns 0 if X was not on the list of initialization
     ** routines.
     */
    -SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void (*xEntryPoint)(void));
    +SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
     
     /*
     ** CAPI3REF: Reset Automatic Extension Loading
    @@ -6726,6 +6906,18 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
     ** memory used by all pager caches associated with the database connection.)^
     ** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
     **
+** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
+** ^(SQLITE_DBSTATUS_CACHE_USED_SHARED
+** This parameter is similar to DBSTATUS_CACHE_USED, except that if a
+** pager cache is shared between two or more connections the bytes of heap
+** memory used by that pager cache are divided evenly between the attached
+** connections.)^ In other words, if none of the pager caches associated
+** with the database connection are shared, this request returns the same
+** value as DBSTATUS_CACHE_USED. Or, if one or more of the pager caches are
+** shared, the value returned by this call will be smaller than that returned
+** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
+** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
+**
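A hedged sketch of sampling this new parameter; the helper name is an assumption. The highwater argument must still be supplied even though it is always reported as 0 here:

#include <sqlite3.h>

static int shared_cache_bytes(sqlite3 *db){
  int nCur = 0, nHiwtr = 0;
  sqlite3_db_status(db, SQLITE_DBSTATUS_CACHE_USED_SHARED, &nCur, &nHiwtr, 0);
  return nCur;
}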
 ** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(SQLITE_DBSTATUS_SCHEMA_USED
 **
 ** This parameter returns the approximate number of bytes of heap
 ** memory used to store the schema for all databases associated
@@ -6783,7 +6975,8 @@ SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int
 #define SQLITE_DBSTATUS_CACHE_MISS 8
 #define SQLITE_DBSTATUS_CACHE_WRITE 9
 #define SQLITE_DBSTATUS_DEFERRED_FKS 10
-#define SQLITE_DBSTATUS_MAX 10 /* Largest defined DBSTATUS */
+#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11
+#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */
 
 /*
@@ -7137,7 +7330,7 @@ typedef struct sqlite3_backup sqlite3_backup;
 ** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
 ** an error.
 **
-** ^A call to sqlite3_backup_init() will fail, returning SQLITE_ERROR, if
+** ^A call to sqlite3_backup_init() will fail, returning NULL, if
 ** there is already a read or read-write transaction open on the
 ** destination database.
 **
@@ -7915,11 +8108,107 @@ SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
 */
 SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
 
+/*
+** CAPI3REF: The pre-update hook.
+**
+** ^These interfaces are only available if SQLite is compiled using the
+** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option.
+**
+** ^The [sqlite3_preupdate_hook()] interface registers a callback function
+** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
+** on a [rowid table].
+** ^At most one preupdate hook may be registered at a time on a single
+** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
+** the previous setting.
+** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()]
+** with a NULL pointer as the second parameter.
+** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
+** the first parameter to callbacks.
+**
+** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate
+** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID]
+** tables.
+**
+** ^The second parameter to the preupdate callback is a pointer to
+** the [database connection] that registered the preupdate hook.
+** ^The third parameter to the preupdate callback is one of the constants
+** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the
+** kind of update operation that is about to occur.
+** ^(The fourth parameter to the preupdate callback is the name of the
+** database within the database connection that is being modified. This
+** will be "main" for the main database or "temp" for TEMP tables or
+** the name given after the AS keyword in the [ATTACH] statement for attached
+** databases.)^
+** ^The fifth parameter to the preupdate callback is the name of the
+** table that is being modified.
+** ^The sixth parameter to the preupdate callback is the initial [rowid] of the
+** row being changed for SQLITE_UPDATE and SQLITE_DELETE changes and is
+** undefined for SQLITE_INSERT changes.
+** ^The seventh parameter to the preupdate callback is the final [rowid] of
+** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is
+** undefined for SQLITE_DELETE changes.
+**
+** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
+** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
+** provide additional information about a preupdate event. These routines
+** may only be called from within a preupdate callback.
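A minimal sketch of a preupdate hook (requires SQLITE_ENABLE_PREUPDATE_HOOK; the callback name and logging are illustrative assumptions):

#include <stdio.h>
#include <sqlite3.h>

static void xPreUpdate(
  void *pCtx, sqlite3 *db, int op, char const *zDb,
  char const *zName, sqlite3_int64 iKey1, sqlite3_int64 iKey2
){
  (void)pCtx; (void)iKey2;
  fprintf(stderr, "op=%d on %s.%s rowid=%lld, %d column(s)\n",
          op, zDb, zName, (long long)iKey1, sqlite3_preupdate_count(db));
  if( op==SQLITE_UPDATE ){
    sqlite3_value *pOld = 0;
    if( sqlite3_preupdate_old(db, 0, &pOld)==SQLITE_OK ){
      /* pOld is destroyed when this callback returns */
    }
  }
}

/* Registration:  sqlite3_preupdate_hook(db, xPreUpdate, 0); */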
Invoking any of +** these routines from outside of a preupdate callback or with a +** [database connection] pointer that is different from the one supplied +** to the preupdate callback results in undefined and probably undesirable +** behavior. +** +** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns +** in the row that is being inserted, updated, or deleted. +** +** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row before it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE +** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row after it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE +** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate +** callback was invoked as a result of a direct insert, update, or delete +** operation; or 1 for inserts, updates, or deletes invoked by top-level +** triggers; or 2 for changes resulting from triggers called by top-level +** triggers; and so forth. +** +** See also: [sqlite3_update_hook()] +*/ +SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook( + sqlite3 *db, + void(*xPreUpdate)( + void *pCtx, /* Copy of third arg to preupdate_hook() */ + sqlite3 *db, /* Database handle */ + int op, /* SQLITE_UPDATE, DELETE or INSERT */ + char const *zDb, /* Database name */ + char const *zName, /* Table name */ + sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */ + sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */ + ), + void* +); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); + /* ** CAPI3REF: Low-level system error code ** ** ^Attempt to return the underlying operating system error code or error -** number that caused the most reason I/O error or failure to open a file. +** number that caused the most recent I/O error or failure to open a file. ** The return value is OS-dependent. 
For example, on unix systems, after ** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be ** called to get back the underlying "errno" that caused the problem, such @@ -7985,20 +8274,29 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get( ** CAPI3REF: Start a read transaction on an historical snapshot ** EXPERIMENTAL ** -** ^The [sqlite3_snapshot_open(D,S,P)] interface attempts to move the -** read transaction that is currently open on schema S of -** [database connection] D so that it refers to historical [snapshot] P. +** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a +** read transaction for schema S of +** [database connection] D such that the read transaction +** refers to historical [snapshot] P, rather than the most +** recent change to the database. ** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success ** or an appropriate [error code] if it fails. ** ** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be -** the first operation, apart from other sqlite3_snapshot_open() calls, -** following the [BEGIN] that starts a new read transaction. -** ^A [snapshot] will fail to open if it has been overwritten by a +** the first operation following the [BEGIN] that takes the schema S +** out of [autocommit mode]. +** ^In other words, schema S must not currently be in +** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the +** database connection D must be out of [autocommit mode]. +** ^A [snapshot] will fail to open if it has been overwritten by a ** [checkpoint]. -** ^A [snapshot] will fail to open if the database connection D has not -** previously completed at least one read operation against the database -** file. (Hint: Run "[PRAGMA application_id]" against a newly opened +** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the +** database connection D does not know that the database file for +** schema S is in [WAL mode]. A database connection might not know +** that the database file is in [WAL mode] if there has been no prior +** I/O on that database connection, or if the database entered [WAL mode] +** after the most recent I/O on the database connection.)^ +** (Hint: Run "[PRAGMA application_id]" against a newly opened ** database connection in order to make it ready to use snapshots.) ** ** The [sqlite3_snapshot_open()] interface is only available when the @@ -8023,6 +8321,33 @@ SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open( */ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*); +/* +** CAPI3REF: Compare the ages of two snapshot handles. +** EXPERIMENTAL +** +** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages +** of two valid snapshot handles. +** +** If the two snapshot handles are not associated with the same database +** file, the result of the comparison is undefined. +** +** Additionally, the result of the comparison is only valid if both of the +** snapshot handles were obtained by calling sqlite3_snapshot_get() since the +** last time the wal file was deleted. The wal file is deleted when the +** database is changed back to rollback mode or when the number of database +** clients drops to zero. If either snapshot handle was obtained before the +** wal file was last deleted, the value returned by this function +** is undefined. 
+** +** Otherwise, this API returns a negative value if P1 refers to an older +** snapshot than P2, zero if the two handles refer to the same database +** snapshot, and a positive value if P1 is a newer snapshot than P2. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_cmp( + sqlite3_snapshot *p1, + sqlite3_snapshot *p2 +); + /* ** Undo the hack that converts floating point types to integer for ** builds on processors without floating point support. @@ -8034,8 +8359,9 @@ SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3 #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif -#endif /* _SQLITE3_H_ */ +#endif /* SQLITE3_H */ +/******** Begin file sqlite3rtree.h *********/ /* ** 2010 August 30 ** @@ -8153,6 +8479,1287 @@ struct sqlite3_rtree_query_info { #endif /* ifndef _SQLITE3RTREE_H_ */ +/******** End of sqlite3rtree.h *********/ +/******** Begin file sqlite3session.h *********/ + +#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) +#define __SQLITESESSION_H_ 1 + +/* +** Make sure we can call this stuff from C++. +*/ +#ifdef __cplusplus +extern "C" { +#endif + + +/* +** CAPI3REF: Session Object Handle +*/ +typedef struct sqlite3_session sqlite3_session; + +/* +** CAPI3REF: Changeset Iterator Handle +*/ +typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; + +/* +** CAPI3REF: Create A New Session Object +** +** Create a new session object attached to database handle db. If successful, +** a pointer to the new object is written to *ppSession and SQLITE_OK is +** returned. If an error occurs, *ppSession is set to NULL and an SQLite +** error code (e.g. SQLITE_NOMEM) is returned. +** +** It is possible to create multiple session objects attached to a single +** database handle. +** +** Session objects created using this function should be deleted using the +** [sqlite3session_delete()] function before the database handle that they +** are attached to is itself closed. If the database handle is closed before +** the session object is deleted, then the results of calling any session +** module function, including [sqlite3session_delete()] on the session object +** are undefined. +** +** Because the session module uses the [sqlite3_preupdate_hook()] API, it +** is not possible for an application to register a pre-update hook on a +** database handle that has one or more session objects attached. Nor is +** it possible to create a session object attached to a database handle for +** which a pre-update hook is already defined. The results of attempting +** either of these things are undefined. +** +** The session object will be used to create changesets for tables in +** database zDb, where zDb is either "main", or "temp", or the name of an +** attached database. It is not an error if database zDb is not attached +** to the database when the session object is created. +*/ +int sqlite3session_create( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of db (e.g. "main") */ + sqlite3_session **ppSession /* OUT: New session object */ +); + +/* +** CAPI3REF: Delete A Session Object +** +** Delete a session object previously allocated using +** [sqlite3session_create()]. Once a session object has been deleted, the +** results of attempting to use pSession with any other session module +** function are undefined. +** +** Session objects must be deleted before the database handle to which they +** are attached is closed. Refer to the documentation for +** [sqlite3session_create()] for details. 
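A minimal sketch of the lifecycle rules above (requires SQLITE_ENABLE_SESSION and SQLITE_ENABLE_PREUPDATE_HOOK; the helper name is an assumption): create a session on "main", attach all tables, and delete the session before the handle is closed:

#include <sqlite3.h>

static int with_session(sqlite3 *db){
  sqlite3_session *pSession = 0;
  int rc = sqlite3session_create(db, "main", &pSession);
  if( rc==SQLITE_OK ){
    rc = sqlite3session_attach(pSession, 0);   /* NULL zTab => all tables */
    /* ... perform writes here; the session records them ... */
    sqlite3session_delete(pSession);           /* before sqlite3_close() */
  }
  return rc;
}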
+*/ +void sqlite3session_delete(sqlite3_session *pSession); + + +/* +** CAPI3REF: Enable Or Disable A Session Object +** +** Enable or disable the recording of changes by a session object. When +** enabled, a session object records changes made to the database. When +** disabled - it does not. A newly created session object is enabled. +** Refer to the documentation for [sqlite3session_changeset()] for further +** details regarding how enabling and disabling a session object affects +** the eventual changesets. +** +** Passing zero to this function disables the session. Passing a value +** greater than zero enables it. Passing a value less than zero is a +** no-op, and may be used to query the current state of the session. +** +** The return value indicates the final state of the session object: 0 if +** the session is disabled, or 1 if it is enabled. +*/ +int sqlite3session_enable(sqlite3_session *pSession, int bEnable); + +/* +** CAPI3REF: Set Or Clear the Indirect Change Flag +** +** Each change recorded by a session object is marked as either direct or +** indirect. A change is marked as indirect if either: +** +**
      +**
    • The session object "indirect" flag is set when the change is +** made, or +**
• The change is made by an SQL trigger or foreign key action
+** instead of directly as a result of a user's SQL statement.
+**
+**
+** If a single row is affected by more than one operation within a session,
+** then the change is considered indirect if all operations meet the criteria
+** for an indirect change above, or direct otherwise.
+**
+** This function is used to set, clear or query the session object indirect
+** flag. If the second argument passed to this function is zero, then the
+** indirect flag is cleared. If it is greater than zero, the indirect flag
+** is set. Passing a value less than zero does not modify the current value
+** of the indirect flag, and may be used to query the current state of the
+** indirect flag for the specified session object.
+**
+** The return value indicates the final state of the indirect flag: 0 if
+** it is clear, or 1 if it is set.
+*/
+int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+
+/*
+** CAPI3REF: Attach A Table To A Session Object
+**
+** If argument zTab is not NULL, then it is the name of a table to attach
+** to the session object passed as the first argument. All subsequent changes
+** made to the table while the session object is enabled will be recorded. See
+** documentation for [sqlite3session_changeset()] for further details.
+**
+** Or, if argument zTab is NULL, then changes are recorded for all tables
+** in the database. If additional tables are added to the database (by
+** executing "CREATE TABLE" statements) after this call is made, changes for
+** the new tables are also recorded.
+**
+** Changes can only be recorded for tables that have a PRIMARY KEY explicitly
+** defined as part of their CREATE TABLE statement. It does not matter if the
+** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY
+** KEY may consist of a single column, or may be a composite key.
+**
+** It is not an error if the named table does not exist in the database. Nor
+** is it an error if the named table does not have a PRIMARY KEY. However,
+** no changes will be recorded in either of these scenarios.
+**
+** Changes are not recorded for individual rows that have NULL values stored
+** in one or more of their PRIMARY KEY columns.
+**
+** SQLITE_OK is returned if the call completes without error. Or, if an error
+** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
+*/
+int sqlite3session_attach(
+  sqlite3_session *pSession, /* Session object */
+  const char *zTab /* Table name */
+);
+
+/*
+** CAPI3REF: Set a table filter on a Session Object.
+**
+** The second argument (xFilter) is the "filter callback". For changes to rows
+** in tables that are not attached to the Session object, the filter is called
+** to determine whether changes to the table's rows should be tracked or not.
+** If xFilter returns 0, changes are not tracked. Note that once a table is
+** attached, xFilter will not be called again.
+*/
+void sqlite3session_table_filter(
+  sqlite3_session *pSession, /* Session object */
+  int(*xFilter)(
+    void *pCtx, /* Copy of third arg to _filter_table() */
+    const char *zTab /* Table name */
+  ),
+  void *pCtx /* First argument passed to xFilter */
+);
+
+/*
+** CAPI3REF: Generate A Changeset From A Session Object
+**
+** Obtain a changeset containing changes to the tables attached to the
+** session object passed as the first argument. If successful,
+** set *ppChangeset to point to a buffer containing the changeset
+** and *pnChangeset to the size of the changeset in bytes before returning
+** SQLITE_OK.
If an error occurs, set both *ppChangeset and *pnChangeset to
+** zero and return an SQLite error code.
+**
+** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
+** each representing a change to a single row of an attached table. An INSERT
+** change contains the values of each field of a new database row. A DELETE
+** contains the original values of each field of a deleted database row. An
+** UPDATE change contains the original values of each field of an updated
+** database row along with the updated values for each updated non-primary-key
+** column. It is not possible for an UPDATE change to represent a change that
+** modifies the values of primary key columns. If such a change is made, it
+** is represented in a changeset as a DELETE followed by an INSERT.
+**
+** Changes are not recorded for rows that have NULL values stored in one or
+** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
+** no corresponding change is present in the changesets returned by this
+** function. If an existing row with one or more NULL values stored in
+** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
+** only an INSERT appears in the changeset. Similarly, if an existing row
+** with non-NULL PRIMARY KEY values is updated so that one or more of its
+** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
+** DELETE change only.
+**
+** The contents of a changeset may be traversed using an iterator created
+** using the [sqlite3changeset_start()] API. A changeset may be applied to
+** a database with a compatible schema using the [sqlite3changeset_apply()]
+** API.
+**
+** Within a changeset generated by this function, all changes related to a
+** single table are grouped together. In other words, when iterating through
+** a changeset or when applying a changeset to a database, all changes related
+** to a single table are processed before moving on to the next table. Tables
+** are sorted in the same order in which they were attached (or auto-attached)
+** to the sqlite3_session object. The order in which the changes related to
+** a single table are stored is undefined.
+**
+** Following a successful call to this function, it is the responsibility of
+** the caller to eventually free the buffer that *ppChangeset points to using
+** [sqlite3_free()].
+**
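A minimal sketch of the extract-and-free contract described above (the helper name is an assumption):

#include <sqlite3.h>

static int save_changeset(sqlite3_session *pSession){
  int nChangeset = 0;
  void *pChangeset = 0;
  int rc = sqlite3session_changeset(pSession, &nChangeset, &pChangeset);
  if( rc==SQLITE_OK ){
    /* ... persist nChangeset bytes at pChangeset somewhere durable ... */
    sqlite3_free(pChangeset);   /* caller owns and frees the buffer */
  }
  return rc;
}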

+** Changeset Generation
+**
+** Once a table has been attached to a session object, the session object
+** records the primary key values of all new rows inserted into the table.
+** It also records the original primary key and other column values of any
+** deleted or updated rows. For each unique primary key value, data is only
+** recorded once - the first time a row with said primary key is inserted,
+** updated or deleted in the lifetime of the session.
+**
+** There is one exception to the previous paragraph: when a row is inserted,
+** updated or deleted, if one or more of its primary key columns contain a
+** NULL value, no record of the change is made.
+**
+** The session object therefore accumulates two types of records - those
+** that consist of primary key values only (created when the user inserts
+** a new record) and those that consist of the primary key values and the
+** original values of other table columns (created when the user deletes
+** or updates a record).
+**
+** When this function is called, the requested changeset is created using
+** both the accumulated records and the current contents of the database
+** file. Specifically:
+**
+**
      +**
    • For each record generated by an insert, the database is queried +** for a row with a matching primary key. If one is found, an INSERT +** change is added to the changeset. If no such row is found, no change +** is added to the changeset. +** +**
    • For each record generated by an update or delete, the database is +** queried for a row with a matching primary key. If such a row is +** found and one or more of the non-primary key fields have been +** modified from their original values, an UPDATE change is added to +** the changeset. Or, if no such row is found in the table, a DELETE +** change is added to the changeset. If there is a row with a matching +** primary key in the database, but all fields contain their original +** values, no change is added to the changeset. +**
    +** +** This means, amongst other things, that if a row is inserted and then later +** deleted while a session object is active, neither the insert nor the delete +** will be present in the changeset. Or if a row is deleted and then later a +** row with the same primary key values inserted while a session object is +** active, the resulting changeset will contain an UPDATE change instead of +** a DELETE and an INSERT. +** +** When a session object is disabled (see the [sqlite3session_enable()] API), +** it does not accumulate records when rows are inserted, updated or deleted. +** This may appear to have some counter-intuitive effects if a single row +** is written to more than once during a session. For example, if a row +** is inserted while a session object is enabled, then later deleted while +** the same session object is disabled, no INSERT record will appear in the +** changeset, even though the delete took place while the session was disabled. +** Or, if one field of a row is updated while a session is disabled, and +** another field of the same row is updated while the session is enabled, the +** resulting changeset will contain an UPDATE change that updates both fields. +*/ +int sqlite3session_changeset( + sqlite3_session *pSession, /* Session object */ + int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ + void **ppChangeset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Load The Difference Between Tables Into A Session +** +** If it is not already attached to the session object passed as the first +** argument, this function attaches table zTbl in the same manner as the +** [sqlite3session_attach()] function. If zTbl does not exist, or if it +** does not have a primary key, this function is a no-op (but does not return +** an error). +** +** Argument zFromDb must be the name of a database ("main", "temp" etc.) +** attached to the same database handle as the session object that contains +** a table compatible with the table attached to the session by this function. +** A table is considered compatible if it: +** +**
      +**
    • Has the same name, +**
    • Has the same set of columns declared in the same order, and +**
    • Has the same PRIMARY KEY definition. +**
    +** +** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables +** are compatible but do not have any PRIMARY KEY columns, it is not an error +** but no changes are added to the session object. As with other session +** APIs, tables without PRIMARY KEYs are simply ignored. +** +** This function adds a set of changes to the session object that could be +** used to update the table in database zFrom (call this the "from-table") +** so that its content is the same as the table attached to the session +** object (call this the "to-table"). Specifically: +** +**
      +**
    • For each row (primary key) that exists in the to-table but not in +** the from-table, an INSERT record is added to the session object. +** +**
• For each row (primary key) that exists in the from-table but not in
+** the to-table, a DELETE record is added to the session object.
+**
+**
• For each row (primary key) that exists in both tables, but whose
+** non-primary-key fields contain different values in each, an UPDATE
+** record is added to the session.
+**
+**
+** To clarify, if this function is called and then a changeset constructed
+** using [sqlite3session_changeset()], then after applying that changeset to
+** database zFrom the contents of the two compatible tables would be
+** identical.
+**
+** It is an error if database zFrom does not exist or does not contain the
+** required compatible table.
+**
+** If the operation is successful, SQLITE_OK is returned. Otherwise, an
+** SQLite error code is returned. In this case, if argument pzErrMsg is not
+** NULL, *pzErrMsg may be set to point to a buffer containing an English
+** language error message. It is the responsibility of the caller to free
+** this buffer using sqlite3_free().
+*/
+int sqlite3session_diff(
+  sqlite3_session *pSession,
+  const char *zFromDb,
+  const char *zTbl,
+  char **pzErrMsg
+);
+
+
+/*
+** CAPI3REF: Generate A Patchset From A Session Object
+**
+** The differences between a patchset and a changeset are that:
+**
+**
      +**
    • DELETE records consist of the primary key fields only. The +** original values of other fields are omitted. +**
    • The original values of any modified fields are omitted from +** UPDATE records. +**
    +** +** A patchset blob may be used with up to date versions of all +** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(), +** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly, +** attempting to use a patchset blob with old versions of the +** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error. +** +** Because the non-primary key "old.*" fields are omitted, no +** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset +** is passed to the sqlite3changeset_apply() API. Other conflict types work +** in the same way as for changesets. +** +** Changes within a patchset are ordered in the same way as for changesets +** generated by the sqlite3session_changeset() function (i.e. all changes for +** a single table are grouped together, tables appear in the order in which +** they were attached to the session object). +*/ +int sqlite3session_patchset( + sqlite3_session *pSession, /* Session object */ + int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */ + void **ppPatchset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Test if a changeset has recorded any changes. +** +** Return non-zero if no changes to attached tables have been recorded by +** the session object passed as the first argument. Otherwise, if one or +** more changes have been recorded, return zero. +** +** Even if this function returns zero, it is possible that calling +** [sqlite3session_changeset()] on the session handle may still return a +** changeset that contains no changes. This can happen when a row in +** an attached table is modified and then later on the original values +** are restored. However, if this function returns non-zero, then it is +** guaranteed that a call to sqlite3session_changeset() will return a +** changeset containing zero changes. +*/ +int sqlite3session_isempty(sqlite3_session *pSession); + +/* +** CAPI3REF: Create An Iterator To Traverse A Changeset +** +** Create an iterator used to iterate through the contents of a changeset. +** If successful, *pp is set to point to the iterator handle and SQLITE_OK +** is returned. Otherwise, if an error occurs, *pp is set to zero and an +** SQLite error code is returned. +** +** The following functions can be used to advance and query a changeset +** iterator created by this function: +** +**
      +**
    • [sqlite3changeset_next()] +**
    • [sqlite3changeset_op()] +**
    • [sqlite3changeset_new()] +**
    • [sqlite3changeset_old()] +**
+**
+** It is the responsibility of the caller to eventually destroy the iterator
+** by passing it to [sqlite3changeset_finalize()]. The buffer containing the
+** changeset (pChangeset) must remain valid until after the iterator is
+** destroyed.
+**
+** Assuming the changeset blob was created by one of the
+** [sqlite3session_changeset()], [sqlite3changeset_concat()] or
+** [sqlite3changeset_invert()] functions, all changes within the changeset
+** that apply to a single table are grouped together. This means that when
+** an application iterates through a changeset using an iterator created by
+** this function, all changes that relate to a single table are visited
+** consecutively. There is no chance that the iterator will visit a change
+** that applies to table X, then one for table Y, and then later on visit
+** another change for table X.
+*/
+int sqlite3changeset_start(
+  sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
+  int nChangeset, /* Size of changeset blob in bytes */
+  void *pChangeset /* Pointer to blob containing changeset */
+);
+
+
+/*
+** CAPI3REF: Advance A Changeset Iterator
+**
+** This function may only be used with iterators created by function
+** [sqlite3changeset_start()]. If it is called on an iterator passed to
+** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE
+** is returned and the call has no effect.
+**
+** Immediately after an iterator is created by sqlite3changeset_start(), it
+** does not point to any change in the changeset. Assuming the changeset
+** is not empty, the first call to this function advances the iterator to
+** point to the first change in the changeset. Each subsequent call advances
+** the iterator to point to the next change in the changeset (if any). If
+** no error occurs and the iterator points to a valid change after a call
+** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned.
+** Otherwise, if all changes in the changeset have already been visited,
+** SQLITE_DONE is returned.
+**
+** If an error occurs, an SQLite error code is returned. Possible error
+** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
+** SQLITE_NOMEM.
+*/
+int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+
+/*
+** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
+** is not the case, this function returns [SQLITE_MISUSE].
+**
+** If argument pzTab is not NULL, then *pzTab is set to point to a
+** nul-terminated utf-8 encoded string containing the name of the table
+** affected by the current change. The buffer remains valid until either
+** sqlite3changeset_next() is called on the iterator or until the
+** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
+** set to the number of columns in the table affected by the change. If
+** pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change
+** is an indirect change, or false (0) otherwise. See the documentation for
+** [sqlite3session_indirect()] for a description of direct and indirect
+** changes.
Finally, if pOp is not NULL, then *pOp is set to one of +** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the +** type of change that the iterator currently points to. +** +** If no error occurs, SQLITE_OK is returned. If an error does occur, an +** SQLite error code is returned. The values of the output variables may not +** be trusted in this case. +*/ +int sqlite3changeset_op( + sqlite3_changeset_iter *pIter, /* Iterator object */ + const char **pzTab, /* OUT: Pointer to table name */ + int *pnCol, /* OUT: Number of columns in table */ + int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */ + int *pbIndirect /* OUT: True for an 'indirect' change */ +); + +/* +** CAPI3REF: Obtain The Primary Key Definition Of A Table +** +** For each modified table, a changeset includes the following: +** +**
      +**
    • The number of columns in the table, and +**
• Which of those columns make up the table's PRIMARY KEY.
+**
+**
+** This function is used to find which columns comprise the PRIMARY KEY of
+** the table modified by the change that iterator pIter currently points to.
+** If successful, *pabPK is set to point to an array of nCol entries, where
+** nCol is the number of columns in the table. Elements of *pabPK are set to
+** 0x01 if the corresponding column is part of the table's primary key, or
+** 0x00 if it is not.
+**
+** If argument pnCol is not NULL, then *pnCol is set to the number of columns
+** in the table.
+**
+** If this function is called when the iterator does not point to a valid
+** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise,
+** SQLITE_OK is returned and the output variables populated as described
+** above.
+*/
+int sqlite3changeset_pk(
+  sqlite3_changeset_iter *pIter, /* Iterator object */
+  unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
+  int *pnCol /* OUT: Number of entries in output array */
+);
+
+/*
+** CAPI3REF: Obtain old.* Values From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** original row values stored as part of the UPDATE or DELETE change and
+** returns SQLITE_OK. The name of the function comes from the fact that this
+** is similar to the "old.*" columns available to update or delete triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+int sqlite3changeset_old(
+  sqlite3_changeset_iter *pIter, /* Changeset iterator */
+  int iVal, /* Column number */
+  sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain new.* Values From A Changeset Iterator
+**
+** The pIter argument passed to this function may either be an iterator
+** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
+** created by [sqlite3changeset_start()]. In the latter case, the most recent
+** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
+** Furthermore, it may only be called if the type of change that the iterator
+** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise,
+** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the vector of
+** new row values stored as part of the UPDATE or INSERT change and
+** returns SQLITE_OK.
If the change is an UPDATE and does not include
+** a new value for the requested column, *ppValue is set to NULL and
+** SQLITE_OK returned. The name of the function comes from the fact that
+** this is similar to the "new.*" columns available to update or insert
+** triggers.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+int sqlite3changeset_new(
+  sqlite3_changeset_iter *pIter, /* Changeset iterator */
+  int iVal, /* Column number */
+  sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
+);
+
+/*
+** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator
+**
+** This function should only be used with iterator objects passed to a
+** conflict-handler callback by [sqlite3changeset_apply()] with either
+** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function
+** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue
+** is set to NULL.
+**
+** Argument iVal must be greater than or equal to 0, and less than the number
+** of columns in the table affected by the current change. Otherwise,
+** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
+**
+** If successful, this function sets *ppValue to point to a protected
+** sqlite3_value object containing the iVal'th value from the
+** "conflicting row" associated with the current conflict-handler callback
+** and returns SQLITE_OK.
+**
+** If some other error occurs (e.g. an OOM condition), an SQLite error code
+** is returned and *ppValue is set to NULL.
+*/
+int sqlite3changeset_conflict(
+  sqlite3_changeset_iter *pIter, /* Changeset iterator */
+  int iVal, /* Column number */
+  sqlite3_value **ppValue /* OUT: Value from conflicting row */
+);
+
+/*
+** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations
+**
+** This function may only be called with an iterator passed to an
+** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
+** it sets the output variable to the total number of known foreign key
+** violations in the destination database and returns SQLITE_OK.
+**
+** In all other cases this function returns SQLITE_MISUSE.
+*/
+int sqlite3changeset_fk_conflicts(
+  sqlite3_changeset_iter *pIter, /* Changeset iterator */
+  int *pnOut /* OUT: Number of FK violations */
+);
+
+
+/*
+** CAPI3REF: Finalize A Changeset Iterator
+**
+** This function is used to finalize an iterator allocated with
+** [sqlite3changeset_start()].
+**
+** This function should only be called on iterators created using the
+** [sqlite3changeset_start()] function. If an application calls this
+** function with an iterator passed to a conflict-handler by
+** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the
+** call has no effect.
+**
+** If an error was encountered within a call to an sqlite3changeset_xxx()
+** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an
+** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding
+** to that error is returned by this function. Otherwise, SQLITE_OK is
+** returned. This is to allow the following pattern (pseudo-code):
+**
+**   sqlite3changeset_start();
+**   while( SQLITE_ROW==sqlite3changeset_next() ){
+**     // Do something with change.
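+**     // (A hedged sketch of "something": inspect the change with the
+**     // iterator accessors documented above, e.g.
+**     //   const char *zTab; int nCol, op, bIndirect;
+**     //   sqlite3changeset_op(pIter, &zTab, &nCol, &op, &bIndirect); )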
+** } +** rc = sqlite3changeset_finalize(); +** if( rc!=SQLITE_OK ){ +** // An error has occurred +** } +*/ +int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Invert A Changeset +** +** This function is used to "invert" a changeset object. Applying an inverted +** changeset to a database reverses the effects of applying the uninverted +** changeset. Specifically: +** +**
      +**
    • Each DELETE change is changed to an INSERT, and +**
    • Each INSERT change is changed to a DELETE, and +**
    • For each UPDATE change, the old.* and new.* values are exchanged. +**
    +** +** This function does not change the order in which changes appear within +** the changeset. It merely reverses the sense of each individual change. +** +** If successful, a pointer to a buffer containing the inverted changeset +** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and +** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are +** zeroed and an SQLite error code returned. +** +** It is the responsibility of the caller to eventually call sqlite3_free() +** on the *ppOut pointer to free the buffer allocation following a successful +** call to this function. +** +** WARNING/TODO: This function currently assumes that the input is a valid +** changeset. If it is not, the results are undefined. +*/ +int sqlite3changeset_invert( + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + +/* +** CAPI3REF: Concatenate Two Changeset Objects +** +** This function is used to concatenate two changesets, A and B, into a +** single changeset. The result is a changeset equivalent to applying +** changeset A followed by changeset B. +** +** This function combines the two input changesets using an +** sqlite3_changegroup object. Calling it produces similar results as the +** following code fragment: +** +** sqlite3_changegroup *pGrp; +** rc = sqlite3_changegroup_new(&pGrp); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA); +** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB); +** if( rc==SQLITE_OK ){ +** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut); +** }else{ +** *ppOut = 0; +** *pnOut = 0; +** } +** +** Refer to the sqlite3_changegroup documentation below for details. +*/ +int sqlite3changeset_concat( + int nA, /* Number of bytes in buffer pA */ + void *pA, /* Pointer to buffer containing changeset A */ + int nB, /* Number of bytes in buffer pB */ + void *pB, /* Pointer to buffer containing changeset B */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: Buffer containing output changeset */ +); + + +/* +** Changegroup handle. +*/ +typedef struct sqlite3_changegroup sqlite3_changegroup; + +/* +** CAPI3REF: Combine two or more changesets into a single changeset. +** +** An sqlite3_changegroup object is used to combine two or more changesets +** (or patchsets) into a single changeset (or patchset). A single changegroup +** object may combine changesets or patchsets, but not both. The output is +** always in the same format as the input. +** +** If successful, this function returns SQLITE_OK and populates (*pp) with +** a pointer to a new sqlite3_changegroup object before returning. The caller +** should eventually free the returned object using a call to +** sqlite3changegroup_delete(). If an error occurs, an SQLite error code +** (i.e. SQLITE_NOMEM) is returned and *pp is set to NULL. +** +** The usual usage pattern for an sqlite3_changegroup object is as follows: +** +**
      +**
    • It is created using a call to sqlite3changegroup_new(). +** +**
    • Zero or more changesets (or patchsets) are added to the object +** by calling sqlite3changegroup_add(). +** +**
    • The result of combining all input changesets together is obtained +** by the application via a call to sqlite3changegroup_output(). +** +**
    • The object is deleted using a call to sqlite3changegroup_delete(). +**
+**
+** Any number of calls to add() and output() may be made between the calls to
+** new() and delete(), and in any order.
+**
+** As well as the regular sqlite3changegroup_add() and
+** sqlite3changegroup_output() functions, also available are the streaming
+** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
+*/
+int sqlite3changegroup_new(sqlite3_changegroup **pp);
+
+/*
+** Add all changes within the changeset (or patchset) in buffer pData (size
+** nData bytes) to the changegroup.
+**
+** If the buffer contains a patchset, then all prior calls to this function
+** on the same changegroup object must also have specified patchsets. Or, if
+** the buffer contains a changeset, so must have the earlier calls to this
+** function. Otherwise, SQLITE_ERROR is returned and no changes are added
+** to the changegroup.
+**
+** Rows within the changeset and changegroup are identified by the values in
+** their PRIMARY KEY columns. A change in the changeset is considered to
+** apply to the same row as a change already present in the changegroup if
+** the two rows have the same primary key.
+**
+** Changes to rows that do not already appear in the changegroup are
+** simply copied into it. Or, if both the new changeset and the changegroup
+** contain changes that apply to a single row, the final contents of the
+** changegroup depend on the type of each change, as follows:
+**
+**   Existing Change   New Change   Output Change
+**   ---------------   ----------   -------------
+**   INSERT            INSERT       The new change is ignored. This case does
+**                                  not occur if the new changeset was recorded
+**                                  immediately after the changesets already
+**                                  added to the changegroup.
+**   INSERT            UPDATE       The INSERT change remains in the
+**                                  changegroup. The values in the INSERT
+**                                  change are modified as if the row was
+**                                  inserted by the existing change and then
+**                                  updated according to the new change.
+**   INSERT            DELETE       The existing INSERT is removed from the
+**                                  changegroup. The DELETE is not added.
+**   UPDATE            INSERT       The new change is ignored. This case does
+**                                  not occur if the new changeset was recorded
+**                                  immediately after the changesets already
+**                                  added to the changegroup.
+**   UPDATE            UPDATE       The existing UPDATE remains within the
+**                                  changegroup. It is amended so that the
+**                                  accompanying values are as if the row was
+**                                  updated once by the existing change and
+**                                  then again by the new change.
+**   UPDATE            DELETE       The existing UPDATE is replaced by the new
+**                                  DELETE within the changegroup.
+**   DELETE            INSERT       If one or more of the column values in the
+**                                  row inserted by the new change differ from
+**                                  those in the row deleted by the existing
+**                                  change, the existing DELETE is replaced by
+**                                  an UPDATE within the changegroup. Otherwise,
+**                                  if the inserted row is exactly the same as
+**                                  the deleted row, the existing DELETE is
+**                                  simply discarded.
+**   DELETE            UPDATE       The new change is ignored. This case does
+**                                  not occur if the new changeset was recorded
+**                                  immediately after the changesets already
+**                                  added to the changegroup.
+**   DELETE            DELETE       The new change is ignored. This case does
+**                                  not occur if the new changeset was recorded
+**                                  immediately after the changesets already
+**                                  added to the changegroup.
+**
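A minimal sketch of merging two changesets under the rules in the table above; the buffers (nA,pA) and (nB,pB) are assumed inputs, and *ppOut must eventually be released with sqlite3_free():

#include <sqlite3.h>

static int merge_changesets(int nA, void *pA, int nB, void *pB,
                            int *pnOut, void **ppOut){
  sqlite3_changegroup *pGrp = 0;
  int rc = sqlite3changegroup_new(&pGrp);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  if( pGrp ) sqlite3changegroup_delete(pGrp);
  return rc;
}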
    +** +** If the new changeset contains changes to a table that is already present +** in the changegroup, then the number of columns and the position of the +** primary key columns for the table must be consistent. If this is not the +** case, this function fails with SQLITE_SCHEMA. If the input changeset +** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is +** returned. Or, if an out-of-memory condition occurs during processing, this +** function returns SQLITE_NOMEM. In all cases, if an error occurs the +** final contents of the changegroup is undefined. +** +** If no error occurs, SQLITE_OK is returned. +*/ +int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); + +/* +** Obtain a buffer containing a changeset (or patchset) representing the +** current contents of the changegroup. If the inputs to the changegroup +** were themselves changesets, the output is a changeset. Or, if the +** inputs were patchsets, the output is also a patchset. +** +** As with the output of the sqlite3session_changeset() and +** sqlite3session_patchset() functions, all changes related to a single +** table are grouped together in the output of this function. Tables appear +** in the same order as for the very first changeset added to the changegroup. +** If the second or subsequent changesets added to the changegroup contain +** changes for tables that do not appear in the first changeset, they are +** appended onto the end of the output changeset, again in the order in +** which they are first encountered. +** +** If an error occurs, an SQLite error code is returned and the output +** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK +** is returned and the output variables are set to the size of and a +** pointer to the output buffer, respectively. In this case it is the +** responsibility of the caller to eventually free the buffer using a +** call to sqlite3_free(). +*/ +int sqlite3changegroup_output( + sqlite3_changegroup*, + int *pnData, /* OUT: Size of output buffer in bytes */ + void **ppData /* OUT: Pointer to output buffer */ +); + +/* +** Delete a changegroup object. +*/ +void sqlite3changegroup_delete(sqlite3_changegroup*); + +/* +** CAPI3REF: Apply A Changeset To A Database +** +** Apply a changeset to a database. This function attempts to update the +** "main" database attached to handle db with the changes found in the +** changeset passed via the second and third arguments. +** +** The fourth argument (xFilter) passed to this function is the "filter +** callback". If it is not NULL, then for each table affected by at least one +** change in the changeset, the filter callback is invoked with +** the table name as the second argument, and a copy of the context pointer +** passed as the sixth argument to this function as the first. If the "filter +** callback" returns zero, then no attempt is made to apply any changes to +** the table. Otherwise, if the return value is non-zero or the xFilter +** argument to this function is NULL, all changes related to the table are +** attempted. +** +** For each table that is not excluded by the filter callback, this function +** tests that the target database contains a compatible table. A table is +** considered compatible if all of the following are true: +** +**
      +**
    • The table has the same name as the name recorded in the +** changeset, and +**
    • The table has the same number of columns as recorded in the +** changeset, and +**
    • The table has primary key columns in the same position as +** recorded in the changeset. +**
    +** +** If there is no compatible table, it is not an error, but none of the +** changes associated with the table are applied. A warning message is issued +** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most +** one such warning is issued for each table in the changeset. +** +** For each change for which there is a compatible table, an attempt is made +** to modify the table contents according to the UPDATE, INSERT or DELETE +** change. If a change cannot be applied cleanly, the conflict handler +** function passed as the fifth argument to sqlite3changeset_apply() may be +** invoked. A description of exactly when the conflict handler is invoked for +** each type of change is below. +** +** Unlike the xFilter argument, xConflict may not be passed NULL. The results +** of passing anything other than a valid function pointer as the xConflict +** argument are undefined. +** +** Each time the conflict handler function is invoked, it must return one +** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or +** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned +** if the second argument passed to the conflict handler is either +** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler +** returns an illegal value, any changes already made are rolled back and +** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different +** actions are taken by sqlite3changeset_apply() depending on the value +** returned by each invocation of the conflict-handler function. Refer to +** the documentation for the three +** [SQLITE_CHANGESET_OMIT|available return values] for details. +** +**
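The filter-callback and conflict-handler pattern described above can be sketched as follows. This is an illustration only, not part of the header; the table name "users" and the identifier names are assumptions:

    #include <string.h>

    /* Apply only changes for the table named by pCtx; skip conflicts. */
    static int xFilterOneTable(void *pCtx, const char *zTab){
      return strcmp(zTab, (const char*)pCtx)==0;  /* non-zero => apply */
    }
    static int xConflictOmit(void *pCtx, int eConflict,
                             sqlite3_changeset_iter *p){
      (void)pCtx; (void)eConflict; (void)p;
      return SQLITE_CHANGESET_OMIT;               /* skip the change */
    }

    /* Given an open db and a changeset in nChangeset/pChangeset:
    **   rc = sqlite3changeset_apply(db, nChangeset, pChangeset,
    **                               xFilterOneTable, xConflictOmit,
    **                               (void*)"users");
    */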
    +**
    DELETE Changes
+** For each DELETE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset, the row is deleted from the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from the original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT +** (which can only happen if a foreign key constraint is violated), the +** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT] +** passed as the second argument. This includes the case where the DELETE +** operation is attempted because an earlier call to the conflict handler +** function returned [SQLITE_CHANGESET_REPLACE]. +** +**
    INSERT Changes
    +** For each INSERT change, an attempt is made to insert the new row into +** the database. +** +** If the attempt to insert the row fails because the database already +** contains a row with the same primary key values, the conflict handler +** function is invoked with the second argument set to +** [SQLITE_CHANGESET_CONFLICT]. +** +** If the attempt to insert the row fails because of some other constraint +** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is +** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT]. +** This includes the case where the INSERT operation is re-attempted because +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +** +**
    UPDATE Changes
+** For each UPDATE change, this function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset, the row is updated within the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from an original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** UPDATE changes only contain values for non-primary key fields that are +** to be modified, only those fields need to match the original values to +** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the UPDATE operation is attempted, but SQLite returns +** SQLITE_CONSTRAINT, the conflict-handler function is invoked with +** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument. +** This includes the case where the UPDATE operation is attempted after +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +**
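Inside the handler, the change being processed can be inspected with sqlite3changeset_op(), the iterator accessor documented earlier in this header. A hedged sketch that reports the failing DELETE, INSERT or UPDATE before skipping it (identifier names are illustrative):

    #include <stdio.h>

    static int xConflictReport(void *pCtx, int eConflict,
                               sqlite3_changeset_iter *p){
      const char *zTab = 0;
      int nCol = 0, op = 0, bIndirect = 0;
      (void)pCtx;
      if( sqlite3changeset_op(p, &zTab, &nCol, &op, &bIndirect)==SQLITE_OK ){
        fprintf(stderr, "conflict %d on %s of table %s\n", eConflict,
                op==SQLITE_INSERT ? "INSERT" :
                op==SQLITE_DELETE ? "DELETE" : "UPDATE", zTab);
      }
      return SQLITE_CHANGESET_OMIT;  /* leave the target row unchanged */
    }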
+** +** It is safe to execute SQL statements, including those that write to the +** table that the callback relates to, from within the xConflict callback. +** This can be used to further customize the application's conflict +** resolution strategy. +** +** All changes made by this function are enclosed in a savepoint transaction. +** If any other error (aside from a constraint failure when attempting to +** write to the target database) occurs, then the savepoint transaction is +** rolled back, restoring the target database to its original state, and an +** SQLite error code is returned. +*/ +int sqlite3changeset_apply( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); + +/* +** CAPI3REF: Constants Passed To The Conflict Handler +** +** Values that may be passed as the second argument to a conflict-handler. +** +**
    +**
    SQLITE_CHANGESET_DATA
    +** The conflict handler is invoked with CHANGESET_DATA as the second argument +** when processing a DELETE or UPDATE change if a row with the required +** PRIMARY KEY fields is present in the database, but one or more other +** (non primary-key) fields modified by the update do not contain the +** expected "before" values. +** +** The conflicting row, in this case, is the database row with the matching +** primary key. +** +**
    SQLITE_CHANGESET_NOTFOUND
    +** The conflict handler is invoked with CHANGESET_NOTFOUND as the second +** argument when processing a DELETE or UPDATE change if a row with the +** required PRIMARY KEY fields is not present in the database. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
    SQLITE_CHANGESET_CONFLICT
    +** CHANGESET_CONFLICT is passed as the second argument to the conflict +** handler while processing an INSERT change if the operation would result +** in duplicate primary key values. +** +** The conflicting row in this case is the database row with the matching +** primary key. +** +**
    SQLITE_CHANGESET_FOREIGN_KEY
    +** If foreign key handling is enabled, and applying a changeset leaves the +** database in a state containing foreign key violations, the conflict +** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument +** exactly once before the changeset is committed. If the conflict handler +** returns CHANGESET_OMIT, the changes, including those that caused the +** foreign key constraint violation, are committed. Or, if it returns +** CHANGESET_ABORT, the changeset is rolled back. +** +** No current or conflicting row information is provided. The only function +** it is possible to call on the supplied sqlite3_changeset_iter handle +** is sqlite3changeset_fk_conflicts(). +** +**
    SQLITE_CHANGESET_CONSTRAINT
    +** If any other constraint violation occurs while applying a change (i.e. +** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is +** invoked with CHANGESET_CONSTRAINT as the second argument. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
    +*/ +#define SQLITE_CHANGESET_DATA 1 +#define SQLITE_CHANGESET_NOTFOUND 2 +#define SQLITE_CHANGESET_CONFLICT 3 +#define SQLITE_CHANGESET_CONSTRAINT 4 +#define SQLITE_CHANGESET_FOREIGN_KEY 5 + +/* +** CAPI3REF: Constants Returned By The Conflict Handler +** +** A conflict handler callback must return one of the following three values. +** +**
    +**
    SQLITE_CHANGESET_OMIT
+** If a conflict handler returns this value, no special action is taken. The +** change that caused the conflict is not applied. The session module +** continues to the next change in the changeset. +** +**
    SQLITE_CHANGESET_REPLACE
    +** This value may only be returned if the second argument to the conflict +** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this +** is not the case, any changes applied so far are rolled back and the +** call to sqlite3changeset_apply() returns SQLITE_MISUSE. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict +** handler, then the conflicting row is either updated or deleted, depending +** on the type of change. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict +** handler, then the conflicting row is removed from the database and a +** second attempt to apply the change is made. If this second attempt fails, +** the original row is restored to the database before continuing. +** +**
    SQLITE_CHANGESET_ABORT
    +** If this value is returned, any changes applied so far are rolled back +** and the call to sqlite3changeset_apply() returns SQLITE_ABORT. +**
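Taken together, the three return codes support simple policies. Here is a sketch (illustrative only, not part of the header) of a "changeset wins" handler that follows the rules above: REPLACE only for the DATA and CONFLICT cases, ABORT on foreign key trouble, OMIT otherwise.

    static int changeset_wins_cb(void *pCtx, int eConflict,
                                 sqlite3_changeset_iter *p){
      (void)pCtx; (void)p;
      switch( eConflict ){
        case SQLITE_CHANGESET_DATA:
        case SQLITE_CHANGESET_CONFLICT:
          return SQLITE_CHANGESET_REPLACE;  /* take the incoming change */
        case SQLITE_CHANGESET_FOREIGN_KEY:
          return SQLITE_CHANGESET_ABORT;    /* roll back the whole apply */
        default:                            /* NOTFOUND, CONSTRAINT */
          return SQLITE_CHANGESET_OMIT;     /* skip this change */
      }
    }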
    +*/ +#define SQLITE_CHANGESET_OMIT 0 +#define SQLITE_CHANGESET_REPLACE 1 +#define SQLITE_CHANGESET_ABORT 2 + +/* +** CAPI3REF: Streaming Versions of API functions. +** +** The six streaming API xxx_strm() functions serve similar purposes to the +** corresponding non-streaming API functions: +** +** +** +**
+**   Streaming function              Non-streaming equivalent
+**   -------------------------------------------------------------
+**   sqlite3changeset_apply_strm     [sqlite3changeset_apply]
+**   sqlite3changeset_concat_strm    [sqlite3changeset_concat]
+**   sqlite3changeset_invert_strm    [sqlite3changeset_invert]
+**   sqlite3changeset_start_strm     [sqlite3changeset_start]
+**   sqlite3session_changeset_strm   [sqlite3session_changeset]
+**   sqlite3session_patchset_strm    [sqlite3session_patchset]
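As a sketch of the callback pattern detailed below (not part of the header), stdio-backed implementations of the xInput/xOutput signatures might look like this; the FILE* handles are assumptions passed through the pIn/pOut context pointers:

    #include <stdio.h>

    static int xInputFile(void *pIn, void *pData, int *pnData){
      size_t n = fread(pData, 1, (size_t)(*pnData), (FILE*)pIn);
      if( n==0 && ferror((FILE*)pIn) ) return SQLITE_IOERR_READ;
      *pnData = (int)n;            /* zero signals end of input */
      return SQLITE_OK;
    }
    static int xOutputFile(void *pOut, const void *pData, int nData){
      size_t n = fwrite(pData, 1, (size_t)nData, (FILE*)pOut);
      return n==(size_t)nData ? SQLITE_OK : SQLITE_IOERR_WRITE;
    }

    /* For example, to invert a changeset read from fIn into fOut:
    **   rc = sqlite3changeset_invert_strm(xInputFile, (void*)fIn,
    **                                     xOutputFile, (void*)fOut);
    */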
+** +** Non-streaming functions that accept changesets (or patchsets) as input +** require that the entire changeset be stored in a single buffer in memory. +** Similarly, those that return a changeset or patchset do so by returning +** a pointer to a single large buffer allocated using sqlite3_malloc(). +** Normally this is convenient. However, if an application running in a +** low-memory environment is required to handle very large changesets, the +** large contiguous memory allocations required can become onerous. +** +** In order to avoid this problem, instead of a single large buffer, input +** is passed to the streaming API functions by way of a callback function that +** the sessions module invokes to incrementally request input data as it is +** required. In all cases, a pair of API function parameters such as +**
    +**        int nChangeset,
    +**        void *pChangeset,
    +**  
    +** +** Is replaced by: +** +**
    +**        int (*xInput)(void *pIn, void *pData, int *pnData),
    +**        void *pIn,
    +**  
    +** +** Each time the xInput callback is invoked by the sessions module, the first +** argument passed is a copy of the supplied pIn context pointer. The second +** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no +** error occurs the xInput method should copy up to (*pnData) bytes of data +** into the buffer and set (*pnData) to the actual number of bytes copied +** before returning SQLITE_OK. If the input is completely exhausted, (*pnData) +** should be set to zero to indicate this. Or, if an error occurs, an SQLite +** error code should be returned. In all cases, if an xInput callback returns +** an error, all processing is abandoned and the streaming API function +** returns a copy of the error code to the caller. +** +** In the case of sqlite3changeset_start_strm(), the xInput callback may be +** invoked by the sessions module at any point during the lifetime of the +** iterator. If such an xInput callback returns an error, the iterator enters +** an error state, whereby all subsequent calls to iterator functions +** immediately fail with the same error code as returned by xInput. +** +** Similarly, streaming API functions that return changesets (or patchsets) +** return them in chunks by way of a callback function instead of via a +** pointer to a single large buffer. In this case, a pair of parameters such +** as: +** +**
    +**        int *pnChangeset,
    +**        void **ppChangeset,
    +**  
    +** +** Is replaced by: +** +**
    +**        int (*xOutput)(void *pOut, const void *pData, int nData),
    +**        void *pOut
    +**  
    +** +** The xOutput callback is invoked zero or more times to return data to +** the application. The first parameter passed to each call is a copy of the +** pOut pointer supplied by the application. The second parameter, pData, +** points to a buffer nData bytes in size containing the chunk of output +** data being returned. If the xOutput callback successfully processes the +** supplied data, it should return SQLITE_OK to indicate success. Otherwise, +** it should return some other SQLite error code. In this case processing +** is immediately abandoned and the streaming API function returns a copy +** of the xOutput error code to the application. +** +** The sessions module never invokes an xOutput callback with the third +** parameter set to a value less than or equal to zero. Other than this, +** no guarantees are made as to the size of the chunks of data returned. +*/ +int sqlite3changeset_apply_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); +int sqlite3changeset_concat_strm( + int (*xInputA)(void *pIn, void *pData, int *pnData), + void *pInA, + int (*xInputB)(void *pIn, void *pData, int *pnData), + void *pInB, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changeset_invert_strm( + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changeset_start_strm( + sqlite3_changeset_iter **pp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +int sqlite3session_changeset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3session_patchset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +int sqlite3changegroup_add_strm(sqlite3_changegroup*, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +int sqlite3changegroup_output_strm(sqlite3_changegroup*, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); + + +/* +** Make sure we can call this stuff from C++. +*/ +#ifdef __cplusplus +} +#endif + +#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */ + +/******** End of sqlite3session.h *********/ +/******** Begin file fts5.h *********/ /* ** 2014 May 31 ** @@ -8297,11 +9904,13 @@ struct Fts5PhraseIter { ** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid ** ** with $p set to a phrase equivalent to the phrase iPhrase of the -** current query is executed. For each row visited, the callback function -** passed as the fourth argument is invoked. The context and API objects -** passed to the callback function may be used to access the properties of -** each matched row. Invoking Api.xUserData() returns a copy of the pointer -** passed as the third argument to pUserData. +** current query is executed. Any column filter that applies to +** phrase iPhrase of the current query is included in $p. 
For each +** row visited, the callback function passed as the fourth argument +** is invoked. The context and API objects passed to the callback +** function may be used to access the properties of each matched row. +** Invoking Api.xUserData() returns a copy of the pointer passed as +** the third argument to pUserData. ** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. @@ -8470,7 +10079,7 @@ struct Fts5ExtensionApi { ** behaviour. The structure methods are expected to function as follows: ** ** xCreate: -** This function is used to allocate and inititalize a tokenizer instance. +** This function is used to allocate and initialize a tokenizer instance. ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) @@ -8730,4 +10339,4 @@ struct fts5_api { #endif /* _FTS5_H */ - +/******** End of fts5.h *********/ diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go index 55c8ad7..e6e0801 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go @@ -7,7 +7,11 @@ package sqlite3 /* +#ifndef USE_LIBSQLITE3 #include <sqlite3-binding.h> +#else +#include <sqlite3.h> +#endif #include <stdlib.h> */ import "C" diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h index f02f2c2..ce87e74 100644 --- a/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h +++ b/vendor/github.com/mattn/go-sqlite3/sqlite3ext.h @@ -12,14 +12,12 @@ ** This header file defines the SQLite interface for use by ** shared libraries that want to be imported as extensions into ** an SQLite instance. Shared libraries that intend to be loaded -** as extensions by SQLite should #include this file instead of +** as extensions by SQLite should #include this file instead of ** sqlite3.h. */ -#ifndef _SQLITE3EXT_H_ -#define _SQLITE3EXT_H_ -#include "sqlite3-binding.h" - -typedef struct sqlite3_api_routines sqlite3_api_routines; +#ifndef SQLITE3EXT_H +#define SQLITE3EXT_H +#include "sqlite3.h" /* ** The following structure holds pointers to all of the SQLite API @@ -281,8 +279,21 @@ struct sqlite3_api_routines { int (*db_cacheflush)(sqlite3*); /* Version 3.12.0 and later */ int (*system_errno)(sqlite3*); + /* Version 3.14.0 and later */ + int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*); + char *(*expanded_sql)(sqlite3_stmt*); }; +/* +** This is the function signature used for all extension entry points. It +** is also defined in the file "loadext.c". +*/ +typedef int (*sqlite3_loadext_entry)( + sqlite3 *db, /* Handle to the database. */ + char **pzErrMsg, /* Used to set error string on failure. */ + const sqlite3_api_routines *pThunk /* Extension API function pointers. */ +); + /* ** The following macros redefine the API routines so that they are ** redirected through the global sqlite3_api structure.
@@ -526,21 +537,24 @@ struct sqlite3_api_routines { #define sqlite3_db_cacheflush sqlite3_api->db_cacheflush /* Version 3.12.0 and later */ #define sqlite3_system_errno sqlite3_api->system_errno +/* Version 3.14.0 and later */ +#define sqlite3_trace_v2 sqlite3_api->trace_v2 +#define sqlite3_expanded_sql sqlite3_api->expanded_sql #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) - /* This case when the file really is being compiled as a loadable + /* This case when the file really is being compiled as a loadable ** extension */ # define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; # define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; # define SQLITE_EXTENSION_INIT3 \ extern const sqlite3_api_routines *sqlite3_api; #else - /* This case when the file is being statically linked into the + /* This case when the file is being statically linked into the ** application */ # define SQLITE_EXTENSION_INIT1 /*no-op*/ # define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ # define SQLITE_EXTENSION_INIT3 /*no-op*/ #endif -#endif /* _SQLITE3EXT_H_ */ +#endif /* SQLITE3EXT_H */ diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 4490521..a554e79 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -8,6 +8,7 @@ package mapstructure import ( + "encoding/json" "errors" "fmt" "reflect" @@ -306,6 +307,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -327,6 +329,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er } else { return fmt.Errorf("cannot parse '%s' as int: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -413,6 +423,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -434,6 +445,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) } else { return fmt.Errorf("cannot parse '%s' as float: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", diff --git a/vendor/github.com/pelletier/go-buffruneio/README.md b/vendor/github.com/pelletier/go-buffruneio/README.md new file mode 100644 index 0000000..ff608b3 --- /dev/null +++ b/vendor/github.com/pelletier/go-buffruneio/README.md @@ -0,0 +1,62 @@ +# buffruneio + +[![Tests 
Status](https://travis-ci.org/pelletier/go-buffruneio.svg?branch=master)](https://travis-ci.org/pelletier/go-buffruneio) +[![GoDoc](https://godoc.org/github.com/pelletier/go-buffruneio?status.svg)](https://godoc.org/github.com/pelletier/go-buffruneio) + +Buffruneio is a wrapper around bufio to provide buffered runes access with +unlimited unreads. + +```go +import "github.com/pelletier/go-buffruneio" +``` + +## Examples + +```go +import ( + "fmt" + "github.com/pelletier/go-buffruneio" + "strings" +) + +reader := buffruneio.NewReader(strings.NewReader("abcd")) +fmt.Println(reader.ReadRune()) // 'a' +fmt.Println(reader.ReadRune()) // 'b' +fmt.Println(reader.ReadRune()) // 'c' +reader.UnreadRune() +reader.UnreadRune() +fmt.Println(reader.ReadRune()) // 'b' +fmt.Println(reader.ReadRune()) // 'c' +``` + +## Documentation + +The documentation and additional examples are available at +[godoc.org](http://godoc.org/github.com/pelletier/go-buffruneio). + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-buffruneio). Any feedback is +much appreciated! + +## LICENSE + +Copyright (c) 2016 Thomas Pelletier + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go new file mode 100644 index 0000000..41cab87 --- /dev/null +++ b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go @@ -0,0 +1,110 @@ +// Package buffruneio is a wrapper around bufio to provide buffered runes access with unlimited unreads. +package buffruneio + +import ( + "bufio" + "container/list" + "errors" + "io" +) + +// Rune to indicate end of file. +const ( + EOF = -(iota + 1) +) + +// ErrNoRuneToUnread is returned by UnreadRune() when the read index is already at the beginning of the buffer. +var ErrNoRuneToUnread = errors.New("no rune to unwind") + +// Reader implements runes buffering for an io.Reader object. +type Reader struct { + buffer *list.List + current *list.Element + input *bufio.Reader +} + +// NewReader returns a new Reader. 
+func NewReader(rd io.Reader) *Reader { + return &Reader{ + buffer: list.New(), + input: bufio.NewReader(rd), + } +} + +func (rd *Reader) feedBuffer() error { + r, _, err := rd.input.ReadRune() + + if err != nil { + if err != io.EOF { + return err + } + r = EOF + } + + rd.buffer.PushBack(r) + if rd.current == nil { + rd.current = rd.buffer.Back() + } + return nil +} + +// ReadRune reads the next rune from buffer, or from the underlying reader if needed. +func (rd *Reader) ReadRune() (rune, error) { + if rd.current == rd.buffer.Back() || rd.current == nil { + err := rd.feedBuffer() + if err != nil { + return EOF, err + } + } + + r := rd.current.Value + rd.current = rd.current.Next() + return r.(rune), nil +} + +// UnreadRune pushes back the previously read rune in the buffer, extending it if needed. +func (rd *Reader) UnreadRune() error { + if rd.current == rd.buffer.Front() { + return ErrNoRuneToUnread + } + if rd.current == nil { + rd.current = rd.buffer.Back() + } else { + rd.current = rd.current.Prev() + } + return nil +} + +// Forget removes runes stored before the current stream position index. +func (rd *Reader) Forget() { + if rd.current == nil { + rd.current = rd.buffer.Back() + } + for ; rd.current != rd.buffer.Front(); rd.buffer.Remove(rd.current.Prev()) { + } +} + +// Peek returns at most the next n runes, reading from the uderlying source if +// needed. Does not move the current index. It includes EOF if reached. +func (rd *Reader) Peek(n int) []rune { + res := make([]rune, 0, n) + cursor := rd.current + for i := 0; i < n; i++ { + if cursor == nil { + err := rd.feedBuffer() + if err != nil { + return res + } + cursor = rd.buffer.Back() + } + if cursor != nil { + r := cursor.Value.(rune) + res = append(res, r) + if r == EOF { + return res + } + cursor = cursor.Next() + } + } + return res +} diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE new file mode 100644 index 0000000..5f9f53d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2013 - 2016 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md new file mode 100644 index 0000000..b511f39 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -0,0 +1,120 @@ +# go-toml + +Go library for the [TOML](https://github.com/mojombo/toml) format. 
+ +This library supports TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) +[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/goadesign/goa/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) +[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) + +## Features + +Go-toml provides the following features for using data parsed from TOML documents: + +* Load TOML documents from files and string data +* Easily navigate TOML structure using TomlTree +* Line & column position data for all parsed elements +* Query support similar to JSON-Path +* Syntax errors contain line and column numbers + +Go-toml is designed to help cover use-cases not covered by reflection-based TOML parsing: + +* Semantic evaluation of parsed TOML +* Informing a user of mistakes in the source document, after it has been parsed +* Programmatic handling of default values on a case-by-case basis +* Using a TOML document as a flexible data-store + +## Import + + import "github.com/pelletier/go-toml" + +## Usage + +### Example + +Say you have a TOML file that looks like this: + +```toml +[postgres] +user = "pelletier" +password = "mypassword" +``` + +Read the username and password like this: + +```go +import ( + "fmt" + "github.com/pelletier/go-toml" +) + +config, err := toml.LoadFile("config.toml") +if err != nil { + fmt.Println("Error ", err.Error()) +} else { + // retrieve data directly + user := config.Get("postgres.user").(string) + password := config.Get("postgres.password").(string) + + // or using an intermediate object + configTree := config.Get("postgres").(*toml.TomlTree) + user = configTree.Get("user").(string) + password = configTree.Get("password").(string) + fmt.Println("User is ", user, ". Password is ", password) + + // show where elements are in the file + fmt.Printf("User position: %v\n", configTree.GetPosition("user")) + fmt.Printf("Password position: %v\n", configTree.GetPosition("password")) + + // use a query to gather elements without walking the tree + results, _ := config.Query("$..[user,password]") + for ii, item := range results.Values() { + fmt.Printf("Query result %d: %v\n", ii, item) + } +} +``` + +## Documentation + +The documentation and additional examples are available at +[godoc.org](http://godoc.org/github.com/pelletier/go-toml). + +## Tools + +Go-toml provides two handy command line tools: + +* `tomll`: Reads TOML files and lints them. + + ``` + go install github.com/pelletier/go-toml/cmd/tomll + tomll --help + ``` +* `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + go install github.com/pelletier/go-toml/cmd/tomljson + tomljson --help + ``` + +## Contribute + +Feel free to report bugs and patches using GitHub's pull requests system on +[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be +much appreciated! + +### Run tests + +You have to make sure two kinds of tests run: + +1. The Go unit tests +2. The TOML examples base + +You can run both of them using `./test.sh`. + +## License + +The MIT License (MIT).
Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/clean.sh b/vendor/github.com/pelletier/go-toml/clean.sh new file mode 100755 index 0000000..44d49d9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/clean.sh @@ -0,0 +1,6 @@ +#!/bin/bash +# fail out of the script if anything here fails +set -e + +# clear out stuff generated by test.sh +rm -rf src test_program_bin toml-test diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go new file mode 100644 index 0000000..c8c9add --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -0,0 +1,250 @@ +// Package toml is a TOML markup language parser. +// +// This version supports the specification as described in +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md +// +// TOML Parsing +// +// TOML data may be parsed in two ways: by file, or by string. +// +// // load TOML data by filename +// tree, err := toml.LoadFile("filename.toml") +// +// // load TOML data stored in a string +// tree, err := toml.Load(stringContainingTomlData) +// +// Either way, the result is a TomlTree object that can be used to navigate the +// structure and data within the original document. +// +// +// Getting data from the TomlTree +// +// After parsing TOML data with Load() or LoadFile(), use the Has() and Get() +// methods on the returned TomlTree, to find your way through the document data. +// +// if tree.Has("foo") { +// fmt.Printf("foo is: %v\n", tree.Get("foo")) +// } +// +// Working with Paths +// +// Go-toml has support for basic dot-separated key paths on the Has(), Get(), Set() +// and GetDefault() methods. These are the same kind of key paths used within the +// TOML specification for struct names. +// +// // looks for a key named 'baz', within struct 'bar', within struct 'foo' +// tree.Has("foo.bar.baz") +// +// // returns the key at this path, if it is there +// tree.Get("foo.bar.baz") +// +// TOML allows keys to contain '.', which can cause this syntax to be problematic +// for some documents. In such cases, use the GetPath(), HasPath(), and SetPath() +// methods to explicitly define the path. This form is also faster, since +// it avoids having to parse the passed key for '.' delimiters. +// +// // looks for a key named 'baz', within struct 'bar', within struct 'foo' +// tree.HasPath([]string{"foo","bar","baz"}) +// +// // returns the key at this path, if it is there +// tree.GetPath([]string{"foo","bar","baz"}) +// +// Note that this is distinct from the heavyweight query syntax supported by +// TomlTree.Query() and the Query() struct (see below). +// +// Position Support +// +// Each element within the TomlTree is stored with position metadata, which is +// invaluable for providing semantic feedback to a user. This helps in +// situations where the TOML file parses correctly, but contains data that is +// not correct for the application. In such cases, an error message can be +// generated that indicates the problem line and column number in the source +// TOML document.
+// +// // load TOML data +// tree, _ := toml.LoadFile("filename.toml") +// +// // get an entry and report an error if it's the wrong type +// element := tree.Get("foo") +// if value, ok := element.(int64); !ok { +// return fmt.Errorf("%v: Element 'foo' must be an integer", tree.GetPosition("foo")) +// } +// +// // report an error if an expected element is missing +// if !tree.Has("bar") { +// return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition("")) +// } +// +// Query Support +// +// The TOML query path implementation is based loosely on the JSONPath specification: +// http://goessner.net/articles/JsonPath/ +// +// The idea behind a query path is to allow quick access to any element, or set +// of elements within a TOML document, with a single expression. +// +// result, err := tree.Query("$.foo.bar.baz") +// +// This is roughly equivalent to: +// +// next := tree.Get("foo") +// if next != nil { +// next = next.Get("bar") +// if next != nil { +// next = next.Get("baz") +// } +// } +// result := next +// +// err is non-nil if a parsing error occurs. +// +// If no node in the tree matches the query, result will simply contain an empty list of +// items. +// +// As illustrated above, the query path is much more efficient, especially since +// the structure of the TOML file can vary. Rather than making assumptions about +// a document's structure, a query allows the programmer to make structured +// requests into the document, and get zero or more values as a result. +// +// The syntax of a query begins with a root token, followed by any number of +// sub-expressions: +// +// $ +// Root of the TOML tree. This must always come first. +// .name +// Selects child of this node, where 'name' is a TOML key +// name. +// ['name'] +// Selects child of this node, where 'name' is a string +// containing a TOML key name. +// [index] +// Selects child array element at 'index'. +// ..expr +// Recursively selects all children, filtered by a union, +// index, or slice expression. +// ..* +// Recursive selection of all nodes at this point in the +// tree. +// .* +// Selects all children of the current node. +// [expr,expr] +// Union operator - a logical 'or' grouping of two or more +// sub-expressions: index, key name, or filter. +// [start:end:step] +// Slice operator - selects array elements from start to +// end-1, at the given step. All three arguments are +// optional. +// [?(filter)] +// Named filter expression - the function 'filter' is +// used to filter children at this node. +// +// Query Indexes And Slices +// +// Index expressions perform no bounds checking, and will contribute no +// values to the result set if the provided index or index range is invalid. +// Negative indexes represent values from the end of the array, counting backwards. +// +// // select the last index of the array named 'foo' +// tree.Query("$.foo[-1]") +// +// Slice expressions are supported, by using ':' to separate a start/end index pair. +// +// // select up to the first five elements in the array +// tree.Query("$.foo[0:5]") +// +// Slice expressions also allow negative indexes for the start and stop +// arguments. +// +// // select all array elements.
+// tree.Query("$.foo[0:-1]") +// +// Slice expressions may have an optional stride/step parameter: +// +// // select every other element +// tree.Query("$.foo[0:-1:2]") +// +// Slice start and end parameters are also optional: +// +// // these are all equivalent and select all the values in the array +// tree.Query("$.foo[:]") +// tree.Query("$.foo[0:]") +// tree.Query("$.foo[:-1]") +// tree.Query("$.foo[0:-1:]") +// tree.Query("$.foo[::1]") +// tree.Query("$.foo[0::1]") +// tree.Query("$.foo[:-1:1]") +// tree.Query("$.foo[0:-1:1]") +// +// Query Filters +// +// Query filters are used within a Union [,] or single Filter [] expression. +// A filter only allows nodes that qualify through to the next expression, +// and/or into the result set. +// +// // returns children of foo that are permitted by the 'bar' filter. +// tree.Query("$.foo[?(bar)]") +// +// There are several filters provided with the library: +// +// tree +// Allows nodes of type TomlTree. +// int +// Allows nodes of type int64. +// float +// Allows nodes of type float64. +// string +// Allows nodes of type string. +// time +// Allows nodes of type time.Time. +// bool +// Allows nodes of type bool. +// +// Query Results +// +// An executed query returns a QueryResult object. This contains the nodes +// in the TOML tree that qualify the query expression. Position information +// is also available for each value in the set. +// +// // display the results of a query +// results := tree.Query("$.foo.bar.baz") +// for idx, value := results.Values() { +// fmt.Println("%v: %v", results.Positions()[idx], value) +// } +// +// Compiled Queries +// +// Queries may be executed directly on a TomlTree object, or compiled ahead +// of time and executed discretely. The former is more convienent, but has the +// penalty of having to recompile the query expression each time. +// +// // basic query +// results := tree.Query("$.foo.bar.baz") +// +// // compiled query +// query := toml.CompileQuery("$.foo.bar.baz") +// results := query.Execute(tree) +// +// // run the compiled query again on a different tree +// moreResults := query.Execute(anotherTree) +// +// User Defined Query Filters +// +// Filter expressions may also be user defined by using the SetFilter() +// function on the Query object. The function must return true/false, which +// signifies if the passed node is kept or discarded, respectively. +// +// // create a query that references a user-defined filter +// query, _ := CompileQuery("$[?(bazOnly)]") +// +// // define the filter, and assign it to the query +// query.SetFilter("bazOnly", func(node interface{}) bool{ +// if tree, ok := node.(*TomlTree); ok { +// return tree.Has("baz") +// } +// return false // reject all other node types +// }) +// +// // run the query +// query.Execute(tree) +// +package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml new file mode 100644 index 0000000..12950a1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml @@ -0,0 +1,29 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. 
+ [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml new file mode 100644 index 0000000..3d902f2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/example.toml @@ -0,0 +1,29 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go new file mode 100644 index 0000000..4deed81 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -0,0 +1,81 @@ +// Parsing keys handling both bare and quoted keys. + +package toml + +import ( + "bytes" + "fmt" + "unicode" +) + +func parseKey(key string) ([]string, error) { + groups := []string{} + var buffer bytes.Buffer + inQuotes := false + escapeNext := false + ignoreSpace := true + expectDot := false + + for _, char := range key { + if ignoreSpace { + if char == ' ' { + continue + } + ignoreSpace = false + } + if escapeNext { + buffer.WriteRune(char) + escapeNext = false + continue + } + switch char { + case '\\': + escapeNext = true + continue + case '"': + inQuotes = !inQuotes + expectDot = false + case '.': + if inQuotes { + buffer.WriteRune(char) + } else { + groups = append(groups, buffer.String()) + buffer.Reset() + ignoreSpace = true + expectDot = false + } + case ' ': + if inQuotes { + buffer.WriteRune(char) + } else { + expectDot = true + } + default: + if !inQuotes && !isValidBareChar(char) { + return nil, fmt.Errorf("invalid bare character: %c", char) + } + if !inQuotes && expectDot { + return nil, fmt.Errorf("what?") + } + buffer.WriteRune(char) + expectDot = false + } + } + if inQuotes { + return nil, fmt.Errorf("mismatched quotes") + } + if escapeNext { + return nil, fmt.Errorf("unfinished escape sequence") + } + if buffer.Len() > 0 { + groups = append(groups, buffer.String()) + } + if len(groups) == 0 { + return nil, fmt.Errorf("empty key") + } + return groups, nil +} + +func isValidBareChar(r rune) bool { + return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) +} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go new file mode 100644 index 0000000..eb4d999 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -0,0 +1,655 @@ +// TOML lexer. 
+// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "github.com/pelletier/go-buffruneio" +) + +var dateRegexp *regexp.Regexp + +// Define state functions +type tomlLexStateFn func() tomlLexStateFn + +// Define lexer +type tomlLexer struct { + input *buffruneio.Reader // Textual source + buffer []rune // Runes composing the current token + tokens chan token + depth int + line int + col int + endbufferLine int + endbufferCol int +} + +// Basic read operations on input + +func (l *tomlLexer) read() rune { + r, err := l.input.ReadRune() + if err != nil { + panic(err) + } + if r == '\n' { + l.endbufferLine++ + l.endbufferCol = 1 + } else { + l.endbufferCol++ + } + return r +} + +func (l *tomlLexer) next() rune { + r := l.read() + + if r != eof { + l.buffer = append(l.buffer, r) + } + return r +} + +func (l *tomlLexer) ignore() { + l.buffer = make([]rune, 0) + l.line = l.endbufferLine + l.col = l.endbufferCol +} + +func (l *tomlLexer) skip() { + l.next() + l.ignore() +} + +func (l *tomlLexer) fastForward(n int) { + for i := 0; i < n; i++ { + l.next() + } +} + +func (l *tomlLexer) emitWithValue(t tokenType, value string) { + l.tokens <- token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + } + l.ignore() +} + +func (l *tomlLexer) emit(t tokenType) { + l.emitWithValue(t, string(l.buffer)) +} + +func (l *tomlLexer) peek() rune { + r, err := l.input.ReadRune() + if err != nil { + panic(err) + } + l.input.UnreadRune() + return r +} + +func (l *tomlLexer) follow(next string) bool { + for _, expectedRune := range next { + r, err := l.input.ReadRune() + defer l.input.UnreadRune() + if err != nil { + panic(err) + } + if expectedRune != r { + return false + } + } + return true +} + +// Error management + +func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { + l.tokens <- token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + } + return nil +} + +// State functions + +func (l *tomlLexer) lexVoid() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '[': + return l.lexKeyGroup + case '#': + return l.lexComment + case '=': + return l.lexEqual + case '\r': + fallthrough + case '\n': + l.skip() + continue + } + + if isSpace(next) { + l.skip() + } + + if l.depth > 0 { + return l.lexRvalue + } + + if isKeyStartChar(next) { + return l.lexKey + } + + if next == eof { + l.next() + break + } + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexRvalue() tomlLexStateFn { + for { + next := l.peek() + switch next { + case '.': + return l.errorf("cannot start float with a dot") + case '=': + return l.lexEqual + case '[': + l.depth++ + return l.lexLeftBracket + case ']': + l.depth-- + return l.lexRightBracket + case '{': + return l.lexLeftCurlyBrace + case '}': + return l.lexRightCurlyBrace + case '#': + return l.lexComment + case '"': + return l.lexString + case '\'': + return l.lexLiteralString + case ',': + return l.lexComma + case '\r': + fallthrough + case '\n': + l.skip() + if l.depth == 0 { + return l.lexVoid + } + return l.lexRvalue + case '_': + return l.errorf("cannot start number with underscore") + } + + if l.follow("true") { + return l.lexTrue + } + + if l.follow("false") { + return l.lexFalse + } + + if isSpace(next) { + l.skip() + continue + } + + if next == eof { + l.next() + break + } + + possibleDate := string(l.input.Peek(35)) + dateMatch 
:= dateRegexp.FindString(possibleDate) + if dateMatch != "" { + l.fastForward(len(dateMatch)) + return l.lexDate + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if isAlphanumeric(next) { + return l.lexKey + } + + return l.errorf("no value can start with %c", next) + } + + l.emit(tokenEOF) + return nil +} + +func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenLeftCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { + l.next() + l.emit(tokenRightCurlyBrace) + return l.lexRvalue +} + +func (l *tomlLexer) lexDate() tomlLexStateFn { + l.emit(tokenDate) + return l.lexRvalue +} + +func (l *tomlLexer) lexTrue() tomlLexStateFn { + l.fastForward(4) + l.emit(tokenTrue) + return l.lexRvalue +} + +func (l *tomlLexer) lexFalse() tomlLexStateFn { + l.fastForward(5) + l.emit(tokenFalse) + return l.lexRvalue +} + +func (l *tomlLexer) lexEqual() tomlLexStateFn { + l.next() + l.emit(tokenEqual) + return l.lexRvalue +} + +func (l *tomlLexer) lexComma() tomlLexStateFn { + l.next() + l.emit(tokenComma) + return l.lexRvalue +} + +func (l *tomlLexer) lexKey() tomlLexStateFn { + growingString := "" + + for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { + if r == '"' { + l.next() + str, err := l.lexStringAsString(`"`, false, true) + if err != nil { + return l.errorf(err.Error()) + } + growingString += `"` + str + `"` + l.next() + continue + } else if r == '\n' { + return l.errorf("keys cannot contain new lines") + } else if isSpace(r) { + break + } else if !isValidBareChar(r) { + return l.errorf("keys cannot contain %c character", r) + } + growingString += string(r) + l.next() + } + l.emitWithValue(tokenKey, growingString) + return l.lexVoid +} + +func (l *tomlLexer) lexComment() tomlLexStateFn { + for next := l.peek(); next != '\n' && next != eof; next = l.peek() { + if next == '\r' && l.follow("\r\n") { + break + } + l.next() + } + l.ignore() + return l.lexVoid +} + +func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { + l.next() + l.emit(tokenLeftBracket) + return l.lexRvalue +} + +func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { + growingString := "" + + if discardLeadingNewLine { + if l.follow("\r\n") { + l.skip() + l.skip() + } else if l.peek() == '\n' { + l.skip() + } + } + + // find end of string + for { + if l.follow(terminator) { + return growingString, nil + } + + next := l.peek() + if next == eof { + break + } + growingString += string(l.next()) + } + + return "", errors.New("unclosed string") +} + +func (l *tomlLexer) lexLiteralString() tomlLexStateFn { + l.skip() + + // handle special case for triple-quote + terminator := "'" + discardLeadingNewLine := false + if l.follow("''") { + l.skip() + l.skip() + terminator = "'''" + discardLeadingNewLine = true + } + + str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) + if err != nil { + return l.errorf(err.Error()) + } + + l.emitWithValue(tokenString, str) + l.fastForward(len(terminator)) + l.ignore() + return l.lexRvalue +} + +// Lex a string and return the results as a string. +// Terminator is the substring indicating the end of the token. +// The resulting string does not include the terminator. 
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+	growingString := ""
+
+	if discardLeadingNewLine {
+		if l.follow("\r\n") {
+			l.skip()
+			l.skip()
+		} else if l.peek() == '\n' {
+			l.skip()
+		}
+	}
+
+	for {
+		if l.follow(terminator) {
+			return growingString, nil
+		}
+
+		if l.follow("\\") {
+			l.next()
+			switch l.peek() {
+			case '\r':
+				fallthrough
+			case '\n':
+				fallthrough
+			case '\t':
+				fallthrough
+			case ' ':
+				// skip all whitespace chars following backslash
+				for strings.ContainsRune("\r\n\t ", l.peek()) {
+					l.next()
+				}
+			case '"':
+				growingString += "\""
+				l.next()
+			case 'n':
+				growingString += "\n"
+				l.next()
+			case 'b':
+				growingString += "\b"
+				l.next()
+			case 'f':
+				growingString += "\f"
+				l.next()
+			case '/':
+				growingString += "/"
+				l.next()
+			case 't':
+				growingString += "\t"
+				l.next()
+			case 'r':
+				growingString += "\r"
+				l.next()
+			case '\\':
+				growingString += "\\"
+				l.next()
+			case 'u':
+				l.next()
+				code := ""
+				for i := 0; i < 4; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 32)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\u" + code)
+				}
+				growingString += string(rune(intcode))
+			case 'U':
+				l.next()
+				code := ""
+				for i := 0; i < 8; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 64)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\U" + code)
+				}
+				growingString += string(rune(intcode))
+			default:
+				return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+			}
+		} else {
+			r := l.peek()
+
+			if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
+				return "", fmt.Errorf("unescaped control character %U", r)
+			}
+			l.next()
+			growingString += string(r)
+		}
+
+		if l.peek() == eof {
+			break
+		}
+	}
+
+	return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+	l.skip()
+
+	// handle special case for triple-quote
+	terminator := `"`
+	discardLeadingNewLine := false
+	acceptNewLines := false
+	if l.follow(`""`) {
+		l.skip()
+		l.skip()
+		terminator = `"""`
+		discardLeadingNewLine = true
+		acceptNewLines = true
+	}
+
+	str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+
+	if err != nil {
+		return l.errorf(err.Error())
+	}
+
+	l.emitWithValue(tokenString, str)
+	l.fastForward(len(terminator))
+	l.ignore()
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexKeyGroup() tomlLexStateFn {
+	l.next()
+
+	if l.peek() == '[' {
+		// token '[[' signifies an array of anonymous key groups
+		l.next()
+		l.emit(tokenDoubleLeftBracket)
+		return l.lexInsideKeyGroupArray
+	}
+	// vanilla key group
+	l.emit(tokenLeftBracket)
+	return l.lexInsideKeyGroup
+}
+
+func (l *tomlLexer) lexInsideKeyGroupArray() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if len(l.buffer) > 0 {
+				l.emit(tokenKeyGroupArray)
+			}
+			l.next()
+			if l.peek() != ']' {
+				break
+			}
+			l.next()
+			l.emit(tokenDoubleRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("group name cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed key group array")
+}
+
+func (l *tomlLexer) lexInsideKeyGroup() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if len(l.buffer) > 0 {
+				l.emit(tokenKeyGroup)
+			}
+			l.next()
+			l.emit(tokenRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("group name cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed key group")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+	l.next()
+	l.emit(tokenRightBracket)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+	r := l.peek()
+	if r == '+' || r == '-' {
+		l.next()
+	}
+	pointSeen := false
+	expSeen := false
+	digitSeen := false
+	for {
+		next := l.peek()
+		if next == '.' {
+			if pointSeen {
+				return l.errorf("cannot have two dots in one float")
+			}
+			l.next()
+			if !isDigit(l.peek()) {
+				return l.errorf("float cannot end with a dot")
+			}
+			pointSeen = true
+		} else if next == 'e' || next == 'E' {
+			expSeen = true
+			l.next()
+			r := l.peek()
+			if r == '+' || r == '-' {
+				l.next()
+			}
+		} else if isDigit(next) {
+			digitSeen = true
+			l.next()
+		} else if next == '_' {
+			l.next()
+		} else {
+			break
+		}
+		if pointSeen && !digitSeen {
+			return l.errorf("cannot start float with a dot")
+		}
+	}
+
+	if !digitSeen {
+		return l.errorf("no digit in that number")
+	}
+	if pointSeen || expSeen {
+		l.emit(tokenFloat)
+	} else {
+		l.emit(tokenInteger)
+	}
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) run() {
+	for state := l.lexVoid; state != nil; {
+		state = state()
+	}
+	close(l.tokens)
+}
+
+func init() {
+	dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
+}
+
+// Entry point
+func lexToml(input io.Reader) chan token {
+	bufferedInput := buffruneio.NewReader(input)
+	l := &tomlLexer{
+		input:         bufferedInput,
+		tokens:        make(chan token),
+		line:          1,
+		col:           1,
+		endbufferLine: 1,
+		endbufferCol:  1,
+	}
+	go l.run()
+	return l.tokens
+}
diff --git a/vendor/github.com/pelletier/go-toml/match.go b/vendor/github.com/pelletier/go-toml/match.go
new file mode 100644
index 0000000..48b0f2a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/match.go
@@ -0,0 +1,234 @@
+package toml
+
+import (
+	"fmt"
+)
+
+// support function to set positions for tomlValues
+// NOTE: this is done to allow ctx.lastPosition to indicate the start of any
+// values returned by the query engines
+func tomlValueCheck(node interface{}, ctx *queryContext) interface{} {
+	switch castNode := node.(type) {
+	case *tomlValue:
+		ctx.lastPosition = castNode.position
+		return castNode.value
+	case []*TomlTree:
+		if len(castNode) > 0 {
+			ctx.lastPosition = castNode[0].position
+		}
+		return node
+	default:
+		return node
+	}
+}
+
+// base match
+type matchBase struct {
+	next pathFn
+}
+
+func (f *matchBase) setNext(next pathFn) {
+	f.next = next
+}
+
+// terminating functor - gathers results
+type terminatingFn struct {
+	// empty
+}
+
+func newTerminatingFn() *terminatingFn {
+	return &terminatingFn{}
+}
+
+func (f *terminatingFn) setNext(next pathFn) {
+	// do nothing
+}
+
+func (f *terminatingFn) call(node interface{}, ctx *queryContext) {
+	switch castNode := node.(type) {
+	case *TomlTree:
+		ctx.result.appendResult(node, castNode.position)
+	case *tomlValue:
+		ctx.result.appendResult(node, castNode.position)
+	default:
+		// use last position for scalars
+		ctx.result.appendResult(node, ctx.lastPosition)
+	}
+}
+
+// match single key
+type matchKeyFn struct {
+	matchBase
+	Name string
+}
+
+func newMatchKeyFn(name string) *matchKeyFn {
+	return &matchKeyFn{Name: name}
+}
+
+func (f *matchKeyFn) call(node interface{}, ctx *queryContext) {
+	if array, ok := 
node.([]*TomlTree); ok { + for _, tree := range array { + item := tree.values[f.Name] + if item != nil { + f.next.call(item, ctx) + } + } + } else if tree, ok := node.(*TomlTree); ok { + item := tree.values[f.Name] + if item != nil { + f.next.call(item, ctx) + } + } +} + +// match single index +type matchIndexFn struct { + matchBase + Idx int +} + +func newMatchIndexFn(idx int) *matchIndexFn { + return &matchIndexFn{Idx: idx} +} + +func (f *matchIndexFn) call(node interface{}, ctx *queryContext) { + if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok { + if f.Idx < len(arr) && f.Idx >= 0 { + f.next.call(arr[f.Idx], ctx) + } + } +} + +// filter by slicing +type matchSliceFn struct { + matchBase + Start, End, Step int +} + +func newMatchSliceFn(start, end, step int) *matchSliceFn { + return &matchSliceFn{Start: start, End: end, Step: step} +} + +func (f *matchSliceFn) call(node interface{}, ctx *queryContext) { + if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok { + // adjust indexes for negative values, reverse ordering + realStart, realEnd := f.Start, f.End + if realStart < 0 { + realStart = len(arr) + realStart + } + if realEnd < 0 { + realEnd = len(arr) + realEnd + } + if realEnd < realStart { + realEnd, realStart = realStart, realEnd // swap + } + // loop and gather + for idx := realStart; idx < realEnd; idx += f.Step { + f.next.call(arr[idx], ctx) + } + } +} + +// match anything +type matchAnyFn struct { + matchBase +} + +func newMatchAnyFn() *matchAnyFn { + return &matchAnyFn{} +} + +func (f *matchAnyFn) call(node interface{}, ctx *queryContext) { + if tree, ok := node.(*TomlTree); ok { + for _, v := range tree.values { + f.next.call(v, ctx) + } + } +} + +// filter through union +type matchUnionFn struct { + Union []pathFn +} + +func (f *matchUnionFn) setNext(next pathFn) { + for _, fn := range f.Union { + fn.setNext(next) + } +} + +func (f *matchUnionFn) call(node interface{}, ctx *queryContext) { + for _, fn := range f.Union { + fn.call(node, ctx) + } +} + +// match every single last node in the tree +type matchRecursiveFn struct { + matchBase +} + +func newMatchRecursiveFn() *matchRecursiveFn { + return &matchRecursiveFn{} +} + +func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) { + if tree, ok := node.(*TomlTree); ok { + var visit func(tree *TomlTree) + visit = func(tree *TomlTree) { + for _, v := range tree.values { + f.next.call(v, ctx) + switch node := v.(type) { + case *TomlTree: + visit(node) + case []*TomlTree: + for _, subtree := range node { + visit(subtree) + } + } + } + } + f.next.call(tree, ctx) + visit(tree) + } +} + +// match based on an externally provided functional filter +type matchFilterFn struct { + matchBase + Pos Position + Name string +} + +func newMatchFilterFn(name string, pos Position) *matchFilterFn { + return &matchFilterFn{Name: name, Pos: pos} +} + +func (f *matchFilterFn) call(node interface{}, ctx *queryContext) { + fn, ok := (*ctx.filters)[f.Name] + if !ok { + panic(fmt.Sprintf("%s: query context does not have filter '%s'", + f.Pos.String(), f.Name)) + } + switch castNode := tomlValueCheck(node, ctx).(type) { + case *TomlTree: + for _, v := range castNode.values { + if tv, ok := v.(*tomlValue); ok { + if fn(tv.value) { + f.next.call(v, ctx) + } + } else { + if fn(v) { + f.next.call(v, ctx) + } + } + } + case []interface{}: + for _, v := range castNode { + if fn(v) { + f.next.call(v, ctx) + } + } + } +} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go new 
file mode 100644 index 0000000..25932d7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -0,0 +1,392 @@ +// TOML Parser. + +package toml + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +type tomlParser struct { + flow chan token + tree *TomlTree + tokensBuffer []token + currentGroup []string + seenGroupKeys []string +} + +type tomlParserStateFn func() tomlParserStateFn + +// Formats and panics an error message based on a token +func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *tomlParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *tomlParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.tokensBuffer = append(p.tokensBuffer, tok) + return &tok +} + +func (p *tomlParser) assume(typ tokenType) { + tok := p.getToken() + if tok == nil { + p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) + } + if tok.typ != typ { + p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) + } +} + +func (p *tomlParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *tomlParser) parseStart() tomlParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenDoubleLeftBracket: + return p.parseGroupArray + case tokenLeftBracket: + return p.parseGroup + case tokenKey: + return p.parseAssign + case tokenEOF: + return nil + default: + p.raiseError(tok, "unexpected token") + } + return nil +} + +func (p *tomlParser) parseGroupArray() tomlParserStateFn { + startToken := p.getToken() // discard the [[ + key := p.getToken() + if key.typ != tokenKeyGroupArray { + p.raiseError(key, "unexpected token %s, was expecting a key group array", key) + } + + // get or create group array element at the indicated part in the path + keys, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "invalid group array key: %s", err) + } + p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries + destTree := p.tree.GetPath(keys) + var array []*TomlTree + if destTree == nil { + array = make([]*TomlTree, 0) + } else if target, ok := destTree.([]*TomlTree); ok && target != nil { + array = destTree.([]*TomlTree) + } else { + p.raiseError(key, "key %s is already assigned and not of type group array", key) + } + p.currentGroup = keys + + // add a new tree to the end of the group array + newTree := newTomlTree() + newTree.position = startToken.Position + array = append(array, newTree) + p.tree.SetPath(p.currentGroup, array) + + // remove all keys that were children of this group array + prefix := key.val + "." + found := false + for ii := 0; ii < len(p.seenGroupKeys); { + groupKey := p.seenGroupKeys[ii] + if strings.HasPrefix(groupKey, prefix) { + p.seenGroupKeys = append(p.seenGroupKeys[:ii], p.seenGroupKeys[ii+1:]...) 
+		} else {
+			found = (groupKey == key.val)
+			ii++
+		}
+	}
+
+	// keep this key name from use by other kinds of assignments
+	if !found {
+		p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+	}
+
+	// move to next parser state
+	p.assume(tokenDoubleRightBracket)
+	return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+	startToken := p.getToken() // discard the [
+	key := p.getToken()
+	if key.typ != tokenKeyGroup {
+		p.raiseError(key, "unexpected token %s, was expecting a key group", key)
+	}
+	for _, item := range p.seenGroupKeys {
+		if item == key.val {
+			p.raiseError(key, "duplicated tables")
+		}
+	}
+
+	p.seenGroupKeys = append(p.seenGroupKeys, key.val)
+	keys, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "invalid group key: %s", err)
+	}
+	if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+		p.raiseError(key, "%s", err)
+	}
+	p.assume(tokenRightBracket)
+	p.currentGroup = keys
+	return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+	key := p.getToken()
+	p.assume(tokenEqual)
+
+	value := p.parseRvalue()
+	var groupKey []string
+	if len(p.currentGroup) > 0 {
+		groupKey = p.currentGroup
+	} else {
+		groupKey = []string{}
+	}
+
+	// find the group to assign, looking out for arrays of groups
+	var targetNode *TomlTree
+	switch node := p.tree.GetPath(groupKey).(type) {
+	case []*TomlTree:
+		targetNode = node[len(node)-1]
+	case *TomlTree:
+		targetNode = node
+	default:
+		p.raiseError(key, "Unknown group type for path: %s",
+			strings.Join(groupKey, "."))
+	}
+
+	// assign value to the found group
+	keyVals, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "%s", err)
+	}
+	if len(keyVals) != 1 {
+		p.raiseError(key, "Invalid key")
+	}
+	keyVal := keyVals[0]
+	localKey := []string{keyVal}
+	finalKey := append(groupKey, keyVal)
+	if targetNode.GetPath(localKey) != nil {
+		p.raiseError(key, "The following key was defined twice: %s",
+			strings.Join(finalKey, "."))
+	}
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *TomlTree:
+		toInsert = value
+	default:
+		toInsert = &tomlValue{value, key.Position}
+	}
+	targetNode.values[keyVal] = toInsert
+	return p.parseStart
+}
+
+var numberUnderscoreInvalidRegexp *regexp.Regexp
+
+func cleanupNumberToken(value string) (string, error) {
+	if numberUnderscoreInvalidRegexp.MatchString(value) {
+		return "", fmt.Errorf("invalid use of _ in number")
+	}
+	cleanedVal := strings.Replace(value, "_", "", -1)
+	return cleanedVal, nil
+}
+
+func (p *tomlParser) parseRvalue() interface{} {
+	tok := p.getToken()
+	if tok == nil || tok.typ == tokenEOF {
+		p.raiseError(tok, "expecting a value")
+	}
+
+	switch tok.typ {
+	case tokenString:
+		return tok.val
+	case tokenTrue:
+		return true
+	case tokenFalse:
+		return false
+	case tokenInteger:
+		cleanedVal, err := cleanupNumberToken(tok.val)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		val, err := strconv.ParseInt(cleanedVal, 10, 64)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenFloat:
+		cleanedVal, err := cleanupNumberToken(tok.val)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		val, err := strconv.ParseFloat(cleanedVal, 64)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenDate:
+		val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenLeftBracket:
+		return p.parseArray()
+	case tokenLeftCurlyBrace:
+		return p.parseInlineTable()
+	case tokenEqual:
+		p.raiseError(tok, "cannot have multiple equals for the same key")
+	case tokenError:
+		p.raiseError(tok, "%s", tok)
+	}
+
+	p.raiseError(tok, "never reached")
+
+	return nil
+}
+
+func tokenIsComma(t *token) bool {
+	return t != nil && t.typ == tokenComma
+}
+
+func (p *tomlParser) parseInlineTable() *TomlTree {
+	tree := newTomlTree()
+	var previous *token
+Loop:
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated inline table")
+		}
+		switch follow.typ {
+		case tokenRightCurlyBrace:
+			p.getToken()
+			break Loop
+		case tokenKey:
+			if !tokenIsComma(previous) && previous != nil {
+				p.raiseError(follow, "comma expected between fields in inline table")
+			}
+			key := p.getToken()
+			p.assume(tokenEqual)
+			value := p.parseRvalue()
+			tree.Set(key.val, value)
+		case tokenComma:
+			if previous == nil {
+				p.raiseError(follow, "inline table cannot start with a comma")
+			}
+			if tokenIsComma(previous) {
+				p.raiseError(follow, "need field between two commas in inline table")
+			}
+			p.getToken()
+		default:
+			p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String())
+		}
+		previous = follow
+	}
+	if tokenIsComma(previous) {
+		p.raiseError(previous, "trailing comma at the end of inline table")
+	}
+	return tree
+}
+
+func (p *tomlParser) parseArray() interface{} {
+	var array []interface{}
+	arrayType := reflect.TypeOf(nil)
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ == tokenRightBracket {
+			p.getToken()
+			break
+		}
+		val := p.parseRvalue()
+		if arrayType == nil {
+			arrayType = reflect.TypeOf(val)
+		}
+		if reflect.TypeOf(val) != arrayType {
+			p.raiseError(follow, "mixed types in array")
+		}
+		array = append(array, val)
+		follow = p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+			p.raiseError(follow, "missing comma")
+		}
+		if follow.typ == tokenComma {
+			p.getToken()
+		}
+	}
+	// An array of TomlTrees is actually an array of inline
+	// tables, which is a shorthand for a table array. If the
+	// array was not converted from []interface{} to []*TomlTree,
+	// the two notations would not be equivalent.
+	if arrayType == reflect.TypeOf(newTomlTree()) {
+		tomlArray := make([]*TomlTree, len(array))
+		for i, v := range array {
+			tomlArray[i] = v.(*TomlTree)
+		}
+		return tomlArray
+	}
+	return array
+}
+
+func parseToml(flow chan token) *TomlTree {
+	result := newTomlTree()
+	result.position = Position{1, 1}
+	parser := &tomlParser{
+		flow:          flow,
+		tree:          result,
+		tokensBuffer:  make([]token, 0),
+		currentGroup:  make([]string, 0),
+		seenGroupKeys: make([]string, 0),
+	}
+	parser.run()
+	return result
+}
+
+func init() {
+	numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d]|_$|^_)`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 0000000..c17bff8
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+	"fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
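+//
+// For example, a position on line 3, column 1 prints as "(3, 1)", while the
+// zero value reports itself as invalid:
+//
+//	p := Position{Line: 3, Col: 1}
+//	p.String()             // "(3, 1)"
+//	(Position{}).Invalid() // true, both fields are zero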
+type Position struct {
+	Line int // line within the document
+	Col  int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid returns whether the position is invalid (i.e. has a zero or
+// negative Line or Col)
+func (p Position) Invalid() bool {
+	return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/query.go b/vendor/github.com/pelletier/go-toml/query.go
new file mode 100644
index 0000000..307a1ec
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/query.go
@@ -0,0 +1,153 @@
+package toml
+
+import (
+	"time"
+)
+
+// NodeFilterFn represents a user-defined filter function, for use with
+// Query.SetFilter().
+//
+// The return value of the function must indicate if 'node' is to be included
+// at this stage of the TOML path. Returning true will include the node, and
+// returning false will exclude it.
+//
+// NOTE: Care should be taken to write script callbacks such that they are safe
+// to use from multiple goroutines.
+type NodeFilterFn func(node interface{}) bool
+
+// QueryResult is the result of Executing a Query.
+type QueryResult struct {
+	items     []interface{}
+	positions []Position
+}
+
+// appends a value/position pair to the result set.
+func (r *QueryResult) appendResult(node interface{}, pos Position) {
+	r.items = append(r.items, node)
+	r.positions = append(r.positions, pos)
+}
+
+// Values is a set of values within a QueryResult. The order of values is not
+// guaranteed to be in document order, and may be different each time a query is
+// executed.
+func (r QueryResult) Values() []interface{} {
+	values := make([]interface{}, len(r.items))
+	for i, v := range r.items {
+		o, ok := v.(*tomlValue)
+		if ok {
+			values[i] = o.value
+		} else {
+			values[i] = v
+		}
+	}
+	return values
+}
+
+// Positions is a set of positions for values within a QueryResult. Each index
+// in Positions() corresponds to the entry in Values() of the same index.
+func (r QueryResult) Positions() []Position {
+	return r.positions
+}
+
+// runtime context for executing query paths
+type queryContext struct {
+	result       *QueryResult
+	filters      *map[string]NodeFilterFn
+	lastPosition Position
+}
+
+// generic path functor interface
+type pathFn interface {
+	setNext(next pathFn)
+	call(node interface{}, ctx *queryContext)
+}
+
+// A Query is the representation of a compiled TOML path. A Query is safe
+// for concurrent use by multiple goroutines.
+type Query struct {
+	root    pathFn
+	tail    pathFn
+	filters *map[string]NodeFilterFn
+}
+
+func newQuery() *Query {
+	return &Query{
+		root:    nil,
+		tail:    nil,
+		filters: &defaultFilterFunctions,
+	}
+}
+
+func (q *Query) appendPath(next pathFn) {
+	if q.root == nil {
+		q.root = next
+	} else {
+		q.tail.setNext(next)
+	}
+	q.tail = next
+	next.setNext(newTerminatingFn()) // init the next functor
+}
+
+// CompileQuery compiles a TOML path expression. The returned Query can be used
+// to match elements within a TomlTree and its descendants.
+func CompileQuery(path string) (*Query, error) {
+	return parseQuery(lexQuery(path))
+}
+
+// Execute executes a query against a TomlTree, and returns the result of the query.
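+//
+// A typical round trip looks like the following sketch (the document and
+// path here are illustrative):
+//
+//	tree, _ := Load("[server]\nport = 8080")
+//	q, _ := CompileQuery("$.server.port")
+//	for _, v := range q.Execute(tree).Values() {
+//		fmt.Println(v) // int64(8080), printed as "8080"
+//	}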
+func (q *Query) Execute(tree *TomlTree) *QueryResult { + result := &QueryResult{ + items: []interface{}{}, + positions: []Position{}, + } + if q.root == nil { + result.appendResult(tree, tree.GetPosition("")) + } else { + ctx := &queryContext{ + result: result, + filters: q.filters, + } + q.root.call(tree, ctx) + } + return result +} + +// SetFilter sets a user-defined filter function. These may be used inside +// "?(..)" query expressions to filter TOML document elements within a query. +func (q *Query) SetFilter(name string, fn NodeFilterFn) { + if q.filters == &defaultFilterFunctions { + // clone the static table + q.filters = &map[string]NodeFilterFn{} + for k, v := range defaultFilterFunctions { + (*q.filters)[k] = v + } + } + (*q.filters)[name] = fn +} + +var defaultFilterFunctions = map[string]NodeFilterFn{ + "tree": func(node interface{}) bool { + _, ok := node.(*TomlTree) + return ok + }, + "int": func(node interface{}) bool { + _, ok := node.(int64) + return ok + }, + "float": func(node interface{}) bool { + _, ok := node.(float64) + return ok + }, + "string": func(node interface{}) bool { + _, ok := node.(string) + return ok + }, + "time": func(node interface{}) bool { + _, ok := node.(time.Time) + return ok + }, + "bool": func(node interface{}) bool { + _, ok := node.(bool) + return ok + }, +} diff --git a/vendor/github.com/pelletier/go-toml/querylexer.go b/vendor/github.com/pelletier/go-toml/querylexer.go new file mode 100644 index 0000000..960681d --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/querylexer.go @@ -0,0 +1,356 @@ +// TOML JSONPath lexer. +// +// Written using the principles developed by Rob Pike in +// http://www.youtube.com/watch?v=HxaD_trXwRE + +package toml + +import ( + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +// Lexer state function +type queryLexStateFn func() queryLexStateFn + +// Lexer definition +type queryLexer struct { + input string + start int + pos int + width int + tokens chan token + depth int + line int + col int + stringTerm string +} + +func (l *queryLexer) run() { + for state := l.lexVoid; state != nil; { + state = state() + } + close(l.tokens) +} + +func (l *queryLexer) nextStart() { + // iterate by runes (utf8 characters) + // search for newlines and advance line/col counts + for i := l.start; i < l.pos; { + r, width := utf8.DecodeRuneInString(l.input[i:]) + if r == '\n' { + l.line++ + l.col = 1 + } else { + l.col++ + } + i += width + } + // advance start position to next token + l.start = l.pos +} + +func (l *queryLexer) emit(t tokenType) { + l.tokens <- token{ + Position: Position{l.line, l.col}, + typ: t, + val: l.input[l.start:l.pos], + } + l.nextStart() +} + +func (l *queryLexer) emitWithValue(t tokenType, value string) { + l.tokens <- token{ + Position: Position{l.line, l.col}, + typ: t, + val: value, + } + l.nextStart() +} + +func (l *queryLexer) next() rune { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + var r rune + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *queryLexer) ignore() { + l.nextStart() +} + +func (l *queryLexer) backup() { + l.pos -= l.width +} + +func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn { + l.tokens <- token{ + Position: Position{l.line, l.col}, + typ: tokenError, + val: fmt.Sprintf(format, args...), + } + return nil +} + +func (l *queryLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func (l *queryLexer) accept(valid string) bool { + if strings.ContainsRune(valid, 
l.next()) { + return true + } + l.backup() + return false +} + +func (l *queryLexer) follow(next string) bool { + return strings.HasPrefix(l.input[l.pos:], next) +} + +func (l *queryLexer) lexVoid() queryLexStateFn { + for { + next := l.peek() + switch next { + case '$': + l.pos++ + l.emit(tokenDollar) + continue + case '.': + if l.follow("..") { + l.pos += 2 + l.emit(tokenDotDot) + } else { + l.pos++ + l.emit(tokenDot) + } + continue + case '[': + l.pos++ + l.emit(tokenLeftBracket) + continue + case ']': + l.pos++ + l.emit(tokenRightBracket) + continue + case ',': + l.pos++ + l.emit(tokenComma) + continue + case '*': + l.pos++ + l.emit(tokenStar) + continue + case '(': + l.pos++ + l.emit(tokenLeftParen) + continue + case ')': + l.pos++ + l.emit(tokenRightParen) + continue + case '?': + l.pos++ + l.emit(tokenQuestion) + continue + case ':': + l.pos++ + l.emit(tokenColon) + continue + case '\'': + l.ignore() + l.stringTerm = string(next) + return l.lexString + case '"': + l.ignore() + l.stringTerm = string(next) + return l.lexString + } + + if isSpace(next) { + l.next() + l.ignore() + continue + } + + if isAlphanumeric(next) { + return l.lexKey + } + + if next == '+' || next == '-' || isDigit(next) { + return l.lexNumber + } + + if l.next() == eof { + break + } + + return l.errorf("unexpected char: '%v'", next) + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexKey() queryLexStateFn { + for { + next := l.peek() + if !isAlphanumeric(next) { + l.emit(tokenKey) + return l.lexVoid + } + + if l.next() == eof { + break + } + } + l.emit(tokenEOF) + return nil +} + +func (l *queryLexer) lexString() queryLexStateFn { + l.pos++ + l.ignore() + growingString := "" + + for { + if l.follow(l.stringTerm) { + l.emitWithValue(tokenString, growingString) + l.pos++ + l.ignore() + return l.lexVoid + } + + if l.follow("\\\"") { + l.pos++ + growingString += "\"" + } else if l.follow("\\'") { + l.pos++ + growingString += "'" + } else if l.follow("\\n") { + l.pos++ + growingString += "\n" + } else if l.follow("\\b") { + l.pos++ + growingString += "\b" + } else if l.follow("\\f") { + l.pos++ + growingString += "\f" + } else if l.follow("\\/") { + l.pos++ + growingString += "/" + } else if l.follow("\\t") { + l.pos++ + growingString += "\t" + } else if l.follow("\\r") { + l.pos++ + growingString += "\r" + } else if l.follow("\\\\") { + l.pos++ + growingString += "\\" + } else if l.follow("\\u") { + l.pos += 2 + code := "" + for i := 0; i < 4; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\U") { + l.pos += 2 + code := "" + for i := 0; i < 8; i++ { + c := l.peek() + l.pos++ + if !isHexDigit(c) { + return l.errorf("unfinished unicode escape") + } + code = code + string(c) + } + l.pos-- + intcode, err := strconv.ParseInt(code, 16, 32) + if err != nil { + return l.errorf("invalid unicode escape: \\u" + code) + } + growingString += string(rune(intcode)) + } else if l.follow("\\") { + l.pos++ + return l.errorf("invalid escape sequence: \\" + string(l.peek())) + } else { + growingString += string(l.peek()) + } + + if l.next() == eof { + break + } + } + + return l.errorf("unclosed string") +} + +func (l *queryLexer) lexNumber() queryLexStateFn { + l.ignore() + if !l.accept("+") { + l.accept("-") + } + pointSeen := false + 
digitSeen := false + for { + next := l.next() + if next == '.' { + if pointSeen { + return l.errorf("cannot have two dots in one float") + } + if !isDigit(l.peek()) { + return l.errorf("float cannot end with a dot") + } + pointSeen = true + } else if isDigit(next) { + digitSeen = true + } else { + l.backup() + break + } + if pointSeen && !digitSeen { + return l.errorf("cannot start float with a dot") + } + } + + if !digitSeen { + return l.errorf("no digit in that number") + } + if pointSeen { + l.emit(tokenFloat) + } else { + l.emit(tokenInteger) + } + return l.lexVoid +} + +// Entry point +func lexQuery(input string) chan token { + l := &queryLexer{ + input: input, + tokens: make(chan token), + line: 1, + col: 1, + } + go l.run() + return l.tokens +} diff --git a/vendor/github.com/pelletier/go-toml/queryparser.go b/vendor/github.com/pelletier/go-toml/queryparser.go new file mode 100644 index 0000000..1cbfc83 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/queryparser.go @@ -0,0 +1,275 @@ +/* + Based on the "jsonpath" spec/concept. + + http://goessner.net/articles/JsonPath/ + https://code.google.com/p/json-path/ +*/ + +package toml + +import ( + "fmt" +) + +const maxInt = int(^uint(0) >> 1) + +type queryParser struct { + flow chan token + tokensBuffer []token + query *Query + union []pathFn + err error +} + +type queryParserStateFn func() queryParserStateFn + +// Formats and panics an error message based on a token +func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn { + p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...) + return nil // trigger parse to end +} + +func (p *queryParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *queryParser) backup(tok *token) { + p.tokensBuffer = append(p.tokensBuffer, *tok) +} + +func (p *queryParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.backup(&tok) + return &tok +} + +func (p *queryParser) lookahead(types ...tokenType) bool { + result := true + buffer := []token{} + + for _, typ := range types { + tok := p.getToken() + if tok == nil { + result = false + break + } + buffer = append(buffer, *tok) + if tok.typ != typ { + result = false + break + } + } + // add the tokens back to the buffer, and return + p.tokensBuffer = append(p.tokensBuffer, buffer...) + return result +} + +func (p *queryParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *queryParser) parseStart() queryParserStateFn { + tok := p.getToken() + + if tok == nil || tok.typ == tokenEOF { + return nil + } + + if tok.typ != tokenDollar { + return p.parseError(tok, "Expected '$' at start of expression") + } + + return p.parseMatchExpr +} + +// handle '.' prefix, '[]', and '..' +func (p *queryParser) parseMatchExpr() queryParserStateFn { + tok := p.getToken() + switch tok.typ { + case tokenDotDot: + p.query.appendPath(&matchRecursiveFn{}) + // nested parse for '..' + tok := p.getToken() + switch tok.typ { + case tokenKey: + p.query.appendPath(newMatchKeyFn(tok.val)) + return p.parseMatchExpr + case tokenLeftBracket: + return p.parseBracketExpr + case tokenStar: + // do nothing - the recursive predicate is enough + return p.parseMatchExpr + } + + case tokenDot: + // nested parse for '.' 
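+		// e.g. in "$.server.port" this arm runs twice, consuming
+		// '.server' and then '.port' and appending one key matcher each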
+		tok := p.getToken()
+		switch tok.typ {
+		case tokenKey:
+			p.query.appendPath(newMatchKeyFn(tok.val))
+			return p.parseMatchExpr
+		case tokenStar:
+			p.query.appendPath(&matchAnyFn{})
+			return p.parseMatchExpr
+		}
+
+	case tokenLeftBracket:
+		return p.parseBracketExpr
+
+	case tokenEOF:
+		return nil // allow EOF at this stage
+	}
+	return p.parseError(tok, "expected match expression")
+}
+
+func (p *queryParser) parseBracketExpr() queryParserStateFn {
+	if p.lookahead(tokenInteger, tokenColon) {
+		return p.parseSliceExpr
+	}
+	if p.peek().typ == tokenColon {
+		return p.parseSliceExpr
+	}
+	return p.parseUnionExpr
+}
+
+func (p *queryParser) parseUnionExpr() queryParserStateFn {
+	var tok *token
+
+	// this state can be traversed after some sub-expressions
+	// so be careful when setting up state in the parser
+	if p.union == nil {
+		p.union = []pathFn{}
+	}
+
+loop: // labeled loop for easy breaking
+	for {
+		if len(p.union) > 0 {
+			// parse delimiter or terminator
+			tok = p.getToken()
+			switch tok.typ {
+			case tokenComma:
+				// do nothing
+			case tokenRightBracket:
+				break loop
+			default:
+				return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val)
+			}
+		}
+
+		// parse sub expression
+		tok = p.getToken()
+		switch tok.typ {
+		case tokenInteger:
+			p.union = append(p.union, newMatchIndexFn(tok.Int()))
+		case tokenKey:
+			p.union = append(p.union, newMatchKeyFn(tok.val))
+		case tokenString:
+			p.union = append(p.union, newMatchKeyFn(tok.val))
+		case tokenQuestion:
+			return p.parseFilterExpr
+		default:
+			return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union))
+		}
+	}
+
+	// if there is only one sub-expression, use that instead
+	if len(p.union) == 1 {
+		p.query.appendPath(p.union[0])
+	} else {
+		p.query.appendPath(&matchUnionFn{p.union})
+	}
+
+	p.union = nil // clear out state
+	return p.parseMatchExpr
+}
+
+func (p *queryParser) parseSliceExpr() queryParserStateFn {
+	// init slice to grab all elements
+	start, end, step := 0, maxInt, 1
+
+	// parse optional start
+	tok := p.getToken()
+	if tok.typ == tokenInteger {
+		start = tok.Int()
+		tok = p.getToken()
+	}
+	if tok.typ != tokenColon {
+		return p.parseError(tok, "expected ':'")
+	}
+
+	// parse optional end
+	tok = p.getToken()
+	if tok.typ == tokenInteger {
+		end = tok.Int()
+		tok = p.getToken()
+	}
+	if tok.typ == tokenRightBracket {
+		p.query.appendPath(newMatchSliceFn(start, end, step))
+		return p.parseMatchExpr
+	}
+	if tok.typ != tokenColon {
+		return p.parseError(tok, "expected ']' or ':'")
+	}
+
+	// parse optional step
+	tok = p.getToken()
+	if tok.typ == tokenInteger {
+		step = tok.Int()
+		if step < 0 {
+			return p.parseError(tok, "step must be a positive value")
+		}
+		tok = p.getToken()
+	}
+	if tok.typ != tokenRightBracket {
+		return p.parseError(tok, "expected ']'")
+	}
+
+	p.query.appendPath(newMatchSliceFn(start, end, step))
+	return p.parseMatchExpr
+}
+
+func (p *queryParser) parseFilterExpr() queryParserStateFn {
+	tok := p.getToken()
+	if tok.typ != tokenLeftParen {
+		return p.parseError(tok, "expected left-parenthesis for filter expression")
+	}
+	tok = p.getToken()
+	if tok.typ != tokenKey && tok.typ != tokenString {
+		return p.parseError(tok, "expected key or string for filter function name")
+	}
+	name := tok.val
+	tok = p.getToken()
+	if tok.typ != tokenRightParen {
+		return p.parseError(tok, "expected right-parenthesis for filter expression")
+	}
+	p.union = append(p.union, newMatchFilterFn(name, tok.Position))
+	return p.parseUnionExpr
+}
+
+func parseQuery(flow chan 
token) (*Query, error) { + parser := &queryParser{ + flow: flow, + tokensBuffer: []token{}, + query: newQuery(), + } + parser.run() + return parser.query, parser.err +} diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh new file mode 100755 index 0000000..15ac1e1 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/test.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# fail out of the script if anything here fails +set -e + +# set the path to the present working directory +export GOPATH=`pwd` + +function git_clone() { + path=$1 + branch=$2 + version=$3 + if [ ! -d "src/$path" ]; then + mkdir -p src/$path + git clone https://$path.git src/$path + fi + pushd src/$path + git checkout "$branch" + git reset --hard "$version" + popd +} + +go get github.com/pelletier/go-buffruneio +go get github.com/davecgh/go-spew/spew + +# get code for BurntSushi TOML validation +# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize) +git_clone github.com/BurntSushi/toml master HEAD +git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD + +# build the BurntSushi test application +go build -o toml-test github.com/BurntSushi/toml-test + +# vendorize the current lib for testing +# NOTE: this basically mocks an install without having to go back out to github for code +mkdir -p src/github.com/pelletier/go-toml/cmd +cp *.go *.toml src/github.com/pelletier/go-toml +cp -R cmd/* src/github.com/pelletier/go-toml/cmd +go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go + +# Run basic unit tests +go test github.com/pelletier/go-toml \ + github.com/pelletier/go-toml/cmd/tomljson + +# run the entire BurntSushi test suite +if [[ $# -eq 0 ]] ; then + echo "Running all BurntSushi tests" + ./toml-test ./test_program_bin | tee test_out +else + # run a specific test + test=$1 + test_path='src/github.com/BurntSushi/toml-test/tests' + valid_test="$test_path/valid/$test" + invalid_test="$test_path/invalid/$test" + + if [ -e "$valid_test.toml" ]; then + echo "Valid Test TOML for $test:" + echo "====" + cat "$valid_test.toml" + + echo "Valid Test JSON for $test:" + echo "====" + cat "$valid_test.json" + + echo "Go-TOML Output for $test:" + echo "====" + cat "$valid_test.toml" | ./test_program_bin + fi + + if [ -e "$invalid_test.toml" ]; then + echo "Invalid Test TOML for $test:" + echo "====" + cat "$invalid_test.toml" + + echo "Go-TOML Output for $test:" + echo "====" + echo "go-toml Output:" + cat "$invalid_test.toml" | ./test_program_bin + fi +fi diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go new file mode 100644 index 0000000..e598cf9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -0,0 +1,139 @@ +package toml + +import ( + "fmt" + "strconv" + "unicode" +) + +// Define tokens +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenComment + tokenKey + tokenString + tokenInteger + tokenTrue + tokenFalse + tokenFloat + tokenEqual + tokenLeftBracket + tokenRightBracket + tokenLeftCurlyBrace + tokenRightCurlyBrace + tokenLeftParen + tokenRightParen + tokenDoubleLeftBracket + tokenDoubleRightBracket + tokenDate + tokenKeyGroup + tokenKeyGroupArray + tokenComma + tokenColon + tokenDollar + tokenStar + tokenQuestion + tokenDot + tokenDotDot + tokenEOL +) + +var tokenTypeNames = []string{ + "Error", + "EOF", + "Comment", + "Key", + "String", + "Integer", + "True", + "False", + 
"Float", + "=", + "[", + "]", + "{", + "}", + "(", + ")", + "]]", + "[[", + "Date", + "KeyGroup", + "KeyGroupArray", + ",", + ":", + "$", + "*", + "?", + ".", + "..", + "EOL", +} + +type token struct { + Position + typ tokenType + val string +} + +func (tt tokenType) String() string { + idx := int(tt) + if idx < len(tokenTypeNames) { + return tokenTypeNames[idx] + } + return "Unknown" +} + +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + case tokenError: + return t.val + } + + return fmt.Sprintf("%q", t.val) +} + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isAlphanumeric(r rune) bool { + return unicode.IsLetter(r) || r == '_' +} + +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') +} + +func isDigit(r rune) bool { + return unicode.IsNumber(r) +} + +func isHexDigit(r rune) bool { + return isDigit(r) || + r == 'A' || r == 'B' || r == 'C' || r == 'D' || r == 'E' || r == 'F' +} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go new file mode 100644 index 0000000..c0e1ad2 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -0,0 +1,285 @@ +package toml + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" +) + +type tomlValue struct { + value interface{} + position Position +} + +// TomlTree is the result of the parsing of a TOML file. +type TomlTree struct { + values map[string]interface{} + position Position +} + +func newTomlTree() *TomlTree { + return &TomlTree{ + values: make(map[string]interface{}), + position: Position{}, + } +} + +// TreeFromMap initializes a new TomlTree object using the given map. +func TreeFromMap(m map[string]interface{}) *TomlTree { + return &TomlTree{ + values: m, + } +} + +// Has returns a boolean indicating if the given key exists. +func (t *TomlTree) Has(key string) bool { + if key == "" { + return false + } + return t.HasPath(strings.Split(key, ".")) +} + +// HasPath returns true if the given path of keys exists, false otherwise. +func (t *TomlTree) HasPath(keys []string) bool { + return t.GetPath(keys) != nil +} + +// Keys returns the keys of the toplevel tree. +// Warning: this is a costly operation. +func (t *TomlTree) Keys() []string { + var keys []string + for k := range t.values { + keys = append(keys, k) + } + return keys +} + +// Get the value at key in the TomlTree. +// Key is a dot-separated path (e.g. a.b.c). +// Returns nil if the path does not exist in the tree. +// If keys is of length zero, the current tree is returned. +func (t *TomlTree) Get(key string) interface{} { + if key == "" { + return t + } + comps, err := parseKey(key) + if err != nil { + return nil + } + return t.GetPath(comps) +} + +// GetPath returns the element in the tree indicated by 'keys'. +// If keys is of length zero, the current tree is returned. 
+func (t *TomlTree) GetPath(keys []string) interface{} {
+	if len(keys) == 0 {
+		return t
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return nil
+		}
+		switch node := value.(type) {
+		case *TomlTree:
+			subtree = node
+		case []*TomlTree:
+			// go to most recent element
+			if len(node) == 0 {
+				return nil
+			}
+			subtree = node[len(node)-1]
+		default:
+			return nil // cannot navigate through other node types
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.value
+	default:
+		return node
+	}
+}
+
+// GetPosition returns the position of the given key.
+func (t *TomlTree) GetPosition(key string) Position {
+	if key == "" {
+		return t.position
+	}
+	return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// GetPositionPath returns the position of the element in the tree indicated
+// by 'keys'. If keys is of length zero, the position of the current tree is
+// returned.
+func (t *TomlTree) GetPositionPath(keys []string) Position {
+	if len(keys) == 0 {
+		return t.position
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return Position{0, 0}
+		}
+		switch node := value.(type) {
+		case *TomlTree:
+			subtree = node
+		case []*TomlTree:
+			// go to most recent element
+			if len(node) == 0 {
+				return Position{0, 0}
+			}
+			subtree = node[len(node)-1]
+		default:
+			return Position{0, 0}
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.position
+	case *TomlTree:
+		return node.position
+	case []*TomlTree:
+		// go to most recent element
+		if len(node) == 0 {
+			return Position{0, 0}
+		}
+		return node[len(node)-1].position
+	default:
+		return Position{0, 0}
+	}
+}
+
+// GetDefault works like Get but with a default value
+func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
+	val := t.Get(key)
+	if val == nil {
+		return def
+	}
+	return val
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *TomlTree) Set(key string, value interface{}) {
+	t.SetPath(strings.Split(key, "."), value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *TomlTree) SetPath(keys []string, value interface{}) {
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTomlTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *TomlTree:
+			subtree = node
+		case []*TomlTree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				subtree.values[intermediateKey] = append(node, newTomlTree())
+			}
+			subtree = node[len(node)-1]
+		}
+	}
+
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *TomlTree:
+		toInsert = value
+	case []*TomlTree:
+		toInsert = value
+	case *tomlValue:
+		toInsert = value
+	default:
+		toInsert = &tomlValue{value: value}
+	}
+
+	subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// createSubTree takes a tree and a key and creates the necessary intermediate
+// subtrees to create a subtree at that point. In-place.
+//
+// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] +// and tree[a][b][c] +// +// Returns nil on success, error object on failure +func (t *TomlTree) createSubTree(keys []string, pos Position) error { + subtree := t + for _, intermediateKey := range keys { + if intermediateKey == "" { + return fmt.Errorf("empty intermediate table") + } + nextTree, exists := subtree.values[intermediateKey] + if !exists { + tree := newTomlTree() + tree.position = pos + subtree.values[intermediateKey] = tree + nextTree = tree + } + + switch node := nextTree.(type) { + case []*TomlTree: + subtree = node[len(node)-1] + case *TomlTree: + subtree = node + default: + return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", + strings.Join(keys, "."), intermediateKey, nextTree, nextTree) + } + } + return nil +} + +// Query compiles and executes a query on a tree and returns the query result. +func (t *TomlTree) Query(query string) (*QueryResult, error) { + q, err := CompileQuery(query) + if err != nil { + return nil, err + } + return q.Execute(t), nil +} + +// LoadReader creates a TomlTree from any io.Reader. +func LoadReader(reader io.Reader) (tree *TomlTree, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = errors.New(r.(string)) + } + }() + tree = parseToml(lexToml(reader)) + return +} + +// Load creates a TomlTree from a string. +func Load(content string) (tree *TomlTree, err error) { + return LoadReader(strings.NewReader(content)) +} + +// LoadFile creates a TomlTree from a file. +func LoadFile(path string) (tree *TomlTree, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + return LoadReader(file) +} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go new file mode 100644 index 0000000..c9c6f95 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go @@ -0,0 +1,146 @@ +package toml + +// Tools to convert a TomlTree to different representations + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// encodes a string to a TOML-compliant string value +func encodeTomlString(value string) string { + result := "" + for _, rr := range value { + intRr := uint16(rr) + switch rr { + case '\b': + result += "\\b" + case '\t': + result += "\\t" + case '\n': + result += "\\n" + case '\f': + result += "\\f" + case '\r': + result += "\\r" + case '"': + result += "\\\"" + case '\\': + result += "\\\\" + default: + if intRr < 0x001F { + result += fmt.Sprintf("\\u%0.4X", intRr) + } else { + result += string(rr) + } + } + } + return result +} + +// Value print support function for ToString() +// Outputs the TOML compliant string representation of a value +func toTomlValue(item interface{}, indent int) string { + tab := strings.Repeat(" ", indent) + switch value := item.(type) { + case int64: + return tab + strconv.FormatInt(value, 10) + case float64: + return tab + strconv.FormatFloat(value, 'f', -1, 64) + case string: + return tab + "\"" + encodeTomlString(value) + "\"" + case bool: + if value { + return "true" + } + return "false" + case time.Time: + return tab + value.Format(time.RFC3339) + case []interface{}: + result := tab + "[\n" + for _, item := range value { + result += toTomlValue(item, indent+2) + ",\n" + } + return result + tab + "]" + default: + panic(fmt.Sprintf("unsupported value type: %v", value)) + } +} + +// Recursive support function for ToString() +// 
Outputs a tree, using the provided keyspace to prefix group names +func (t *TomlTree) toToml(indent, keyspace string) string { + result := "" + for k, v := range t.values { + // figure out the keyspace + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." + combinedKey + } + // output based on type + switch node := v.(type) { + case []*TomlTree: + for _, item := range node { + if len(item.Keys()) > 0 { + result += fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey) + } + result += item.toToml(indent+" ", combinedKey) + } + case *TomlTree: + if len(node.Keys()) > 0 { + result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey) + } + result += node.toToml(indent+" ", combinedKey) + case map[string]interface{}: + sub := TreeFromMap(node) + + if len(sub.Keys()) > 0 { + result += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey) + } + result += sub.toToml(indent+" ", combinedKey) + case *tomlValue: + result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0)) + default: + result += fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0)) + } + } + return result +} + +// ToString is an alias for String +func (t *TomlTree) ToString() string { + return t.String() +} + +// String generates a human-readable representation of the current tree. +// Output spans multiple lines, and is suitable for ingest by a TOML parser +func (t *TomlTree) String() string { + return t.toToml("", "") +} + +// ToMap recursively generates a representation of the current tree using map[string]interface{}. +func (t *TomlTree) ToMap() map[string]interface{} { + result := map[string]interface{}{} + + for k, v := range t.values { + switch node := v.(type) { + case []*TomlTree: + var array []interface{} + for _, item := range node { + array = append(array, item.ToMap()) + } + result[k] = array + case *TomlTree: + result[k] = node.ToMap() + case map[string]interface{}: + sub := TreeFromMap(node) + result[k] = sub.ToMap() + case *tomlValue: + result[k] = node.value + } + } + + return result +} diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6ea6422..273db3c 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -2,6 +2,8 @@ Package errors provides simple error handling primitives. +`go get github.com/pkg/errors` + The traditional error handling idiom in Go is roughly akin to ```go if err != nil { diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 6c45c8d..1c9731a 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -28,7 +28,7 @@ // to reverse the operation of errors.Wrap to retrieve the original error // for inspection. Any error value which implements this interface // -// type Causer interface { +// type causer interface { // Cause() error // } // @@ -43,6 +43,9 @@ // // unknown error // } // +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// // Formatted printing of errors // // All error values returned from this package implement fmt.Formatter and can @@ -77,6 +80,9 @@ // } // } // +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// // See the documentation for Frame.Format for more details. package errors @@ -85,68 +91,62 @@ import ( "io" ) -// _error is an error implementation returned by New and Errorf -// that implements its own fmt.Formatter. 
-type _error struct { +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { msg string *stack } -func (e _error) Error() string { return e.msg } +func (f *fundamental) Error() string { return f.msg } -func (e _error) Format(s fmt.State, verb rune) { +func (f *fundamental) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { - io.WriteString(s, e.msg) - fmt.Fprintf(s, "%+v", e.StackTrace()) + io.WriteString(s, f.msg) + f.stack.Format(s, verb) return } fallthrough case 's': - io.WriteString(s, e.msg) - } -} - -// New returns an error with the supplied message. -func New(message string) error { - return _error{ - message, - callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -func Errorf(format string, args ...interface{}) error { - return _error{ - fmt.Sprintf(format, args...), - callers(), + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) } } -type cause struct { - cause error - msg string -} - -func (c cause) Error() string { return fmt.Sprintf("%s: %v", c.msg, c.Cause()) } -func (c cause) Cause() error { return c.cause } - -// wrapper is an error implementation returned by Wrap and Wrapf -// that implements its own fmt.Formatter. 
-type wrapper struct { - cause +type withStack struct { + error *stack } -func (w wrapper) Format(s fmt.State, verb rune) { +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - fmt.Fprintf(s, "%+v", w.StackTrace()) + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) return } fallthrough @@ -163,12 +163,13 @@ func Wrap(err error, message string) error { if err == nil { return nil } - return wrapper{ - cause: cause{ - cause: err, - msg: message, - }, - stack: callers(), + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), } } @@ -178,12 +179,35 @@ func Wrapf(err error, format string, args ...interface{}) error { if err == nil { return nil } - return wrapper{ - cause: cause{ - cause: err, - msg: fmt.Sprintf(format, args...), - }, - stack: callers(), + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) } } @@ -191,7 +215,7 @@ func Wrapf(err error, format string, args ...interface{}) error { // An error value has a cause if it implements the following // interface: // -// type Causer interface { +// type causer interface { // Cause() error // } // diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 243a64a..6b1f289 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -100,6 +100,19 @@ func (st StackTrace) Format(s fmt.State, verb rune) { // stack represents a stack of program counters. type stack []uintptr +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + func (s *stack) StackTrace() StackTrace { f := make([]Frame, len(*s)) for i := 0; i < len(f); i++ { diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS new file mode 100644 index 0000000..7eff823 --- /dev/null +++ b/vendor/github.com/pkg/sftp/CONTRIBUTORS @@ -0,0 +1,2 @@ +Dave Cheney +Saulius Gurklys diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE new file mode 100644 index 0000000..b7b5392 --- /dev/null +++ b/vendor/github.com/pkg/sftp/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md new file mode 100644 index 0000000..e406b1e --- /dev/null +++ b/vendor/github.com/pkg/sftp/README.md @@ -0,0 +1,27 @@ +sftp +---- + +The `sftp` package provides support for file system operations on remote ssh servers using the SFTP subsystem. + +[![UNIX Build Status](https://travis-ci.org/pkg/sftp.svg?branch=master)](https://travis-ci.org/pkg/sftp) [![GoDoc](http://godoc.org/github.com/pkg/sftp?status.svg)](http://godoc.org/github.com/pkg/sftp) + +usage and examples +------------------ + +See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for examples and usage. + +The basic operation of the package mirrors the facilities of the [os](http://golang.org/pkg/os) package. + +The Walker interface for directory traversal is heavily inspired by Keith Rarick's [fs](http://godoc.org/github.com/kr/fs) package. + +roadmap +------- + + * There is way too much duplication in the Client methods. If there was an unmarshal(interface{}) method this would reduce a heap of the duplication. + +contributing +------------ + +We welcome pull requests, bug fixes and issue reports. + +Before proposing a large change, first please discuss your change by raising an issue. diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go new file mode 100644 index 0000000..3e4c291 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -0,0 +1,237 @@ +package sftp + +// ssh_FXP_ATTRS support +// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 + +import ( + "os" + "syscall" + "time" +) + +const ( + ssh_FILEXFER_ATTR_SIZE = 0x00000001 + ssh_FILEXFER_ATTR_UIDGID = 0x00000002 + ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 + ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 + ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 +) + +// fileInfo is an artificial type designed to satisfy os.FileInfo. +type fileInfo struct { + name string + size int64 + mode os.FileMode + mtime time.Time + sys interface{} +} + +// Name returns the base name of the file. +func (fi *fileInfo) Name() string { return fi.name } + +// Size returns the length in bytes for regular files; system-dependent for others. +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode returns file mode bits. +func (fi *fileInfo) Mode() os.FileMode { return fi.mode } + +// ModTime returns the last modification time of the file. +func (fi *fileInfo) ModTime() time.Time { return fi.mtime } + +// IsDir returns true if the file is a directory. +func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } + +func (fi *fileInfo) Sys() interface{} { return fi.sys } + +// FileStat holds the original unmarshalled values from a call to READDIR or *STAT. 
+// It is exported for the purposes of accessing the raw values via os.FileInfo.Sys() +type FileStat struct { + Size uint64 + Mode uint32 + Mtime uint32 + Atime uint32 + UID uint32 + GID uint32 + Extended []StatExtended +} + +// StatExtended contains additional, extended information for a FileStat. +type StatExtended struct { + ExtType string + ExtData string +} + +func fileInfoFromStat(st *FileStat, name string) os.FileInfo { + fs := &fileInfo{ + name: name, + size: int64(st.Size), + mode: toFileMode(st.Mode), + mtime: time.Unix(int64(st.Mtime), 0), + sys: st, + } + return fs +} + +func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { + mtime := fi.ModTime().Unix() + atime := mtime + var flags uint32 = ssh_FILEXFER_ATTR_SIZE | + ssh_FILEXFER_ATTR_PERMISSIONS | + ssh_FILEXFER_ATTR_ACMODTIME + + fileStat := FileStat{ + Size: uint64(fi.Size()), + Mode: fromFileMode(fi.Mode()), + Mtime: uint32(mtime), + Atime: uint32(atime), + } + + // os specific file stat decoding + fileStatFromInfoOs(fi, &flags, &fileStat) + + return flags, fileStat +} + +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + var fs FileStat + if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { + fs.Size, b = unmarshalUint64(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.UID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.GID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { + fs.Mode, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { + fs.Atime, b = unmarshalUint32(b) + fs.Mtime, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { + var count uint32 + count, b = unmarshalUint32(b) + ext := make([]StatExtended, count, count) + for i := uint32(0); i < count; i++ { + var typ string + var data string + typ, b = unmarshalString(b) + data, b = unmarshalString(b) + ext[i] = StatExtended{typ, data} + } + fs.Extended = ext + } + return &fs, b +} + +func marshalFileInfo(b []byte, fi os.FileInfo) []byte { + // attributes variable struct, and also variable per protocol version + // spec version 3 attributes: + // uint32 flags + // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE + // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS + // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED + // string extended_type + // string extended_data + // ... 
more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&ssh_FILEXFER_ATTR_SIZE != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + return b +} + +// toFileMode converts sftp filemode bits to the os.FileMode specification +func toFileMode(mode uint32) os.FileMode { + var fm = os.FileMode(mode & 0777) + switch mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fm |= os.ModeDevice + case syscall.S_IFCHR: + fm |= os.ModeDevice | os.ModeCharDevice + case syscall.S_IFDIR: + fm |= os.ModeDir + case syscall.S_IFIFO: + fm |= os.ModeNamedPipe + case syscall.S_IFLNK: + fm |= os.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fm |= os.ModeSocket + } + if mode&syscall.S_ISGID != 0 { + fm |= os.ModeSetgid + } + if mode&syscall.S_ISUID != 0 { + fm |= os.ModeSetuid + } + if mode&syscall.S_ISVTX != 0 { + fm |= os.ModeSticky + } + return fm +} + +// fromFileMode converts from the os.FileMode specification to sftp filemode bits +func fromFileMode(mode os.FileMode) uint32 { + ret := uint32(0) + + if mode&os.ModeDevice != 0 { + if mode&os.ModeCharDevice != 0 { + ret |= syscall.S_IFCHR + } else { + ret |= syscall.S_IFBLK + } + } + if mode&os.ModeDir != 0 { + ret |= syscall.S_IFDIR + } + if mode&os.ModeSymlink != 0 { + ret |= syscall.S_IFLNK + } + if mode&os.ModeNamedPipe != 0 { + ret |= syscall.S_IFIFO + } + if mode&os.ModeSetgid != 0 { + ret |= syscall.S_ISGID + } + if mode&os.ModeSetuid != 0 { + ret |= syscall.S_ISUID + } + if mode&os.ModeSticky != 0 { + ret |= syscall.S_ISVTX + } + if mode&os.ModeSocket != 0 { + ret |= syscall.S_IFSOCK + } + + if mode&os.ModeType == 0 { + ret |= syscall.S_IFREG + } + ret |= uint32(mode & os.ModePerm) + + return ret +} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go new file mode 100644 index 0000000..81cf3ea --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -0,0 +1,11 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + // todo +} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go new file mode 100644 index 0000000..ab6ecde --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -0,0 +1,17 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "os" + "syscall" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + if statt, ok := fi.Sys().(*syscall.Stat_t); ok { + *flags |= ssh_FILEXFER_ATTR_UIDGID + fileStat.UID = statt.Uid + fileStat.GID = statt.Gid + } +} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go new file mode 100644 index 0000000..41f1063 --- /dev/null +++ b/vendor/github.com/pkg/sftp/client.go @@ -0,0 +1,1133 @@ +package sftp + +import ( + "bytes" + "encoding/binary" + "io" + "os" + "path" + "sync/atomic" + "time" + + "github.com/kr/fs" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" +) + +// 
MaxPacket sets the maximum size of the payload. +func MaxPacket(size int) func(*Client) error { + return func(c *Client) error { + if size < 1<<15 { + return errors.Errorf("size must be greater or equal to 32k") + } + c.maxPacket = size + return nil + } +} + +// NewClient creates a new SFTP client on conn, using zero or more option +// functions. +func NewClient(conn *ssh.Client, opts ...func(*Client) error) (*Client, error) { + s, err := conn.NewSession() + if err != nil { + return nil, err + } + if err := s.RequestSubsystem("sftp"); err != nil { + return nil, err + } + pw, err := s.StdinPipe() + if err != nil { + return nil, err + } + pr, err := s.StdoutPipe() + if err != nil { + return nil, err + } + + return NewClientPipe(pr, pw, opts...) +} + +// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. +// This can be used for connecting to an SFTP server over TCP/TLS or by using +// the system's ssh client program (e.g. via exec.Command). +func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...func(*Client) error) (*Client, error) { + sftp := &Client{ + clientConn: clientConn{ + conn: conn{ + Reader: rd, + WriteCloser: wr, + }, + inflight: make(map[uint32]chan<- result), + }, + maxPacket: 1 << 15, + } + if err := sftp.applyOptions(opts...); err != nil { + wr.Close() + return nil, err + } + if err := sftp.sendInit(); err != nil { + wr.Close() + return nil, err + } + if err := sftp.recvVersion(); err != nil { + wr.Close() + return nil, err + } + sftp.clientConn.wg.Add(1) + go sftp.loop() + return sftp, nil +} + +// Client represents an SFTP session on a *ssh.ClientConn SSH connection. +// Multiple Clients can be active on a single SSH connection, and a Client +// may be called concurrently from multiple Goroutines. +// +// Client implements the github.com/kr/fs.FileSystem interface. +type Client struct { + clientConn + + maxPacket int // max packet size read or written. + nextid uint32 +} + +// Create creates the named file mode 0666 (before umask), truncating it if +// it already exists. If successful, methods on the returned File can be +// used for I/O; the associated file descriptor has mode O_RDWR. +func (c *Client) Create(path string) (*File, error) { + return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) +} + +const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + +func (c *Client) sendInit() error { + return c.clientConn.conn.sendPacket(sshFxInitPacket{ + Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + }) +} + +// returns the next value of c.nextid +func (c *Client) nextID() uint32 { + return atomic.AddUint32(&c.nextid, 1) +} + +func (c *Client) recvVersion() error { + typ, data, err := c.recvPacket() + if err != nil { + return err + } + if typ != ssh_FXP_VERSION { + return &unexpectedPacketErr{ssh_FXP_VERSION, typ} + } + + version, _ := unmarshalUint32(data) + if version != sftpProtocolVersion { + return &unexpectedVersionErr{sftpProtocolVersion, version} + } + + return nil +} + +// Walk returns a new Walker rooted at root. +func (c *Client) Walk(root string) *fs.Walker { + return fs.WalkFS(root, c) +} + +// ReadDir reads the directory named by dirname and returns a list of +// directory entries. 
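A minimal connection sketch (not part of the vendored diff) showing the option-function style of NewClient; the address and sshClientConfig are placeholders, and authentication setup is omitted:

    package main

    import (
        "log"

        "github.com/pkg/sftp"
        "golang.org/x/crypto/ssh"
    )

    func main() {
        // sshClientConfig is a placeholder *ssh.ClientConfig (auth omitted).
        var sshClientConfig *ssh.ClientConfig
        sshConn, err := ssh.Dial("tcp", "example.com:22", sshClientConfig)
        if err != nil {
            log.Fatal(err)
        }
        client, err := sftp.NewClient(sshConn, sftp.MaxPacket(1<<16))
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
    }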
+func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { + handle, err := c.opendir(p) + if err != nil { + return nil, err + } + defer c.close(handle) // this has to defer earlier than the lock below + var attrs []os.FileInfo + var done = false + for !done { + id := c.nextID() + typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{ + ID: id, + Handle: handle, + }) + if err1 != nil { + err = err1 + done = true + break + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + for i := uint32(0); i < count; i++ { + var filename string + filename, data = unmarshalString(data) + _, data = unmarshalString(data) // discard longname + var attr *FileStat + attr, data = unmarshalAttrs(data) + if filename == "." || filename == ".." { + continue + } + attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + } + case ssh_FXP_STATUS: + // TODO(dfc) scope warning! + err = normaliseError(unmarshalStatus(id, data)) + done = true + default: + return nil, unimplementedPacketErr(typ) + } + } + if err == io.EOF { + err = nil + } + return attrs, err +} + +func (c *Client) opendir(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpOpendirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_HANDLE: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return handle, nil + case ssh_FXP_STATUS: + return "", unmarshalStatus(id, data) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Stat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. +func (c *Client) Stat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpStatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Lstat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. +func (c *Client) Lstat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpLstatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// ReadLink reads the target of a symbolic link. 
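Illustrative use of the directory and stat calls above, continuing from the client in the previous sketch (hypothetical paths; fmt, log and os imports assumed):

    entries, err := client.ReadDir("/var/log")
    if err != nil {
        log.Fatal(err)
    }
    for _, fi := range entries {
        fmt.Println(fi.Name(), fi.Size(), fi.Mode())
    }

    // Lstat does not follow symlinks; errors are normalised, so stdlib
    // predicates work on the result.
    if _, err := client.Lstat("/no/such/path"); os.IsNotExist(err) {
        fmt.Println("missing")
    }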
+func (c *Client) ReadLink(p string) (string, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpReadlinkPacket{
+		ID:   id,
+		Path: p,
+	})
+	if err != nil {
+		return "", err
+	}
+	switch typ {
+	case ssh_FXP_NAME:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return "", &unexpectedIDErr{id, sid}
+		}
+		count, data := unmarshalUint32(data)
+		if count != 1 {
+			return "", unexpectedCount(1, count)
+		}
+		filename, _ := unmarshalString(data) // ignore dummy attributes
+		return filename, nil
+	case ssh_FXP_STATUS:
+		return "", unmarshalStatus(id, data)
+	default:
+		return "", unimplementedPacketErr(typ)
+	}
+}
+
+// Symlink creates a symbolic link at 'newname', pointing at target 'oldname'.
+func (c *Client) Symlink(oldname, newname string) error {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpSymlinkPacket{
+		ID:         id,
+		Linkpath:   newname,
+		Targetpath: oldname,
+	})
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case ssh_FXP_STATUS:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return unimplementedPacketErr(typ)
+	}
+}
+
+// setstat is a convenience wrapper for changing various attributes of the named file.
+func (c *Client) setstat(path string, flags uint32, attrs interface{}) error {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpSetstatPacket{
+		ID:    id,
+		Path:  path,
+		Flags: flags,
+		Attrs: attrs,
+	})
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case ssh_FXP_STATUS:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return unimplementedPacketErr(typ)
+	}
+}
+
+// Chtimes changes the access and modification times of the named file.
+func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error {
+	type times struct {
+		Atime uint32
+		Mtime uint32
+	}
+	attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())}
+	return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs)
+}
+
+// Chown changes the user and group owners of the named file.
+func (c *Client) Chown(path string, uid, gid int) error {
+	type owner struct {
+		UID uint32
+		GID uint32
+	}
+	attrs := owner{uint32(uid), uint32(gid)}
+	return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs)
+}
+
+// Chmod changes the permissions of the named file.
+func (c *Client) Chmod(path string, mode os.FileMode) error {
+	return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode))
+}
+
+// Truncate sets the size of the named file. Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify how the server should behave when the
+// requested size is greater than the current size.
+func (c *Client) Truncate(path string, size int64) error {
+	return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size))
+}
+
+// Open opens the named file for reading. If successful, methods on the
+// returned file can be used for reading; the associated file descriptor
+// has mode O_RDONLY.
+func (c *Client) Open(path string) (*File, error) {
+	return c.open(path, flags(os.O_RDONLY))
+}
+
+// OpenFile is the generalized open call; most users will use Open or
+// Create instead. It opens the named file with specified flag (O_RDONLY
+// etc.). If successful, methods on the returned File can be used for I/O.
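A short sketch of the setstat-backed helpers above, continuing from the same assumed client (hypothetical path; time import assumed):

    if err := client.Chmod("/tmp/report.txt", 0644); err != nil {
        log.Fatal(err)
    }
    now := time.Now()
    if err := client.Chtimes("/tmp/report.txt", now, now); err != nil {
        log.Fatal(err)
    }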
+func (c *Client) OpenFile(path string, f int) (*File, error) {
+	return c.open(path, flags(f))
+}
+
+func (c *Client) open(path string, pflags uint32) (*File, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpOpenPacket{
+		ID:     id,
+		Path:   path,
+		Pflags: pflags,
+	})
+	if err != nil {
+		return nil, err
+	}
+	switch typ {
+	case ssh_FXP_HANDLE:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return nil, &unexpectedIDErr{id, sid}
+		}
+		handle, _ := unmarshalString(data)
+		return &File{c: c, path: path, handle: handle}, nil
+	case ssh_FXP_STATUS:
+		return nil, normaliseError(unmarshalStatus(id, data))
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+// close closes a handle previously returned in the response
+// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid
+// immediately after this request has been sent.
+func (c *Client) close(handle string) error {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpClosePacket{
+		ID:     id,
+		Handle: handle,
+	})
+	if err != nil {
+		return err
+	}
+	switch typ {
+	case ssh_FXP_STATUS:
+		return normaliseError(unmarshalStatus(id, data))
+	default:
+		return unimplementedPacketErr(typ)
+	}
+}
+
+func (c *Client) fstat(handle string) (*FileStat, error) {
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpFstatPacket{
+		ID:     id,
+		Handle: handle,
+	})
+	if err != nil {
+		return nil, err
+	}
+	switch typ {
+	case ssh_FXP_ATTRS:
+		sid, data := unmarshalUint32(data)
+		if sid != id {
+			return nil, &unexpectedIDErr{id, sid}
+		}
+		attr, _ := unmarshalAttrs(data)
+		return attr, nil
+	case ssh_FXP_STATUS:
+		return nil, unmarshalStatus(id, data)
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+// StatVFS retrieves VFS statistics from a remote host.
+//
+// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature
+// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt.
+func (c *Client) StatVFS(path string) (*StatVFS, error) {
+	// send the StatVFS packet to the server
+	id := c.nextID()
+	typ, data, err := c.sendPacket(sshFxpStatvfsPacket{
+		ID:   id,
+		Path: path,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	switch typ {
+	// server responded with valid data
+	case ssh_FXP_EXTENDED_REPLY:
+		var response StatVFS
+		err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response)
+		if err != nil {
+			return nil, errors.New("cannot parse reply")
+		}
+
+		return &response, nil
+
+	// the request failed
+	case ssh_FXP_STATUS:
+		return nil, errors.New(fxp(ssh_FXP_STATUS).String())
+
+	default:
+		return nil, unimplementedPacketErr(typ)
+	}
+}
+
+// Join joins any number of path elements into a single path, adding a
+// separating slash if necessary. The result is Cleaned; in particular, all
+// empty strings are ignored.
+func (c *Client) Join(elem ...string) string { return path.Join(elem...) }
+
+// Remove removes the specified file or directory. An error will be returned if no
+// file or directory with the specified path exists, or if the specified directory
+// is not empty.
+func (c *Client) Remove(path string) error {
+	err := c.removeFile(path)
+	switch err := err.(type) {
+	case *StatusError:
+		switch err.Code {
+		// some servers, *cough* osx *cough*, return EPERM, not ENOTDIR.
+ // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY + case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY: + return c.removeDirectory(path) + default: + return err + } + default: + return err + } + return err +} + +func (c *Client) removeFile(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRemovePacket{ + ID: id, + Filename: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) removeDirectory(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRmdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Rename renames a file. +func (c *Client) Rename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) realpath(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRealpathPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore attributes + return filename, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Getwd returns the current working directory of the server. Operations +// involving relative paths will be based at this location. +func (c *Client) Getwd() (string, error) { + return c.realpath(".") +} + +// Mkdir creates the specified directory. An error will be returned if a file or +// directory with the specified path already exists, or if the directory's +// parent folder does not exist (the method cannot create complete paths). +func (c *Client) Mkdir(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpMkdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// applyOptions applies options functions to the Client. +// If an error is encountered, option processing ceases. +func (c *Client) applyOptions(opts ...func(*Client) error) error { + for _, f := range opts { + if err := f(c); err != nil { + return err + } + } + return nil +} + +// File represents a remote file. +type File struct { + c *Client + path string + handle string + offset uint64 // current offset within remote file +} + +// Close closes the File, rendering it unusable for I/O. It returns an +// error, if any. +func (f *File) Close() error { + return f.c.close(f.handle) +} + +// Name returns the name of the file as presented to Open or Create. +func (f *File) Name() string { + return f.path +} + +const maxConcurrentRequests = 64 + +// Read reads up to len(b) bytes from the File. 
It returns the number of +// bytes read and an error, if any. EOF is signaled by a zero count with +// err set to io.EOF. +func (f *File) Read(b []byte) (int, error) { + // Split the read into multiple maxPacket sized concurrent reads + // bounded by maxConcurrentRequests. This allows reads with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + ch := make(chan result, 1) + type inflightRead struct { + b []byte + offset uint64 + } + reqs := map[uint32]inflightRead{} + type offsetErr struct { + offset uint64 + err error + } + var firstErr offsetErr + + sendReq := func(b []byte, offset uint64) { + reqID := f.c.nextID() + f.c.dispatchRequest(ch, sshFxpReadPacket{ + ID: reqID, + Handle: f.handle, + Offset: offset, + Len: uint32(len(b)), + }) + inFlight++ + reqs[reqID] = inflightRead{b: b, offset: offset} + } + + var read int + for len(b) > 0 || inFlight > 0 { + for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil { + l := min(len(b), f.c.maxPacket) + rb := b[:l] + sendReq(rb, offset) + offset += uint64(l) + b = b[l:] + } + + if inFlight == 0 { + break + } + select { + case res := <-ch: + inFlight-- + if res.err != nil { + firstErr = offsetErr{offset: 0, err: res.err} + break + } + reqID, data := unmarshalUint32(res.data) + req, ok := reqs[reqID] + if !ok { + firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} + break + } + delete(reqs, reqID) + switch res.typ { + case ssh_FXP_STATUS: + if firstErr.err == nil || req.offset < firstErr.offset { + firstErr = offsetErr{ + offset: req.offset, + err: normaliseError(unmarshalStatus(reqID, res.data)), + } + break + } + case ssh_FXP_DATA: + l, data := unmarshalUint32(data) + n := copy(req.b, data[:l]) + read += n + if n < len(req.b) { + sendReq(req.b[l:], req.offset+uint64(l)) + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} + break + } + } + } + // If the error is anything other than EOF, then there + // may be gaps in the data copied to the buffer so it's + // best to return 0 so the caller can't make any + // incorrect assumptions about the state of the buffer. + if firstErr.err != nil && firstErr.err != io.EOF { + read = 0 + } + f.offset += uint64(read) + return read, firstErr.err +} + +// WriteTo writes the file to w. The return value is the number of bytes +// written. Any error encountered during the write is also returned. 
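The pipelining in Read above means a single call with a suitably large buffer is served by many overlapping READ requests. A sketch, continuing from the same assumed client (hypothetical path):

    remote, err := client.Open("/remote/big.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer remote.Close()
    buf := make([]byte, 4<<20) // large buffer => many concurrent READ packets
    n, err := remote.Read(buf) // per the doc comment above, EOF is a zero count with err == io.EOF
    fmt.Println(n, err)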
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	fi, err := f.Stat()
+	if err != nil {
+		return 0, err
+	}
+	inFlight := 0
+	desiredInFlight := 1
+	offset := f.offset
+	writeOffset := offset
+	fileSize := uint64(fi.Size())
+	ch := make(chan result, 1)
+	type inflightRead struct {
+		b      []byte
+		offset uint64
+	}
+	reqs := map[uint32]inflightRead{}
+	pendingWrites := map[uint64][]byte{}
+	type offsetErr struct {
+		offset uint64
+		err    error
+	}
+	var firstErr offsetErr
+
+	sendReq := func(b []byte, offset uint64) {
+		reqID := f.c.nextID()
+		f.c.dispatchRequest(ch, sshFxpReadPacket{
+			ID:     reqID,
+			Handle: f.handle,
+			Offset: offset,
+			Len:    uint32(len(b)),
+		})
+		inFlight++
+		reqs[reqID] = inflightRead{b: b, offset: offset}
+	}
+
+	var copied int64
+	for firstErr.err == nil || inFlight > 0 {
+		for inFlight < desiredInFlight && firstErr.err == nil {
+			b := make([]byte, f.c.maxPacket)
+			sendReq(b, offset)
+			offset += uint64(f.c.maxPacket)
+			if offset > fileSize {
+				desiredInFlight = 1
+			}
+		}
+
+		if inFlight == 0 {
+			break
+		}
+		select {
+		case res := <-ch:
+			inFlight--
+			if res.err != nil {
+				firstErr = offsetErr{offset: 0, err: res.err}
+				break
+			}
+			reqID, data := unmarshalUint32(res.data)
+			req, ok := reqs[reqID]
+			if !ok {
+				firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)}
+				break
+			}
+			delete(reqs, reqID)
+			switch res.typ {
+			case ssh_FXP_STATUS:
+				if firstErr.err == nil || req.offset < firstErr.offset {
+					firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))}
+					break
+				}
+			case ssh_FXP_DATA:
+				l, data := unmarshalUint32(data)
+				if req.offset == writeOffset {
+					nbytes, err := w.Write(data)
+					copied += int64(nbytes)
+					if err != nil {
+						firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err}
+						break
+					}
+					if nbytes < int(l) {
+						firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite}
+						break
+					}
+					switch {
+					case offset > fileSize:
+						desiredInFlight = 1
+					case desiredInFlight < maxConcurrentRequests:
+						desiredInFlight++
+					}
+					writeOffset += uint64(nbytes)
+					for pendingData, ok := pendingWrites[writeOffset]; ok; pendingData, ok = pendingWrites[writeOffset] {
+						nbytes, err := w.Write(pendingData)
+						if err != nil {
+							firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err}
+							break
+						}
+						if nbytes < len(pendingData) {
+							firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite}
+							break
+						}
+						writeOffset += uint64(nbytes)
+						inFlight--
+					}
+				} else {
+					// Don't write the data yet because
+					// this response came in out of order
+					// and we need to wait for responses
+					// for earlier segments of the file.
+					inFlight++ // Pending writes should still be considered inFlight.
+					pendingWrites[req.offset] = data
+				}
+			default:
+				firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)}
+				break
+			}
+		}
+	}
+	if firstErr.err != io.EOF {
+		return copied, firstErr.err
+	}
+	return copied, nil
+}
+
+// Stat returns the FileInfo structure describing the file. If there is an
+// error, it is also returned.
+func (f *File) Stat() (os.FileInfo, error) {
+	fs, err := f.c.fstat(f.handle)
+	if err != nil {
+		return nil, err
+	}
+	return fileInfoFromStat(fs, path.Base(f.path)), nil
+}
+
+// Write writes len(b) bytes to the File. It returns the number of bytes
+// written and an error, if any. Write returns a non-nil error when n !=
+// len(b).
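WriteTo above makes downloads a one-liner via io.Copy, which prefers the io.WriterTo fast path when the source implements it. A sketch, continuing from the same assumed client (hypothetical paths):

    remote, err := client.Open("/remote/data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer remote.Close()
    local, err := os.Create("data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer local.Close()
    if _, err := io.Copy(local, remote); err != nil { // uses File.WriteTo
        log.Fatal(err)
    }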
+func (f *File) Write(b []byte) (int, error) {
+	// Split the write into multiple maxPacket sized concurrent writes
+	// bounded by maxConcurrentRequests. This allows writes with a suitably
+	// large buffer to transfer data at a much faster rate due to
+	// overlapping round trip times.
+	inFlight := 0
+	desiredInFlight := 1
+	offset := f.offset
+	ch := make(chan result, 1)
+	var firstErr error
+	written := len(b)
+	for len(b) > 0 || inFlight > 0 {
+		for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil {
+			l := min(len(b), f.c.maxPacket)
+			rb := b[:l]
+			f.c.dispatchRequest(ch, sshFxpWritePacket{
+				ID:     f.c.nextID(),
+				Handle: f.handle,
+				Offset: offset,
+				Length: uint32(len(rb)),
+				Data:   rb,
+			})
+			inFlight++
+			offset += uint64(l)
+			b = b[l:]
+		}
+
+		if inFlight == 0 {
+			break
+		}
+		select {
+		case res := <-ch:
+			inFlight--
+			if res.err != nil {
+				firstErr = res.err
+				break
+			}
+			switch res.typ {
+			case ssh_FXP_STATUS:
+				id, _ := unmarshalUint32(res.data)
+				err := normaliseError(unmarshalStatus(id, res.data))
+				if err != nil && firstErr == nil {
+					firstErr = err
+					break
+				}
+				if desiredInFlight < maxConcurrentRequests {
+					desiredInFlight++
+				}
+			default:
+				firstErr = unimplementedPacketErr(res.typ)
+				break
+			}
+		}
+	}
+	// If error is non-nil, then there may be gaps in the data written to
+	// the file so it's best to return 0 so the caller can't make any
+	// incorrect assumptions about the state of the file.
+	if firstErr != nil {
+		written = 0
+	}
+	f.offset += uint64(written)
+	return written, firstErr
+}
+
+// ReadFrom reads data from r until EOF and writes it to the file. The return
+// value is the number of bytes read. Any error except io.EOF encountered
+// during the read is also returned.
+func (f *File) ReadFrom(r io.Reader) (int64, error) {
+	inFlight := 0
+	desiredInFlight := 1
+	offset := f.offset
+	ch := make(chan result, 1)
+	var firstErr error
+	read := int64(0)
+	b := make([]byte, f.c.maxPacket)
+	for inFlight > 0 || firstErr == nil {
+		for inFlight < desiredInFlight && firstErr == nil {
+			n, err := r.Read(b)
+			if err != nil {
+				firstErr = err
+			}
+			f.c.dispatchRequest(ch, sshFxpWritePacket{
+				ID:     f.c.nextID(),
+				Handle: f.handle,
+				Offset: offset,
+				Length: uint32(n),
+				Data:   b[:n],
+			})
+			inFlight++
+			offset += uint64(n)
+			read += int64(n)
+		}
+
+		if inFlight == 0 {
+			break
+		}
+		select {
+		case res := <-ch:
+			inFlight--
+			if res.err != nil {
+				firstErr = res.err
+				break
+			}
+			switch res.typ {
+			case ssh_FXP_STATUS:
+				id, _ := unmarshalUint32(res.data)
+				err := normaliseError(unmarshalStatus(id, res.data))
+				if err != nil && firstErr == nil {
+					firstErr = err
+					break
+				}
+				if desiredInFlight < maxConcurrentRequests {
+					desiredInFlight++
+				}
+			default:
+				firstErr = unimplementedPacketErr(res.typ)
+				break
+			}
+		}
+	}
+	if firstErr == io.EOF {
+		firstErr = nil
+	}
+	// If error is non-nil, then there may be gaps in the data written to
+	// the file so it's best to return 0 so the caller can't make any
+	// incorrect assumptions about the state of the file.
+	if firstErr != nil {
+		read = 0
+	}
+	f.offset += uint64(read)
+	return read, firstErr
+}
+
+// Seek implements io.Seeker by setting the client offset for the next Read or
+// Write. It returns the new offset. Seeking before the start or beyond the end
+// of the file is undefined. Seeking relative to the end calls Stat.
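ReadFrom gives uploads the same pipelined treatment; the mirror-image sketch, continuing from the same assumed client (hypothetical paths). ReadFrom is called directly here rather than via io.Copy so the sftp-side pipelining is used regardless of what the local file type implements:

    local, err := os.Open("data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer local.Close()
    remote, err := client.Create("/remote/data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer remote.Close()
    if _, err := remote.ReadFrom(local); err != nil {
        log.Fatal(err)
    }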
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	switch whence {
+	case os.SEEK_SET:
+		f.offset = uint64(offset)
+	case os.SEEK_CUR:
+		f.offset = uint64(int64(f.offset) + offset)
+	case os.SEEK_END:
+		fi, err := f.Stat()
+		if err != nil {
+			return int64(f.offset), err
+		}
+		f.offset = uint64(fi.Size() + offset)
+	default:
+		return int64(f.offset), unimplementedSeekWhence(whence)
+	}
+	return int64(f.offset), nil
+}
+
+// Chown changes the uid/gid of the current file.
+func (f *File) Chown(uid, gid int) error {
+	return f.c.Chown(f.path, uid, gid)
+}
+
+// Chmod changes the permissions of the current file.
+func (f *File) Chmod(mode os.FileMode) error {
+	return f.c.Chmod(f.path, mode)
+}
+
+// Truncate sets the size of the current file. Although it may be safely assumed
+// that if the size is less than its current size it will be truncated to fit,
+// the SFTP protocol does not specify how the server should behave when the
+// requested size is greater than the current size.
+func (f *File) Truncate(size int64) error {
+	return f.c.Truncate(f.path, size)
+}
+
+func min(a, b int) int {
+	if a > b {
+		return b
+	}
+	return a
+}
+
+// normaliseError normalises an error into a more standard form that can be
+// checked against stdlib errors like io.EOF or os.ErrNotExist.
+func normaliseError(err error) error {
+	switch err := err.(type) {
+	case *StatusError:
+		switch err.Code {
+		case ssh_FX_EOF:
+			return io.EOF
+		case ssh_FX_NO_SUCH_FILE:
+			return os.ErrNotExist
+		case ssh_FX_OK:
+			return nil
+		default:
+			return err
+		}
+	default:
+		return err
+	}
+}
+
+func unmarshalStatus(id uint32, data []byte) error {
+	sid, data := unmarshalUint32(data)
+	if sid != id {
+		return &unexpectedIDErr{id, sid}
+	}
+	code, data := unmarshalUint32(data)
+	msg, data, err := unmarshalStringSafe(data)
+	if err != nil {
+		return err
+	}
+	lang, _, _ := unmarshalStringSafe(data)
+	return &StatusError{
+		Code: code,
+		msg:  msg,
+		lang: lang,
+	}
+}
+
+func marshalStatus(b []byte, err StatusError) []byte {
+	b = marshalUint32(b, err.Code)
+	b = marshalString(b, err.msg)
+	b = marshalString(b, err.lang)
+	return b
+}
+
+// flags converts the flags passed to OpenFile into ssh flags.
+// Unsupported flags are ignored.
+func flags(f int) uint32 {
+	var out uint32
+	switch f & os.O_WRONLY {
+	case os.O_WRONLY:
+		out |= ssh_FXF_WRITE
+	case os.O_RDONLY:
+		out |= ssh_FXF_READ
+	}
+	if f&os.O_RDWR == os.O_RDWR {
+		out |= ssh_FXF_READ | ssh_FXF_WRITE
+	}
+	if f&os.O_APPEND == os.O_APPEND {
+		out |= ssh_FXF_APPEND
+	}
+	if f&os.O_CREATE == os.O_CREATE {
+		out |= ssh_FXF_CREAT
+	}
+	if f&os.O_TRUNC == os.O_TRUNC {
+		out |= ssh_FXF_TRUNC
+	}
+	if f&os.O_EXCL == os.O_EXCL {
+		out |= ssh_FXF_EXCL
+	}
+	return out
+}
diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go
new file mode 100644
index 0000000..9b03d11
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/conn.go
@@ -0,0 +1,122 @@
+package sftp
+
+import (
+	"encoding"
+	"io"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+// conn implements a bidirectional channel on which client and server
+// connections are multiplexed.
+type conn struct {
+	io.Reader
+	io.WriteCloser
+	sync.Mutex // used to serialise writes to sendPacket
+}
+
+func (c *conn) recvPacket() (uint8, []byte, error) {
+	return recvPacket(c)
+}
+
+func (c *conn) sendPacket(m encoding.BinaryMarshaler) error {
+	c.Lock()
+	defer c.Unlock()
+	return sendPacket(c, m)
+}
+
+type clientConn struct {
+	conn
+	wg         sync.WaitGroup
+	sync.Mutex                          // protects inflight
+	inflight   map[uint32]chan<- result // outstanding requests
+}
+
+// Close closes the SFTP session.
+func (c *clientConn) Close() error {
+	defer c.wg.Wait()
+	return c.conn.Close()
+}
+
+func (c *clientConn) loop() {
+	defer c.wg.Done()
+	err := c.recv()
+	if err != nil {
+		c.broadcastErr(err)
+	}
+}
+
+// recv continuously reads from the server and forwards responses to the
+// appropriate channel.
+func (c *clientConn) recv() error {
+	defer c.conn.Close()
+	for {
+		typ, data, err := c.recvPacket()
+		if err != nil {
+			return err
+		}
+		sid, _ := unmarshalUint32(data)
+		c.Lock()
+		ch, ok := c.inflight[sid]
+		delete(c.inflight, sid)
+		c.Unlock()
+		if !ok {
+			// This is an unexpected occurrence. Send the error
+			// back to all listeners so that they terminate
+			// gracefully.
+			return errors.Errorf("sid: %v not found", sid)
+		}
+		ch <- result{typ: typ, data: data}
+	}
+}
+
+// result captures the result of receiving a packet from the server.
+type result struct {
+	typ  byte
+	data []byte
+	err  error
+}
+
+type idmarshaler interface {
+	id() uint32
+	encoding.BinaryMarshaler
+}
+
+func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) {
+	ch := make(chan result, 1)
+	c.dispatchRequest(ch, p)
+	s := <-ch
+	return s.typ, s.data, s.err
+}
+
+func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) {
+	c.Lock()
+	c.inflight[p.id()] = ch
+	if err := c.conn.sendPacket(p); err != nil {
+		delete(c.inflight, p.id())
+		ch <- result{err: err}
+	}
+	c.Unlock()
+}
+
+// broadcastErr sends an error to all goroutines waiting for a response.
+func (c *clientConn) broadcastErr(err error) {
+	c.Lock()
+	listeners := make([]chan<- result, 0, len(c.inflight))
+	for _, ch := range c.inflight {
+		listeners = append(listeners, ch)
+	}
+	c.Unlock()
+	for _, ch := range listeners {
+		ch <- result{err: err}
+	}
+}
+
+type serverConn struct {
+	conn
+}
+
+func (s *serverConn) sendError(p id, err error) error {
+	return s.sendPacket(statusFromError(p, err))
+}
diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go
new file mode 100644
index 0000000..3e264ab
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/debug.go
@@ -0,0 +1,9 @@
+// +build debug
+
+package sftp
+
+import "log"
+
+func debug(fmt string, args ...interface{}) {
+	log.Printf(fmt, args...)
+}
diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go
new file mode 100644
index 0000000..fba4159
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/packet.go
@@ -0,0 +1,901 @@
+package sftp
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	errShortPacket           = errors.New("packet too short")
+	errUnknownExtendedPacket = errors.New("unknown extended packet")
+)
+
+const (
+	debugDumpTxPacket      = false
+	debugDumpRxPacket      = false
+	debugDumpTxPacketBytes = false
+	debugDumpRxPacketBytes = false
+)
+
+func marshalUint32(b []byte, v uint32) []byte {
+	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+func marshalUint64(b []byte, v uint64) []byte {
+	return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v))
+}
+
+func marshalString(b []byte, v string) []byte {
+	return append(marshalUint32(b, uint32(len(v))), v...)
+}
+
+func marshal(b []byte, v interface{}) []byte {
+	if v == nil {
+		return b
+	}
+	switch v := v.(type) {
+	case uint8:
+		return append(b, v)
+	case uint32:
+		return marshalUint32(b, v)
+	case uint64:
+		return marshalUint64(b, v)
+	case string:
+		return marshalString(b, v)
+	case os.FileInfo:
+		return marshalFileInfo(b, v)
+	default:
+		switch d := reflect.ValueOf(v); d.Kind() {
+		case reflect.Struct:
+			for i, n := 0, d.NumField(); i < n; i++ {
+				b = marshal(b, d.Field(i).Interface())
+			}
+			return b
+		case reflect.Slice:
+			for i, n := 0, d.Len(); i < n; i++ {
+				b = marshal(b, d.Index(i).Interface())
+			}
+			return b
+		default:
+			panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v))
+		}
+	}
+}
+
+func unmarshalUint32(b []byte) (uint32, []byte) {
+	v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+	return v, b[4:]
+}
+
+func unmarshalUint32Safe(b []byte) (uint32, []byte, error) {
+	var v uint32
+	if len(b) < 4 {
+		return 0, nil, errShortPacket
+	}
+	v, b = unmarshalUint32(b)
+	return v, b, nil
+}
+
+func unmarshalUint64(b []byte) (uint64, []byte) {
+	h, b := unmarshalUint32(b)
+	l, b := unmarshalUint32(b)
+	return uint64(h)<<32 | uint64(l), b
+}
+
+func unmarshalUint64Safe(b []byte) (uint64, []byte, error) {
+	var v uint64
+	if len(b) < 8 {
+		return 0, nil, errShortPacket
+	}
+	v, b = unmarshalUint64(b)
+	return v, b, nil
+}
+
+func unmarshalString(b []byte) (string, []byte) {
+	n, b := unmarshalUint32(b)
+	return string(b[:n]), b[n:]
+}
+
+func unmarshalStringSafe(b []byte) (string, []byte, error) {
+	n, b, err := unmarshalUint32Safe(b)
+	if err != nil {
+		return "", nil, err
+	}
+	if int64(n) > int64(len(b)) {
+		return "", nil, errShortPacket
+	}
+	return string(b[:n]), b[n:], nil
+}
+
+// sendPacket marshals m and writes it to w using the SFTP framing from
+// draft-ietf-secsh-filexfer-02: a big-endian uint32 length prefix followed
+// by the packet payload.
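A standalone sketch (not part of the vendored diff) of the string encoding the helpers above implement: a big-endian uint32 length prefix plus the raw bytes, reproduced here with only the standard library so it can run outside the package:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Same byte layout that marshalString/unmarshalString produce and consume.
        var buf bytes.Buffer
        binary.Write(&buf, binary.BigEndian, uint32(len("sftp")))
        buf.WriteString("sftp")
        fmt.Printf("% x\n", buf.Bytes()) // 00 00 00 04 73 66 74 70
    }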
+func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { + bb, err := m.MarshalBinary() + if err != nil { + return errors.Errorf("binary marshaller failed: %v", err) + } + if debugDumpTxPacketBytes { + debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:]) + } else if debugDumpTxPacket { + debug("send packet: %s %d bytes", fxp(bb[0]), len(bb)) + } + l := uint32(len(bb)) + hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)} + _, err = w.Write(hdr) + if err != nil { + return errors.Errorf("failed to send packet header: %v", err) + } + _, err = w.Write(bb) + if err != nil { + return errors.Errorf("failed to send packet body: %v", err) + } + return nil +} + +func recvPacket(r io.Reader) (uint8, []byte, error) { + var b = []byte{0, 0, 0, 0} + if _, err := io.ReadFull(r, b); err != nil { + return 0, nil, err + } + l, _ := unmarshalUint32(b) + b = make([]byte, l) + if _, err := io.ReadFull(r, b); err != nil { + debug("recv packet %d bytes: err %v", l, err) + return 0, nil, err + } + if debugDumpRxPacketBytes { + debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:]) + } else if debugDumpRxPacket { + debug("recv packet: %s %d bytes", fxp(b[0]), l) + } + return b[0], b[1:], nil +} + +type extensionPair struct { + Name string + Data string +} + +func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { + var ep extensionPair + var err error + ep.Name, b, err = unmarshalStringSafe(b) + if err != nil { + return ep, b, err + } + ep.Data, b, err = unmarshalStringSafe(b) + if err != nil { + return ep, b, err + } + return ep, b, err +} + +// Here starts the definition of packets along with their MarshalBinary +// implementations. +// Manually writing the marshalling logic wins us a lot of time and +// allocation. + +type sshFxInitPacket struct { + Version uint32 + Extensions []extensionPair +} + +func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_INIT) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { + var err error + if p.Version, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + for len(b) > 0 { + var ep extensionPair + ep, b, err = unmarshalExtensionPair(b) + if err != nil { + return err + } + p.Extensions = append(p.Extensions, ep) + } + return nil +} + +type sshFxVersionPacket struct { + Version uint32 + Extensions []struct { + Name, Data string + } +} + +func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_VERSION) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(str) + + b := make([]byte, 0, l) + b = append(b, packetType) + b = marshalUint32(b, id) + b = marshalString(b, str) + return b, nil +} + +func unmarshalIDString(b []byte, id *uint32, str *string) error { + var err error + *id, b, err = unmarshalUint32Safe(b) + if err != nil { + return err + } + *str, b, err = unmarshalStringSafe(b) + return err 
+} + +type sshFxpReaddirPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpReaddirPacket) id() uint32 { return p.ID } + +func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) +} + +func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpOpendirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpOpendirPacket) id() uint32 { return p.ID } + +func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) +} + +func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpLstatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpLstatPacket) id() uint32 { return p.ID } + +func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) +} + +func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpStatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpStatPacket) id() uint32 { return p.ID } + +func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) +} + +func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpFstatPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpFstatPacket) id() uint32 { return p.ID } + +func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) +} + +func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpClosePacket struct { + ID uint32 + Handle string +} + +func (p sshFxpClosePacket) id() uint32 { return p.ID } + +func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) +} + +func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpRemovePacket struct { + ID uint32 + Filename string +} + +func (p sshFxpRemovePacket) id() uint32 { return p.ID } + +func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) +} + +func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Filename) +} + +type sshFxpRmdirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRmdirPacket) id() uint32 { return p.ID } + +func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) +} + +func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpSymlinkPacket struct { + ID uint32 + Targetpath string + Linkpath string +} + +func (p sshFxpSymlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Targetpath) + + 4 + len(p.Linkpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_SYMLINK) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Targetpath) + b = marshalString(b, p.Linkpath) + return b, nil +} + +func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else 
if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Linkpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadlinkPacket struct { + ID uint32 + Path string +} + +func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) +} + +func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpRealpathPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRealpathPacket) id() uint32 { return p.ID } + +func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) +} + +func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpNameAttr struct { + Name string + LongName string + Attrs []interface{} +} + +func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) { + b := []byte{} + b = marshalString(b, p.Name) + b = marshalString(b, p.LongName) + for _, attr := range p.Attrs { + b = marshal(b, attr) + } + return b, nil +} + +type sshFxpNamePacket struct { + ID uint32 + NameAttrs []sshFxpNameAttr +} + +func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { + b := []byte{} + b = append(b, ssh_FXP_NAME) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, uint32(len(p.NameAttrs))) + for _, na := range p.NameAttrs { + ab, err := na.MarshalBinary() + if err != nil { + return nil, err + } + + b = append(b, ab...) + } + return b, nil +} + +type sshFxpOpenPacket struct { + ID uint32 + Path string + Pflags uint32 + Flags uint32 // ignored +} + +func (p sshFxpOpenPacket) id() uint32 { return p.ID } + +func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + + 4 + len(p.Path) + + 4 + 4 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_OPEN) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Pflags) + b = marshalUint32(b, p.Flags) + return b, nil +} + +func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadPacket struct { + ID uint32 + Handle string + Offset uint64 + Len uint32 +} + +func (p sshFxpReadPacket) id() uint32 { return p.ID } + +func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 // uint64 + uint32 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_READ) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Len) + return b, nil +} + +func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Len, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p 
sshFxpRenamePacket) id() uint32 { return p.ID } + +func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_RENAME) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + +func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpWritePacket struct { + ID uint32 + Handle string + Offset uint64 + Length uint32 + Data []byte +} + +func (p sshFxpWritePacket) id() uint32 { return p.ID } + +func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 + // uint64 + uint32 + len(p.Data) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_WRITE) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Length) + b = append(b, p.Data...) + return b, nil +} + +func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errShortPacket + } + + p.Data = append([]byte{}, b[:p.Length]...) 
+	return nil
+}
+
+type sshFxpMkdirPacket struct {
+	ID    uint32
+	Path  string
+	Flags uint32 // ignored
+}
+
+func (p sshFxpMkdirPacket) id() uint32 { return p.ID }
+
+func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) {
+	l := 1 + 4 + // type(byte) + uint32
+		4 + len(p.Path) +
+		4 // uint32
+
+	b := make([]byte, 0, l)
+	b = append(b, ssh_FXP_MKDIR)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Path)
+	b = marshalUint32(b, p.Flags)
+	return b, nil
+}
+
+func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+type sshFxpSetstatPacket struct {
+	ID    uint32
+	Path  string
+	Flags uint32
+	Attrs interface{}
+}
+
+type sshFxpFsetstatPacket struct {
+	ID     uint32
+	Handle string
+	Flags  uint32
+	Attrs  interface{}
+}
+
+func (p sshFxpSetstatPacket) id() uint32  { return p.ID }
+func (p sshFxpFsetstatPacket) id() uint32 { return p.ID }
+
+func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) {
+	l := 1 + 4 + // type(byte) + uint32
+		4 + len(p.Path) +
+		4 // uint32 flags; attrs are appended separately below
+
+	b := make([]byte, 0, l)
+	b = append(b, ssh_FXP_SETSTAT)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Path)
+	b = marshalUint32(b, p.Flags)
+	b = marshal(b, p.Attrs)
+	return b, nil
+}
+
+func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) {
+	l := 1 + 4 + // type(byte) + uint32
+		4 + len(p.Handle) +
+		4 // uint32 flags; attrs are appended separately below
+
+	b := make([]byte, 0, l)
+	b = append(b, ssh_FXP_FSETSTAT)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Handle)
+	b = marshalUint32(b, p.Flags)
+	b = marshal(b, p.Attrs)
+	return b, nil
+}
+
+func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	}
+	p.Attrs = b
+	return nil
+}
+
+func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Handle, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	}
+	p.Attrs = b
+	return nil
+}
+
+type sshFxpHandlePacket struct {
+	ID     uint32
+	Handle string
+}
+
+func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) {
+	b := []byte{ssh_FXP_HANDLE}
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, p.Handle)
+	return b, nil
+}
+
+type sshFxpStatusPacket struct {
+	ID uint32
+	StatusError
+}
+
+func (p sshFxpStatusPacket) MarshalBinary() ([]byte, error) {
+	b := []byte{ssh_FXP_STATUS}
+	b = marshalUint32(b, p.ID)
+	b = marshalStatus(b, p.StatusError)
+	return b, nil
+}
+
+type sshFxpDataPacket struct {
+	ID     uint32
+	Length uint32
+	Data   []byte
+}
+
+func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) {
+	b := []byte{ssh_FXP_DATA}
+	b = marshalUint32(b, p.ID)
+	b = marshalUint32(b, p.Length)
+	b = append(b, p.Data[:p.Length]...)
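+	// Only the first Length bytes of Data are marshalled above.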
+	return b, nil
+}
+
+func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.Length, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if uint32(len(b)) < p.Length {
+		return errors.New("truncated packet")
+	}
+
+	p.Data = make([]byte, p.Length)
+	copy(p.Data, b)
+	return nil
+}
+
+type sshFxpStatvfsPacket struct {
+	ID   uint32
+	Path string
+}
+
+func (p sshFxpStatvfsPacket) id() uint32 { return p.ID }
+
+func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) {
+	l := 1 + 4 + // type(byte) + uint32
+		len(p.Path) +
+		len("statvfs@openssh.com")
+
+	b := make([]byte, 0, l)
+	b = append(b, ssh_FXP_EXTENDED)
+	b = marshalUint32(b, p.ID)
+	b = marshalString(b, "statvfs@openssh.com")
+	b = marshalString(b, p.Path)
+	return b, nil
+}
+
+// A StatVFS contains statistics about a filesystem.
+type StatVFS struct {
+	ID      uint32
+	Bsize   uint64 /* file system block size */
+	Frsize  uint64 /* fundamental fs block size */
+	Blocks  uint64 /* number of blocks (unit f_frsize) */
+	Bfree   uint64 /* free blocks in file system */
+	Bavail  uint64 /* free blocks for non-root */
+	Files   uint64 /* total file inodes */
+	Ffree   uint64 /* free file inodes */
+	Favail  uint64 /* free file inodes for non-root */
+	Fsid    uint64 /* file system id */
+	Flag    uint64 /* bit mask of f_flag values */
+	Namemax uint64 /* maximum filename length */
+}
+
+// TotalSpace calculates the amount of total space in a filesystem.
+func (p *StatVFS) TotalSpace() uint64 {
+	return p.Frsize * p.Blocks
+}
+
+// FreeSpace calculates the amount of free space in a filesystem.
+func (p *StatVFS) FreeSpace() uint64 {
+	return p.Frsize * p.Bfree
+}
+
+// MarshalBinary converts to ssh_FXP_EXTENDED_REPLY packet binary format.
+func (p *StatVFS) MarshalBinary() ([]byte, error) {
+	var buf bytes.Buffer
+	buf.Write([]byte{ssh_FXP_EXTENDED_REPLY})
+	err := binary.Write(&buf, binary.BigEndian, p)
+	return buf.Bytes(), err
+}
+
+type sshFxpExtendedPacket struct {
+	ID              uint32
+	ExtendedRequest string
+	SpecificPacket  interface {
+		serverRespondablePacket
+		readonly() bool
+	}
+}
+
+func (p sshFxpExtendedPacket) id() uint32     { return p.ID }
+func (p sshFxpExtendedPacket) readonly() bool { return p.SpecificPacket.readonly() }
+
+func (p sshFxpExtendedPacket) respond(svr *Server) error {
+	return p.SpecificPacket.respond(svr)
+}
+
+func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error {
+	var err error
+	bOrig := b
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	}
+
+	// specific unmarshalling
+	switch p.ExtendedRequest {
+	case "statvfs@openssh.com":
+		p.SpecificPacket = &sshFxpExtendedPacketStatVFS{}
+	default:
+		return errUnknownExtendedPacket
+	}
+
+	return p.SpecificPacket.UnmarshalBinary(bOrig)
+}
+
+type sshFxpExtendedPacketStatVFS struct {
+	ID              uint32
+	ExtendedRequest string
+	Path            string
+}
+
+func (p sshFxpExtendedPacketStatVFS) id() uint32     { return p.ID }
+func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true }
+func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error {
+	var err error
+	if p.ID, b, err = unmarshalUint32Safe(b); err != nil {
+		return err
+	} else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	} else if p.Path, b, err = unmarshalStringSafe(b); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/pkg/sftp/release.go
b/vendor/github.com/pkg/sftp/release.go new file mode 100644 index 0000000..b695528 --- /dev/null +++ b/vendor/github.com/pkg/sftp/release.go @@ -0,0 +1,5 @@ +// +build !debug + +package sftp + +func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go new file mode 100644 index 0000000..e097bda --- /dev/null +++ b/vendor/github.com/pkg/sftp/server.go @@ -0,0 +1,607 @@ +package sftp + +// sftp server counterpart + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" +) + +const ( + sftpServerWorkerCount = 8 +) + +// Server is an SSH File Transfer Protocol (sftp) server. +// This is intended to provide the sftp subsystem to an ssh server daemon. +// This implementation currently supports most of sftp server protocol version 3, +// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +type Server struct { + serverConn + debugStream io.Writer + readOnly bool + pktChan chan rxPacket + openFiles map[string]*os.File + openFilesLock sync.RWMutex + handleCount int + maxTxPacket uint32 +} + +func (svr *Server) nextHandle(f *os.File) string { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + svr.handleCount++ + handle := strconv.Itoa(svr.handleCount) + svr.openFiles[handle] = f + return handle +} + +func (svr *Server) closeHandle(handle string) error { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + if f, ok := svr.openFiles[handle]; ok { + delete(svr.openFiles, handle) + return f.Close() + } + + return syscall.EBADF +} + +func (svr *Server) getHandle(handle string) (*os.File, bool) { + svr.openFilesLock.RLock() + defer svr.openFilesLock.RUnlock() + f, ok := svr.openFiles[handle] + return f, ok +} + +type serverRespondablePacket interface { + encoding.BinaryUnmarshaler + id() uint32 + respond(svr *Server) error +} + +// NewServer creates a new Server instance around the provided streams, serving +// content from the root of the filesystem. Optionally, ServerOption +// functions may be specified to further configure the Server. +// +// A subsequent call to Serve() is required to begin serving files over SFTP. +func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { + s := &Server{ + serverConn: serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + }, + debugStream: ioutil.Discard, + pktChan: make(chan rxPacket, sftpServerWorkerCount), + openFiles: make(map[string]*os.File), + maxTxPacket: 1 << 15, + } + + for _, o := range options { + if err := o(s); err != nil { + return nil, err + } + } + + return s, nil +} + +// A ServerOption is a function which applies configuration to a Server. +type ServerOption func(*Server) error + +// WithDebug enables Server debugging output to the supplied io.Writer. +func WithDebug(w io.Writer) ServerOption { + return func(s *Server) error { + s.debugStream = w + return nil + } +} + +// ReadOnly configures a Server to serve files in read-only mode. 
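+//
+// A minimal usage sketch (assumed, not from the vendored docs): given an
+// io.ReadWriteCloser rwc carrying the SSH "sftp" subsystem, serve it
+// read-only:
+//
+//	srv, err := NewServer(rwc, ReadOnly())
+//	if err != nil {
+//		return err
+//	}
+//	return srv.Serve()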
+func ReadOnly() ServerOption { + return func(s *Server) error { + s.readOnly = true + return nil + } +} + +type rxPacket struct { + pktType fxp + pktBytes []byte +} + +// Up to N parallel servers +func (svr *Server) sftpServerWorker() error { + for p := range svr.pktChan { + var pkt interface { + encoding.BinaryUnmarshaler + id() uint32 + } + var readonly = true + switch p.pktType { + case ssh_FXP_INIT: + pkt = &sshFxInitPacket{} + case ssh_FXP_LSTAT: + pkt = &sshFxpLstatPacket{} + case ssh_FXP_OPEN: + pkt = &sshFxpOpenPacket{} + // readonly handled specially below + case ssh_FXP_CLOSE: + pkt = &sshFxpClosePacket{} + case ssh_FXP_READ: + pkt = &sshFxpReadPacket{} + case ssh_FXP_WRITE: + pkt = &sshFxpWritePacket{} + readonly = false + case ssh_FXP_FSTAT: + pkt = &sshFxpFstatPacket{} + case ssh_FXP_SETSTAT: + pkt = &sshFxpSetstatPacket{} + readonly = false + case ssh_FXP_FSETSTAT: + pkt = &sshFxpFsetstatPacket{} + readonly = false + case ssh_FXP_OPENDIR: + pkt = &sshFxpOpendirPacket{} + case ssh_FXP_READDIR: + pkt = &sshFxpReaddirPacket{} + case ssh_FXP_REMOVE: + pkt = &sshFxpRemovePacket{} + readonly = false + case ssh_FXP_MKDIR: + pkt = &sshFxpMkdirPacket{} + readonly = false + case ssh_FXP_RMDIR: + pkt = &sshFxpRmdirPacket{} + readonly = false + case ssh_FXP_REALPATH: + pkt = &sshFxpRealpathPacket{} + case ssh_FXP_STAT: + pkt = &sshFxpStatPacket{} + case ssh_FXP_RENAME: + pkt = &sshFxpRenamePacket{} + readonly = false + case ssh_FXP_READLINK: + pkt = &sshFxpReadlinkPacket{} + case ssh_FXP_SYMLINK: + pkt = &sshFxpSymlinkPacket{} + readonly = false + case ssh_FXP_EXTENDED: + pkt = &sshFxpExtendedPacket{} + default: + return errors.Errorf("unhandled packet type: %s", p.pktType) + } + if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { + return err + } + + // handle FXP_OPENDIR specially + switch pkt := pkt.(type) { + case *sshFxpOpenPacket: + readonly = pkt.readonly() + case *sshFxpExtendedPacket: + readonly = pkt.SpecificPacket.readonly() + } + + // If server is operating read-only and a write operation is requested, + // return permission denied + if !readonly && svr.readOnly { + if err := svr.sendError(pkt, syscall.EPERM); err != nil { + return errors.Wrap(err, "failed to send read only packet response") + } + continue + } + + if err := handlePacket(svr, pkt); err != nil { + return err + } + } + return nil +} + +func handlePacket(s *Server, p interface{}) error { + switch p := p.(type) { + case *sshFxInitPacket: + return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil}) + case *sshFxpStatPacket: + // stat the requested file + info, err := os.Stat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpLstatPacket: + // stat the requested file + info, err := os.Lstat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpFstatPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + info, err := f.Stat() + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpMkdirPacket: + // TODO FIXME: ignore flags field + err := os.Mkdir(p.Path, 0755) + return s.sendError(p, err) + case *sshFxpRmdirPacket: + err := os.Remove(p.Path) + return s.sendError(p, err) + case *sshFxpRemovePacket: + err := os.Remove(p.Filename) + return s.sendError(p, err) + case *sshFxpRenamePacket: + err := 
os.Rename(p.Oldpath, p.Newpath) + return s.sendError(p, err) + case *sshFxpSymlinkPacket: + err := os.Symlink(p.Targetpath, p.Linkpath) + return s.sendError(p, err) + case *sshFxpClosePacket: + return s.sendError(p, s.closeHandle(p.Handle)) + case *sshFxpReadlinkPacket: + f, err := os.Readlink(p.Path) + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + + case *sshFxpRealpathPacket: + f, err := filepath.Abs(p.Path) + if err != nil { + return s.sendError(p, err) + } + f = filepath.Clean(f) + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + case *sshFxpOpendirPacket: + return sshFxpOpenPacket{ + ID: p.ID, + Path: p.Path, + Pflags: ssh_FXF_READ, + }.respond(s) + case *sshFxpReadPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + data := make([]byte, clamp(p.Len, s.maxTxPacket)) + n, err := f.ReadAt(data, int64(p.Offset)) + if err != nil && (err != io.EOF || n == 0) { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpDataPacket{ + ID: p.ID, + Length: uint32(n), + Data: data[:n], + }) + case *sshFxpWritePacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + _, err := f.WriteAt(p.Data, int64(p.Offset)) + return s.sendError(p, err) + case serverRespondablePacket: + err := p.respond(s) + return errors.Wrap(err, "pkt.respond failed") + default: + return errors.Errorf("unexpected packet type %T", p) + } +} + +// Serve serves SFTP connections until the streams stop or the SFTP subsystem +// is stopped. +func (svr *Server) Serve() error { + var wg sync.WaitGroup + wg.Add(sftpServerWorkerCount) + for i := 0; i < sftpServerWorkerCount; i++ { + go func() { + defer wg.Done() + if err := svr.sftpServerWorker(); err != nil { + svr.conn.Close() // shuts down recvPacket + } + }() + } + + var err error + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = svr.recvPacket() + if err != nil { + break + } + svr.pktChan <- rxPacket{fxp(pktType), pktBytes} + } + + close(svr.pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // close any still-open files + for handle, file := range svr.openFiles { + fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) + file.Close() + } + return err // error from recvPacket +} + +type id interface { + id() uint32 +} + +// The init packet has no ID, so we just return a zero-value ID +func (p sshFxInitPacket) id() uint32 { return 0 } + +type sshFxpStatResponse struct { + ID uint32 + info os.FileInfo +} + +func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_ATTRS} + b = marshalUint32(b, p.ID) + b = marshalFileInfo(b, p.info) + return b, nil +} + +var emptyFileStat = []interface{}{uint32(0)} + +func (p sshFxpOpenPacket) readonly() bool { + return !p.hasPflags(ssh_FXF_WRITE) +} + +func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { + for _, f := range flags { + if p.Pflags&f == 0 { + return false + } + } + return true +} + +func (p sshFxpOpenPacket) respond(svr *Server) error { + var osFlags int + if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { + osFlags |= os.O_RDWR + } else if p.hasPflags(ssh_FXF_WRITE) { + osFlags |= os.O_WRONLY + } else if p.hasPflags(ssh_FXF_READ) { + osFlags |= os.O_RDONLY + } else { 
+ // how are they opening? + return svr.sendError(p, syscall.EINVAL) + } + + if p.hasPflags(ssh_FXF_APPEND) { + osFlags |= os.O_APPEND + } + if p.hasPflags(ssh_FXF_CREAT) { + osFlags |= os.O_CREATE + } + if p.hasPflags(ssh_FXF_TRUNC) { + osFlags |= os.O_TRUNC + } + if p.hasPflags(ssh_FXF_EXCL) { + osFlags |= os.O_EXCL + } + + f, err := os.OpenFile(p.Path, osFlags, 0644) + if err != nil { + return svr.sendError(p, err) + } + + handle := svr.nextHandle(f) + return svr.sendPacket(sshFxpHandlePacket{p.ID, handle}) +} + +func (p sshFxpReaddirPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + dirname := f.Name() + dirents, err := f.Readdir(128) + if err != nil { + return svr.sendError(p, err) + } + + ret := sshFxpNamePacket{ID: p.ID} + for _, dirent := range dirents { + ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ + Name: dirent.Name(), + LongName: runLs(dirname, dirent), + Attrs: []interface{}{dirent}, + }) + } + return svr.sendPacket(ret) +} + +func (p sshFxpSetstatPacket) respond(svr *Server) error { + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("setstat name \"%s\"", p.Path) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = os.Truncate(p.Path, int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = os.Chmod(p.Path, os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(p.Path, atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, b, err = unmarshalUint32Safe(b); err != nil { + } else { + err = os.Chown(p.Path, int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +func (p sshFxpFsetstatPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("fsetstat name \"%s\"", f.Name()) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = f.Truncate(int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = f.Chmod(os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(f.Name(), atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, b, err = unmarshalUint32Safe(b); err != nil { + } else { + err = f.Chown(int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +// 
translateErrno translates a syscall error number to an SFTP error code.
+func translateErrno(errno syscall.Errno) uint32 {
+	switch errno {
+	case 0:
+		return ssh_FX_OK
+	case syscall.ENOENT:
+		return ssh_FX_NO_SUCH_FILE
+	case syscall.EPERM:
+		return ssh_FX_PERMISSION_DENIED
+	}
+
+	return ssh_FX_FAILURE
+}
+
+func statusFromError(p id, err error) sshFxpStatusPacket {
+	ret := sshFxpStatusPacket{
+		ID: p.id(),
+		StatusError: StatusError{
+			// ssh_FX_OK                = 0
+			// ssh_FX_EOF               = 1
+			// ssh_FX_NO_SUCH_FILE      = 2 ENOENT
+			// ssh_FX_PERMISSION_DENIED = 3
+			// ssh_FX_FAILURE           = 4
+			// ssh_FX_BAD_MESSAGE       = 5
+			// ssh_FX_NO_CONNECTION     = 6
+			// ssh_FX_CONNECTION_LOST   = 7
+			// ssh_FX_OP_UNSUPPORTED    = 8
+			Code: ssh_FX_OK,
+		},
+	}
+	if err != nil {
+		debug("statusFromError: error is %T %#v", err, err)
+		ret.StatusError.Code = ssh_FX_FAILURE
+		ret.StatusError.msg = err.Error()
+		if err == io.EOF {
+			ret.StatusError.Code = ssh_FX_EOF
+		} else if errno, ok := err.(syscall.Errno); ok {
+			ret.StatusError.Code = translateErrno(errno)
+		} else if pathError, ok := err.(*os.PathError); ok {
+			debug("statusFromError: error is %T %#v", pathError.Err, pathError.Err)
+			if errno, ok := pathError.Err.(syscall.Errno); ok {
+				ret.StatusError.Code = translateErrno(errno)
+			}
+		}
+	}
+	return ret
+}
+
+func clamp(v, max uint32) uint32 {
+	if v > max {
+		return max
+	}
+	return v
+}
diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
new file mode 100644
index 0000000..8c01dac
--- /dev/null
+++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go
@@ -0,0 +1,21 @@
+package sftp
+
+import (
+	"syscall"
+)
+
+func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) {
+	return &StatVFS{
+		Bsize:   uint64(stat.Bsize),
+		Frsize:  uint64(stat.Bsize), // fragment size is a linux thing; use block size here
+		Blocks:  stat.Blocks,
+		Bfree:   stat.Bfree,
+		Bavail:  stat.Bavail,
+		Files:   stat.Files,
+		Ffree:   stat.Ffree,
+		Favail:  stat.Ffree, // not sure how to calculate Favail
+		Fsid:    uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness?
+		Flag:    uint64(stat.Flags), // assuming POSIX?
+ Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go new file mode 100644 index 0000000..c26870e --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -0,0 +1,25 @@ +// +build darwin linux,!gccgo + +// fill in statvfs structure with OS specific values +// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + stat := &syscall.Statfs_t{} + if err := syscall.Statfs(p.Path, stat); err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + + retPkt, err := statvfsFromStatfst(stat) + if err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + retPkt.ID = p.ID + + return svr.sendPacket(retPkt) +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go new file mode 100644 index 0000000..77fd1bf --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -0,0 +1,23 @@ +// +build !gccgo,linux + +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Frsize), + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Fsid: uint64(uint64(stat.Fsid.X__val[1])<<32 | uint64(stat.Fsid.X__val[0])), // endianness? + Flag: uint64(stat.Flags), // assuming POSIX? + Namemax: uint64(stat.Namelen), + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go new file mode 100644 index 0000000..1512a13 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -0,0 +1,11 @@ +// +build !darwin,!linux gccgo + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go new file mode 100644 index 0000000..3b1ddbd --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_stubs.go @@ -0,0 +1,12 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" + "path" +) + +func runLs(dirname string, dirent os.FileInfo) string { + return path.Join(dirname, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 0000000..8c3f0b4 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,143 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "fmt" + "os" + "path" + "syscall" + "time" +) + +func runLsTypeWord(dirent os.FileInfo) string { + // find first character, the type char + // b Block special file. + // c Character special file. + // d Directory. + // l Symbolic link. + // s Socket link. + // p FIFO. + // - Regular file. 
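+//
+// For example, a directory with permission bits 0755 is rendered as
+// "drwxr-xr-x".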
+ tc := '-' + mode := dirent.Mode() + if (mode & os.ModeDir) != 0 { + tc = 'd' + } else if (mode & os.ModeDevice) != 0 { + tc = 'b' + if (mode & os.ModeCharDevice) != 0 { + tc = 'c' + } + } else if (mode & os.ModeSymlink) != 0 { + tc = 'l' + } else if (mode & os.ModeSocket) != 0 { + tc = 's' + } else if (mode & os.ModeNamedPipe) != 0 { + tc = 'p' + } + + // owner + orc := '-' + if (mode & 0400) != 0 { + orc = 'r' + } + owc := '-' + if (mode & 0200) != 0 { + owc = 'w' + } + oxc := '-' + ox := (mode & 0100) != 0 + setuid := (mode & os.ModeSetuid) != 0 + if ox && setuid { + oxc = 's' + } else if setuid { + oxc = 'S' + } else if ox { + oxc = 'x' + } + + // group + grc := '-' + if (mode & 040) != 0 { + grc = 'r' + } + gwc := '-' + if (mode & 020) != 0 { + gwc = 'w' + } + gxc := '-' + gx := (mode & 010) != 0 + setgid := (mode & os.ModeSetgid) != 0 + if gx && setgid { + gxc = 's' + } else if setgid { + gxc = 'S' + } else if gx { + gxc = 'x' + } + + // all / others + arc := '-' + if (mode & 04) != 0 { + arc = 'r' + } + awc := '-' + if (mode & 02) != 0 { + awc = 'w' + } + axc := '-' + ax := (mode & 01) != 0 + sticky := (mode & os.ModeSticky) != 0 + if ax && sticky { + axc = 't' + } else if sticky { + axc = 'T' + } else if ax { + axc = 'x' + } + + return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) +} + +func runLsStatt(dirname string, dirent os.FileInfo, statt *syscall.Stat_t) string { + // example from openssh sftp server: + // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd + // format: + // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name + + typeword := runLsTypeWord(dirent) + numLinks := statt.Nlink + uid := statt.Uid + gid := statt.Gid + username := fmt.Sprintf("%d", uid) + groupname := fmt.Sprintf("%d", gid) + // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output + + mtime := dirent.ModTime() + monthStr := mtime.Month().String()[0:3] + day := mtime.Day() + year := mtime.Year() + now := time.Now() + isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) + + yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) + if isOld { + yearOrTime = fmt.Sprintf("%d", year) + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) +} + +// ls -l style output for a file, which is in the 'long output' section of a readdir response packet +// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases +func runLs(dirname string, dirent os.FileInfo) string { + dsys := dirent.Sys() + if dsys == nil { + } else if statt, ok := dsys.(*syscall.Stat_t); !ok { + } else { + return runLsStatt(dirname, dirent, statt) + } + + return path.Join(dirname, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go new file mode 100644 index 0000000..22184af --- /dev/null +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -0,0 +1,217 @@ +// Package sftp implements the SSH File Transfer Protocol as described in +// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +package sftp + +import ( + "fmt" + + "github.com/pkg/errors" +) + +const ( + ssh_FXP_INIT = 1 + ssh_FXP_VERSION = 2 + ssh_FXP_OPEN = 3 + ssh_FXP_CLOSE = 4 + ssh_FXP_READ = 5 + ssh_FXP_WRITE = 6 + ssh_FXP_LSTAT = 7 + ssh_FXP_FSTAT = 8 + ssh_FXP_SETSTAT = 9 + ssh_FXP_FSETSTAT = 10 + ssh_FXP_OPENDIR = 11 + 
ssh_FXP_READDIR = 12 + ssh_FXP_REMOVE = 13 + ssh_FXP_MKDIR = 14 + ssh_FXP_RMDIR = 15 + ssh_FXP_REALPATH = 16 + ssh_FXP_STAT = 17 + ssh_FXP_RENAME = 18 + ssh_FXP_READLINK = 19 + ssh_FXP_SYMLINK = 20 + ssh_FXP_STATUS = 101 + ssh_FXP_HANDLE = 102 + ssh_FXP_DATA = 103 + ssh_FXP_NAME = 104 + ssh_FXP_ATTRS = 105 + ssh_FXP_EXTENDED = 200 + ssh_FXP_EXTENDED_REPLY = 201 +) + +const ( + ssh_FX_OK = 0 + ssh_FX_EOF = 1 + ssh_FX_NO_SUCH_FILE = 2 + ssh_FX_PERMISSION_DENIED = 3 + ssh_FX_FAILURE = 4 + ssh_FX_BAD_MESSAGE = 5 + ssh_FX_NO_CONNECTION = 6 + ssh_FX_CONNECTION_LOST = 7 + ssh_FX_OP_UNSUPPORTED = 8 + + // see draft-ietf-secsh-filexfer-13 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + ssh_FX_INVALID_HANDLE = 9 + ssh_FX_NO_SUCH_PATH = 10 + ssh_FX_FILE_ALREADY_EXISTS = 11 + ssh_FX_WRITE_PROTECT = 12 + ssh_FX_NO_MEDIA = 13 + ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 + ssh_FX_QUOTA_EXCEEDED = 15 + ssh_FX_UNKNOWN_PRINCIPAL = 16 + ssh_FX_LOCK_CONFLICT = 17 + ssh_FX_DIR_NOT_EMPTY = 18 + ssh_FX_NOT_A_DIRECTORY = 19 + ssh_FX_INVALID_FILENAME = 20 + ssh_FX_LINK_LOOP = 21 + ssh_FX_CANNOT_DELETE = 22 + ssh_FX_INVALID_PARAMETER = 23 + ssh_FX_FILE_IS_A_DIRECTORY = 24 + ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 + ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 + ssh_FX_DELETE_PENDING = 27 + ssh_FX_FILE_CORRUPT = 28 + ssh_FX_OWNER_INVALID = 29 + ssh_FX_GROUP_INVALID = 30 + ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 +) + +const ( + ssh_FXF_READ = 0x00000001 + ssh_FXF_WRITE = 0x00000002 + ssh_FXF_APPEND = 0x00000004 + ssh_FXF_CREAT = 0x00000008 + ssh_FXF_TRUNC = 0x00000010 + ssh_FXF_EXCL = 0x00000020 +) + +type fxp uint8 + +func (f fxp) String() string { + switch f { + case ssh_FXP_INIT: + return "SSH_FXP_INIT" + case ssh_FXP_VERSION: + return "SSH_FXP_VERSION" + case ssh_FXP_OPEN: + return "SSH_FXP_OPEN" + case ssh_FXP_CLOSE: + return "SSH_FXP_CLOSE" + case ssh_FXP_READ: + return "SSH_FXP_READ" + case ssh_FXP_WRITE: + return "SSH_FXP_WRITE" + case ssh_FXP_LSTAT: + return "SSH_FXP_LSTAT" + case ssh_FXP_FSTAT: + return "SSH_FXP_FSTAT" + case ssh_FXP_SETSTAT: + return "SSH_FXP_SETSTAT" + case ssh_FXP_FSETSTAT: + return "SSH_FXP_FSETSTAT" + case ssh_FXP_OPENDIR: + return "SSH_FXP_OPENDIR" + case ssh_FXP_READDIR: + return "SSH_FXP_READDIR" + case ssh_FXP_REMOVE: + return "SSH_FXP_REMOVE" + case ssh_FXP_MKDIR: + return "SSH_FXP_MKDIR" + case ssh_FXP_RMDIR: + return "SSH_FXP_RMDIR" + case ssh_FXP_REALPATH: + return "SSH_FXP_REALPATH" + case ssh_FXP_STAT: + return "SSH_FXP_STAT" + case ssh_FXP_RENAME: + return "SSH_FXP_RENAME" + case ssh_FXP_READLINK: + return "SSH_FXP_READLINK" + case ssh_FXP_SYMLINK: + return "SSH_FXP_SYMLINK" + case ssh_FXP_STATUS: + return "SSH_FXP_STATUS" + case ssh_FXP_HANDLE: + return "SSH_FXP_HANDLE" + case ssh_FXP_DATA: + return "SSH_FXP_DATA" + case ssh_FXP_NAME: + return "SSH_FXP_NAME" + case ssh_FXP_ATTRS: + return "SSH_FXP_ATTRS" + case ssh_FXP_EXTENDED: + return "SSH_FXP_EXTENDED" + case ssh_FXP_EXTENDED_REPLY: + return "SSH_FXP_EXTENDED_REPLY" + default: + return "unknown" + } +} + +type fx uint8 + +func (f fx) String() string { + switch f { + case ssh_FX_OK: + return "SSH_FX_OK" + case ssh_FX_EOF: + return "SSH_FX_EOF" + case ssh_FX_NO_SUCH_FILE: + return "SSH_FX_NO_SUCH_FILE" + case ssh_FX_PERMISSION_DENIED: + return "SSH_FX_PERMISSION_DENIED" + case ssh_FX_FAILURE: + return "SSH_FX_FAILURE" + case ssh_FX_BAD_MESSAGE: + return "SSH_FX_BAD_MESSAGE" + case ssh_FX_NO_CONNECTION: + return "SSH_FX_NO_CONNECTION" + case ssh_FX_CONNECTION_LOST: + return "SSH_FX_CONNECTION_LOST" + case 
ssh_FX_OP_UNSUPPORTED: + return "SSH_FX_OP_UNSUPPORTED" + default: + return "unknown" + } +} + +type unexpectedPacketErr struct { + want, got uint8 +} + +func (u *unexpectedPacketErr) Error() string { + return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) +} + +func unimplementedPacketErr(u uint8) error { + return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) +} + +type unexpectedIDErr struct{ want, got uint32 } + +func (u *unexpectedIDErr) Error() string { + return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) +} + +func unimplementedSeekWhence(whence int) error { + return errors.Errorf("sftp: unimplemented seek whence %v", whence) +} + +func unexpectedCount(want, got uint32) error { + return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) +} + +type unexpectedVersionErr struct{ want, got uint32 } + +func (u *unexpectedVersionErr) Error() string { + return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) +} + +// A StatusError is returned when an SFTP operation fails, and provides +// additional information about the failure. +type StatusError struct { + Code uint32 + msg, lang string +} + +func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go index 64cc40f..003e99f 100644 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -559,10 +559,14 @@ type UnifiedDiff struct { func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() - w := func(format string, args ...interface{}) error { + wf := func(format string, args ...interface{}) error { _, err := buf.WriteString(fmt.Sprintf(format, args...)) return err } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -581,26 +585,28 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } } } first, last := g[0], g[len(g)-1] range1 := formatRangeUnified(first.I1, last.I2) range2 := formatRangeUnified(first.J1, last.J2) - if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { return err } for _, c := range g { i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 if c.Tag == 'e' { for _, line := range diff.A[i1:i2] { - if err := w(" " + line); err != nil { + if err := ws(" " + line); err != nil { return err } } @@ -608,14 +614,14 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { } if c.Tag == 'r' || c.Tag == 'd' { for _, line := range diff.A[i1:i2] { - if err := w("-" + line); err != nil { + if err := ws("-" + line); err != nil { return err } } } if c.Tag == 'r' || c.Tag == 'i' { for _, line := range diff.B[j1:j2] { - if err := w("+" + line); err != nil { + if err := ws("+" + line); err != nil { 
return err } } @@ -669,12 +675,18 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { buf := bufio.NewWriter(writer) defer buf.Flush() var diffErr error - w := func(format string, args ...interface{}) { + wf := func(format string, args ...interface{}) { _, err := buf.WriteString(fmt.Sprintf(format, args...)) if diffErr == nil && err != nil { diffErr = err } } + ws := func(s string) { + _, err := buf.WriteString(s) + if diffErr == nil && err != nil { + diffErr = err + } + } if len(diff.Eol) == 0 { diff.Eol = "\n" @@ -700,15 +712,17 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } } first, last := g[0], g[len(g)-1] - w("***************" + diff.Eol) + ws("***************" + diff.Eol) range1 := formatRangeContext(first.I1, last.I2) - w("*** %s ****%s", range1, diff.Eol) + wf("*** %s ****%s", range1, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'd' { for _, cc := range g { @@ -716,7 +730,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.A[cc.I1:cc.I2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break @@ -724,7 +738,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { } range2 := formatRangeContext(first.J1, last.J2) - w("--- %s ----%s", range2, diff.Eol) + wf("--- %s ----%s", range2, diff.Eol) for _, c := range g { if c.Tag == 'r' || c.Tag == 'i' { for _, cc := range g { @@ -732,7 +746,7 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { continue } for _, line := range diff.B[cc.J1:cc.J2] { - w(prefix[cc.Tag] + line) + ws(prefix[cc.Tag] + line) } } break diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 0000000..298f0e2 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 0000000..db20817
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,449 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing interfaces,
+types and methods. Afero has an exceptionally clean interface and simple design
+without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
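+
+As a quick illustration (a minimal sketch; `CountConfigBytes` is a
+hypothetical helper, not part of afero), code written against the
+`afero.Fs` interface runs unchanged against the real disk or an in-memory
+filesystem:
+
+```go
+import "github.com/spf13/afero"
+
+// CountConfigBytes works with any afero.Fs: pass afero.NewOsFs() in
+// production and afero.NewMemMapFs() in tests.
+func CountConfigBytes(fs afero.Fs, path string) (int, error) {
+	data, err := afero.ReadFile(fs, path)
+	if err != nil {
+		return 0, err
+	}
+	return len(data), nil
+}
+```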
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Wrap the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+    $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs afero.Fs = afero.NewMemMapFs()
+
+// or
+
+var AppFs afero.Fs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application before had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with a call to `AppFs.Open("/tmp/foo")`.
+
+`AppFs` is the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+They can be used in two different ways. You can either call them directly,
+where the first parameter of each function will be the file system, or you
+can declare a new `Afero`, a custom type used to bind these functions as
+methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and can be easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+	appFS = afero.NewMemMapFs()
+	// create test files and directories
+	appFS.MkdirAll("src/a", 0755)
+	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+	_, err := appFS.Stat("src/c")
+	if os.IsNotExist(err) {
+		t.Errorf("file \"%s\" does not exist.\n", "src/c")
+	}
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and for avoiding unnecessary disk I/O when persistence isn’t
+necessary. It is fully concurrent and works safely within
+goroutines.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp),
+which can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+File names given to operations on this Fs are prepended with the base
+path before the call is passed on to the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names: any file NOT matching the passed regexp will
+be treated as non-existing, and files whose names do not match the regexp
+cannot be created. Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The `net/http` package requires a slightly different version of `Open`, one
+which returns an `http.File` type.
+
+Afero provides an HttpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an HttpFs.
+
+```go
+httpFs := afero.NewHttpFs(afero.NewOsFs())
+fileserver := http.FileServer(httpFs.Dir("/static"))
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request occurs within the cache duration of when the
+copy was created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` are likewise forwarded to the base
+first, then to the overlay.
+
+To write files to the overlay only, use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of
+0 means "forever", i.e. the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only, but it will still copy
+files from the base to the overlay when they're not present (or outdated) in
+the caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100*time.Second)
+```
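+
+To make the overlay-only escape hatch concrete, here is a small sketch that
+reuses the variables from the example above (file names are illustrative):
+
+```go
+// Writing through the union touches the base first, then the layer.
+afero.WriteFile(ufs, "/tmp/both.txt", []byte("base and cache"), 0644)
+
+// Writing to the layer directly bypasses the union and the base.
+afero.WriteFile(layer, "/tmp/cache-only.txt", []byte("cache only"), 0644)
+```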
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and, if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and renaming files present only in the base layer is not currently
+permitted. If a file is present in both the base layer and the overlay, only
+the overlay copy will be removed/renamed.
+
+```go
+	base := afero.NewOsFs()
+	roBase := afero.NewReadOnlyFs(base)
+	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+	fh, _ := ufs.Create("/home/test/file2.txt")
+	fh.WriteString("This is a test")
+	fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs),
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the Latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" meaning "make or do".
+
+The literal meaning of afero is "to make" or "to do", which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept, but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+  * Full compatibility with Windows
+  * Introduction of afero utilities
+  * Test suite rewritten to work cross platform
+  * Normalize paths for MemMapFs
+  * Adding Sync to the file interface
+  * **Breaking Change** Walk and ReadDir have changed parameter order
+  * Moving types used by MemMapFs to a subpackage
+  * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+  * New Walk function similar to filepath.Walk
+  * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+  * MemMapFs.Remove now really deletes the file
+  * InMemoryFile.Readdir and Readdirnames work correctly
+  * InMemoryFile functions lock it for concurrent access
+  * Test suite improvements
+* **0.8.0** 2014.10.28
+  * First public version
+  * Interfaces feel ready for people to build using
+  * Interfaces satisfy all known uses
+  * MemMapFs passes the majority of the OS test suite
+  * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 0000000..e3df3f4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and all any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + //Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + //Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 0000000..006f315 --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,15 @@ +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + + go env + + go get -v github.com/spf13/afero/... 
+ + go build github.com/spf13/afero +test_script: +- cmd: go test -v github.com/spf13/afero diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 0000000..6ec6ca9 --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,145 @@ +package afero + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return "", err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
+ if filepath.IsAbs(name) { + return &os.PathError{"realPath", name, errors.New("got a real OS path instead of a virtual")} + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"chtimes", name, err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"chmod", name, err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{"stat", name, err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{"rename", oldname, err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{"rename", newname, err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"remove_all", name, err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"remove", name, err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{"openfile", name, err} + } + return b.source.OpenFile(name, flag, mode) +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{"open", name, err} + } + return b.source.Open(name) +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"mkdir", name, err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{"mkdir", name, err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{"create", name, err} + } + return b.source.Create(name) +} + +// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 0000000..d742425 --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,296 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. 
To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. +type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + cacheUnknown cacheState = iota + // not present in the overlay, unknown if it exists in the base: + cacheMiss + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT { + return cacheMiss, nil, nil + } + var ok bool + if err, ok = err.(*os.PathError); ok { + if err == os.ErrNotExist { + return cacheMiss, nil, nil + } + } + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + } + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + 
case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyToLayer(name); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{base: bfi, layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{base: bfile, layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{base: bfh, layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 0000000..5728243 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 0000000..968fc27 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,25 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +build !darwin +// +build !openbsd +// +build !freebsd +// +build !dragonfly +// +build !netbsd + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 0000000..ed692ae --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,253 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes() and Chmod()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + origErr := err + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == syscall.ENOENT || err == syscall.ENOTDIR { + return u.base.Stat(name) + } + return nil, origErr + } + return fi, nil +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. +func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? 
+ } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{base: bfile, layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return syscall.EEXIST + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 0000000..c421936 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,110 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." + } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 0000000..5c3a3d8 --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,230 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. 
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. + // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. 
+var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, prefix string) (f File, err error) { + return TempFile(a.Fs, dir, prefix) +} + +func TempFile(fs Fs, dir, prefix string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func (a Afero) TempDir(dir, prefix string) (name string, err error) { + return TempDir(a.Fs, dir, prefix) +} +func TempDir(fs Fs, dir, prefix string) (name string, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.Mkdir(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if err == nil { + name = try + } + break + } + return +} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go new file mode 100644 index 0000000..e104013 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dir.go @@ -0,0 +1,37 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +type Dir interface { + Len() int + Names() []string + Files() []*FileData + Add(*FileData) + Remove(*FileData) +} + +func RemoveFromMemDir(dir *FileData, f *FileData) { + dir.memDir.Remove(f) +} + +func AddToMemDir(dir *FileData, f *FileData) { + dir.memDir.Add(f) +} + +func InitializeDir(d *FileData) { + if d.memDir == nil { + d.dir = true + d.memDir = &DirMap{} + } +} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go new file mode 100644 index 0000000..03a57ee --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/dirmap.go @@ -0,0 +1,43 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 0000000..9096ff0 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,285 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "os" + "path/filepath" + "sync" + "sync/atomic" +) + +import "time" + +const FilePathSeparator = string(filepath.Separator) + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time +} + +func (d FileData) Name() string { + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true} +} + +func ChangeFileName(f *FileData, newname string) { + f.name = newname +} + +func SetMode(f *FileData, mode os.FileMode) { + f.mode = mode +} + +func SetModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + SetModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + f.fileData.Lock() + defer f.fileData.Unlock() + return f.fileData.name +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +func (f *File) Read(b []byte) (n int, err error) { + f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed == true { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Read(b) +} + +func (f *File) Truncate(size int64) error { + if f.closed == true { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{"truncate", f.fileData.name, errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + if size > int64(len(f.fileData.data)) { + diff := size - 
int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + SetModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed == true { + return 0, ErrFileClosed + } + switch whence { + case 0: + atomic.StoreInt64(&f.at, offset) + case 1: + atomic.AddInt64(&f.at, int64(offset)) + case 2: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.readOnly { + return 0, &os.PathError{"write", f.fileData.name, errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) + } + SetModTime(f.fileData, time.Now()) + + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + _, name := filepath.Split(s.name) + return name +} +func (s *FileInfo) Mode() os.FileMode { return s.mode } +func (s *FileInfo) ModTime() time.Time { return s.modtime } +func (s *FileInfo) IsDir() bool { return s.dir } +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("Out of range") + ErrTooLarge = errors.New("Too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 0000000..2e259b8 --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,349 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +var memfsInit sync.Once + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? + m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) + }) + return m.data +} + +func (MemMapFs) Name() string { return "MemMapFS" } + +func (m *MemMapFs) Create(name string) (File, error) { + name = normalizePath(name) + m.mu.Lock() + file := mem.CreateFile(name) + m.getData()[name] = file + m.registerWithParent(file) + m.mu.Unlock() + return mem.NewFileHandle(file), nil +} + +func (m *MemMapFs) unRegisterWithParent(fileName string) error { + f, err := m.lockfreeOpen(fileName) + if err != nil { + return err + } + parent := m.findParent(f) + if parent == nil { + log.Panic("parent of ", f.Name(), " is nil") + } + mem.RemoveFromMemDir(parent, f) + return nil +} + +func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { + pdir, _ := filepath.Split(f.Name()) + pdir = filepath.Clean(pdir) + pfile, err := m.lockfreeOpen(pdir) + if err != nil { + return nil + } + return pfile +} + +func (m *MemMapFs) registerWithParent(f *mem.FileData) { + if f == nil { + return + } + parent := m.findParent(f) + if parent == nil { + pdir := filepath.Dir(filepath.Clean(f.Name())) + err := m.lockfreeMkdir(pdir, 0777) + if err != nil { + //log.Println("Mkdir error:", err) + return + } + parent, err = m.lockfreeOpen(pdir) + if err != nil { + //log.Println("Open after Mkdir error:", err) + return + } + } + + mem.InitializeDir(parent) + mem.AddToMemDir(parent, f) +} + +func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + x, ok := m.getData()[name] + if ok { + // Only return ErrFileExists if it's a file, not a directory. 
+ i := mem.FileInfo{x} + if !i.IsDir() { + return ErrFileExists + } + } else { + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + } + return nil +} + +func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + _, ok := m.getData()[name] + m.mu.RUnlock() + if ok { + return &os.PathError{"mkdir", name, ErrFileExists} + } else { + m.mu.Lock() + item := mem.CreateDir(name) + m.getData()[name] = item + m.registerWithParent(item) + m.mu.Unlock() + } + return nil +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } else { + return err + } + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{"open", name, ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + file, err := m.openWrite(name) + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{"remove", name, err} + } + delete(m.getData(), name) + } else { + return &os.PathError{"remove", name, os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p, _ := range m.getData() { + if strings.HasPrefix(p, path) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + fileData := m.getData()[oldname] + delete(m.getData(), oldname) 
+ mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{"rename", oldname, ErrFileNotFound} + } + return nil +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + name = normalizePath(name) + f, ok := m.getData()[name] + if !ok { + return &os.PathError{"chmod", name, ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + f, ok := m.getData()[name] + if !ok { + return &os.PathError{"chtimes", name, ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{x} + fmt.Println(x.Name(), y.Size()) + } +} + +func debugMemMapList(fs Fs) { + if x, ok := fs.(*MemMapFs); ok { + x.List() + } +} diff --git a/vendor/github.com/spf13/afero/memradix.go b/vendor/github.com/spf13/afero/memradix.go new file mode 100644 index 0000000..87527f3 --- /dev/null +++ b/vendor/github.com/spf13/afero/memradix.go @@ -0,0 +1,14 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 0000000..6b8bce1 --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,94 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
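
Both implementations above satisfy the same Fs interface, which is the point of the package: code written against afero.Fs can be pointed at memory or at the real disk without change. An illustrative sketch of that swap (not vendored code; it assumes only the NewMemMapFs/NewOsFs constructors defined in this package):

	package main

	import (
		"fmt"

		"github.com/spf13/afero"
	)

	func main() {
		// Swap in afero.NewOsFs() to run the same code against the real disk.
		var fs afero.Fs = afero.NewMemMapFs()

		if err := fs.MkdirAll("tmp/demo", 0755); err != nil {
			panic(err)
		}
		f, err := fs.Create("tmp/demo/hello.txt")
		if err != nil {
			panic(err)
		}
		f.WriteString("hello")
		f.Close()

		fi, _ := fs.Stat("tmp/demo/hello.txt")
		fmt.Println(fi.Size()) // 5
	}
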
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 0000000..1d90e46 --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,108 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfOs(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem is OsFs use Lstat, else use fs.Stat +func lstatIfOs(fs Fs, path string) (info os.FileInfo, err error) { + _, ok := fs.(*OsFs) + if ok { + info, err = os.Lstat(path) + } else { + info, err = fs.Stat(path) + } + return +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
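
A short sketch of driving the Walk defined just below over an in-memory tree (illustrative, not vendored code; afero.WriteFile is assumed to be the ioutil-style helper this package ships alongside these files):

	package main

	import (
		"fmt"
		"os"

		"github.com/spf13/afero"
	)

	func main() {
		fs := afero.NewMemMapFs()
		afero.WriteFile(fs, "a/b/file.txt", []byte("x"), 0644)

		// Visits "a", "a/b" and "a/b/file.txt" in lexical order.
		afero.Walk(fs, "a", func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			fmt.Println(path, info.IsDir())
			return nil
		})
	}
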
+
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+	return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+	info, err := lstatIfOs(fs, root)
+	if err != nil {
+		return walkFn(root, nil, err)
+	}
+	return walk(fs, root, info, walkFn)
+}
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 0000000..f1fa55b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,70 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+type ReadOnlyFs struct {
+	source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+	return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+	return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+	return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+	return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		return nil, syscall.EPERM
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+	return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+	return nil, syscall.EPERM
+}
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 0000000..9d92dbc
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+	"time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed, all others get an ENOENT error (
+// "No such file or directory").
+// +type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { + var rfi []os.FileInfo + rfi, err = f.f.Readdir(c) + if err != nil { + return nil, err + } + for _, i := range rfi { + if i.IsDir() || f.re.MatchString(i.Name()) { + fi = append(fi, i) + } + } + return fi, 
nil +} + +func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { + fi, err := f.Readdir(c) + if err != nil { + return nil, err + } + for _, s := range fi { + n = append(n, s.Name()) + } + return n, nil +} + +func (f *RegexpFile) Stat() (os.FileInfo, error) { + return f.f.Stat() +} + +func (f *RegexpFile) Sync() error { + return f.f.Sync() +} + +func (f *RegexpFile) Truncate(s int64) error { + return f.f.Truncate(s) +} + +func (f *RegexpFile) WriteString(s string) (int, error) { + return f.f.WriteString(s) +} diff --git a/vendor/github.com/spf13/afero/sftp.go b/vendor/github.com/spf13/afero/sftp.go new file mode 100644 index 0000000..a095a54 --- /dev/null +++ b/vendor/github.com/spf13/afero/sftp.go @@ -0,0 +1,128 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" + + "github.com/spf13/afero/sftp" + + "github.com/pkg/sftp" +) + +// SftpFs is a Fs implementation that uses functions provided by the sftp package. +// +// For details in any method, check the documentation of the sftp package +// (github.com/pkg/sftp). +type SftpFs struct{ + SftpClient *sftp.Client +} + +func (s SftpFs) Name() string { return "SftpFs" } + +func (s SftpFs) Create(name string) (File, error) { + f, err := sftpfs.FileCreate(s.SftpClient, name) + return f, err +} + +func (s SftpFs) Mkdir(name string, perm os.FileMode) error { + err := s.SftpClient.Mkdir(name) + if err != nil { + return err + } + return s.SftpClient.Chmod(name, perm) +} + +func (s SftpFs) MkdirAll(path string, perm os.FileMode) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := s.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return err + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = s.MkdirAll(path[0:j-1], perm) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = s.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. 
+ dir, err1 := s.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +func (s SftpFs) Open(name string) (File, error) { + f, err := sftpfs.FileOpen(s.SftpClient, name) + return f, err +} + +func (s SftpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return nil,nil +} + +func (s SftpFs) Remove(name string) error { + return s.SftpClient.Remove(name) +} + +func (s SftpFs) RemoveAll(path string) error { + // TODO have a look at os.RemoveAll + // https://github.com/golang/go/blob/master/src/os/path.go#L66 + return nil +} + +func (s SftpFs) Rename(oldname, newname string) error { + return s.SftpClient.Rename(oldname, newname) +} + +func (s SftpFs) Stat(name string) (os.FileInfo, error) { + return s.SftpClient.Stat(name) +} + +func (s SftpFs) Lstat(p string) (os.FileInfo, error) { + return s.SftpClient.Lstat(p) +} + +func (s SftpFs) Chmod(name string, mode os.FileMode) error { + return s.SftpClient.Chmod(name, mode) +} + +func (s SftpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return s.SftpClient.Chtimes(name, atime, mtime) +} diff --git a/vendor/github.com/spf13/afero/sftp/file.go b/vendor/github.com/spf13/afero/sftp/file.go new file mode 100644 index 0000000..bab4abc --- /dev/null +++ b/vendor/github.com/spf13/afero/sftp/file.go @@ -0,0 +1,95 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
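
For orientation, the SftpFs defined above only needs an established *sftp.Client; a hedged sketch of the wiring (host and credentials are placeholders, error handling is minimal, and this vintage of golang.org/x/crypto/ssh did not yet demand a host key callback in ClientConfig):

	package main

	import (
		"log"

		"github.com/pkg/sftp"
		"github.com/spf13/afero"
		"golang.org/x/crypto/ssh"
	)

	func main() {
		cfg := &ssh.ClientConfig{
			User: "user", // placeholder credentials
			Auth: []ssh.AuthMethod{ssh.Password("secret")},
		}
		conn, err := ssh.Dial("tcp", "example.com:22", cfg) // placeholder host
		if err != nil {
			log.Fatal(err)
		}
		client, err := sftp.NewClient(conn)
		if err != nil {
			log.Fatal(err)
		}
		var fs afero.Fs = afero.SftpFs{SftpClient: client}
		if err := fs.MkdirAll("remote/dir", 0755); err != nil {
			log.Fatal(err)
		}
	}
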
+ +package sftpfs + +import ( + "os" + "github.com/pkg/sftp" +) + +type File struct { + fd *sftp.File +} + +func FileOpen(s *sftp.Client, name string) (*File, error) { + fd, err := s.Open(name) + if err != nil { + return &File{}, err + } + return &File{fd: fd}, nil +} + +func FileCreate(s *sftp.Client, name string) (*File, error) { + fd, err := s.Create(name) + if err != nil { + return &File{}, err + } + return &File{fd: fd}, nil +} + +func (f *File) Close() error { + return f.fd.Close() +} + +func (f *File) Name() string { + return f.fd.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return f.fd.Stat() +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Truncate(size int64) error { + return f.fd.Truncate(size) +} + +func (f *File) Read(b []byte) (n int, err error) { + return f.fd.Read(b) +} + +// TODO +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + return 0,nil +} + +// TODO +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + return nil,nil +} + +// TODO +func (f *File) Readdirnames(n int) (names []string, err error) { + return nil,nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + return f.fd.Seek(offset, whence) +} + +func (f *File) Write(b []byte) (n int, err error) { + return f.fd.Write(b) +} + +// TODO +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + return 0,nil +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.fd.Write([]byte(s)) +} diff --git a/vendor/github.com/spf13/afero/sftp_test_go b/vendor/github.com/spf13/afero/sftp_test_go new file mode 100644 index 0000000..bb00535 --- /dev/null +++ b/vendor/github.com/spf13/afero/sftp_test_go @@ -0,0 +1,286 @@ +// Copyright © 2015 Jerry Jacobs . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package afero + +import ( + "testing" + "os" + "log" + "fmt" + "net" + "flag" + "time" + "io/ioutil" + "crypto/rsa" + _rand "crypto/rand" + "encoding/pem" + "crypto/x509" + + "golang.org/x/crypto/ssh" + "github.com/pkg/sftp" +) + +type SftpFsContext struct { + sshc *ssh.Client + sshcfg *ssh.ClientConfig + sftpc *sftp.Client +} + +// TODO we only connect with hardcoded user+pass for now +// it should be possible to use $HOME/.ssh/id_rsa to login into the stub sftp server +func SftpConnect(user, password, host string) (*SftpFsContext, error) { +/* + pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa") + if err != nil { + return nil,err + } + + signer, err := ssh.ParsePrivateKey(pemBytes) + if err != nil { + return nil,err + } + + sshcfg := &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ + ssh.Password(password), + ssh.PublicKeys(signer), + }, + } +*/ + + sshcfg := &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ + ssh.Password(password), + }, + } + + sshc, err := ssh.Dial("tcp", host, sshcfg) + if err != nil { + return nil,err + } + + sftpc, err := sftp.NewClient(sshc) + if err != nil { + return nil,err + } + + ctx := &SftpFsContext{ + sshc: sshc, + sshcfg: sshcfg, + sftpc: sftpc, + } + + return ctx,nil +} + +func (ctx *SftpFsContext) Disconnect() error { + ctx.sftpc.Close() + ctx.sshc.Close() + return nil +} + +// TODO for such a weird reason rootpath is "." when writing "file1" with afero sftp backend +func RunSftpServer(rootpath string) { + var ( + readOnly bool + debugLevelStr string + debugLevel int + debugStderr bool + rootDir string + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", true, "debug to stderr") + flag.StringVar(&debugLevelStr, "l", "none", "debug level") + flag.StringVar(&rootDir, "root", rootpath, "root directory") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + debugLevel = 1 + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "test" && string(pass) == "test" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("./test/id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + _, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. 
+ for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. + go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + server, err := sftp.NewServer(channel, channel, debugStream, debugLevel, readOnly, rootpath) + if err != nil { + log.Fatal(err) + } + if err := server.Serve(); err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} + +// MakeSSHKeyPair make a pair of public and private keys for SSH access. +// Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. +// Private Key generated is PEM encoded +func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error { + privateKey, err := rsa.GenerateKey(_rand.Reader, bits) + if err != nil { + return err + } + + // generate and write private key as PEM + privateKeyFile, err := os.Create(privateKeyPath) + defer privateKeyFile.Close() + if err != nil { + return err + } + + privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)} + if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { + return err + } + + // generate and write public key + pub, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return err + } + + return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655) +} + +func TestSftpCreate(t *testing.T) { + os.Mkdir("./test", 0777) + MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa") + + go RunSftpServer("./test/") + time.Sleep(5 * time.Second) + + ctx, err := SftpConnect("test", "test", "localhost:2022") + if err != nil { + t.Fatal(err) + } + defer ctx.Disconnect() + + var AppFs Fs = SftpFs{ + SftpClient: ctx.sftpc, + } + + AppFs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0777)) + AppFs.Mkdir("test/foo", os.FileMode(0000)) + AppFs.Chmod("test/foo", os.FileMode(0700)) + AppFs.Mkdir("test/bar", os.FileMode(0777)) + + file, err := AppFs.Create("file1") + if err != nil { + t.Error(err) + } + defer file.Close() + + file.Write([]byte("hello\t")) + file.WriteString("world!\n") + + f1, err := AppFs.Open("file1") + if err != nil { + log.Fatalf("open: %v", err) + } + defer f1.Close() + + b := make([]byte, 100) + + _, err = f1.Read(b) + fmt.Println(string(b)) + + // TODO check here if "hello\tworld\n" is in buffer b +} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go new file mode 100644 index 0000000..99f9e5d --- /dev/null +++ b/vendor/github.com/spf13/afero/unionFile.go @@ -0,0 +1,274 @@ 
+package afero + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +// The UnionFile implements the afero.File interface and will be returned +// when reading a directory present at least in the overlay or opening a file +// for writing. +// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + base File + layer File + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.base != nil { + f.base.Close() + } + if f.layer != nil { + return f.layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.layer != nil { + n, err := f.layer.Read(s) + if (err == nil || err == io.EOF) && f.base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.base != nil { + return f.base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.layer != nil { + n, err := f.layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.base != nil { + _, err = f.base.Seek(o+int64(n), os.SEEK_SET) + } + return n, err + } + if f.base != nil { + return f.base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.layer != nil { + pos, err = f.layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.base != nil { + _, err = f.base.Seek(o, w) + } + return pos, err + } + if f.base != nil { + return f.base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.layer != nil { + n, err = f.layer.Write(s) + if err == nil && f.base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
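+			// mirror the write into the base layer so the two layers stay in step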
+ _, err = f.base.Write(s) + } + return n, err + } + if f.base != nil { + return f.base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.layer != nil { + n, err = f.layer.WriteAt(s, o) + if err == nil && f.base != nil { + _, err = f.base.WriteAt(s, o) + } + return n, err + } + if f.base != nil { + return f.base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.layer != nil { + return f.layer.Name() + } + return f.base.Name() +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + if f.off == 0 { + var files = make(map[string]os.FileInfo) + var rfi []os.FileInfo + if f.layer != nil { + rfi, err = f.layer.Readdir(-1) + if err != nil { + return nil, err + } + for _, fi := range rfi { + files[fi.Name()] = fi + } + } + + if f.base != nil { + rfi, err = f.base.Readdir(-1) + if err != nil { + return nil, err + } + for _, fi := range rfi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + } + for _, fi := range files { + f.files = append(f.files, fi) + } + } + if c == -1 { + return f.files[f.off:], nil + } + defer func() { f.off += c }() + return f.files[f.off:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.layer != nil { + return f.layer.Stat() + } + if f.base != nil { + return f.base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.layer != nil { + err = f.layer.Sync() + if err == nil && f.base != nil { + err = f.base.Sync() + } + return err + } + if f.base != nil { + return f.base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.layer != nil { + err = f.layer.Truncate(s) + if err == nil && f.base != nil { + err = f.base.Truncate(s) + } + return err + } + if f.base != nil { + return f.base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.layer != nil { + n, err = f.layer.WriteString(s) + if err == nil && f.base != nil { + _, err = f.base.WriteString(s) + } + return n, err + } + if f.base != nil { + return f.base.WriteString(s) + } + return 0, BADFD +} + +func copyToLayer(base Fs, layer Fs, name string) error { + bfh, err := base.Open(name) + if err != nil { + return err + } + defer bfh.Close() + + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
+ if err != nil { + return err + } + } + + // Create the file on the overlay + lfh, err := layer.Create(name) + if err != nil { + return err + } + n, err := io.Copy(lfh, bfh) + if err != nil { + // If anything fails, clean up the file + layer.Remove(name) + lfh.Close() + return err + } + + bfi, err := bfh.Stat() + if err != nil || bfi.Size() != n { + layer.Remove(name) + lfh.Close() + return syscall.EIO + } + + err = lfh.Close() + if err != nil { + layer.Remove(name) + lfh.Close() + return err + } + return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) +} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go new file mode 100644 index 0000000..2f44e6a --- /dev/null +++ b/vendor/github.com/spf13/afero/util.go @@ -0,0 +1,331 @@ +// Copyright ©2015 Steve Francia +// Portions Copyright ©2015 The Hugo Authors +// Portions Copyright 2016-present Bjørn Erik Pedersen +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + "unicode" + + "golang.org/x/text/transform" + "golang.org/x/text/unicode/norm" +) + +// Filepath separator defined by os.Separator. +const FilePathSeparator = string(filepath.Separator) + +// Takes a reader and a path and writes the content +func (a Afero) WriteReader(path string, r io.Reader) (err error) { + return WriteReader(a.Fs, path, r) +} + +func WriteReader(fs Fs, path string, r io.Reader) (err error) { + dir, _ := filepath.Split(path) + ospath := filepath.FromSlash(dir) + + if ospath != "" { + err = fs.MkdirAll(ospath, 0777) // rwx, rw, r + if err != nil { + if err != os.ErrExist { + log.Panicln(err) + } + } + } + + file, err := fs.Create(path) + if err != nil { + return + } + defer file.Close() + + _, err = io.Copy(file, r) + return +} + +// Same as WriteReader but checks to see if file/directory already exists. 
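
A sketch of both helpers in use against the in-memory backend (illustrative, not vendored code; the printed error text is an assumption based on the fmt.Errorf call below):

	package main

	import (
		"fmt"
		"strings"

		"github.com/spf13/afero"
	)

	func main() {
		fs := afero.NewMemMapFs()

		// WriteReader creates missing parent directories, then copies the stream.
		if err := afero.WriteReader(fs, "logs/out.txt", strings.NewReader("first")); err != nil {
			panic(err)
		}

		// SafeWriteReader refuses to overwrite an existing file.
		err := afero.SafeWriteReader(fs, "logs/out.txt", strings.NewReader("second"))
		fmt.Println(err) // logs/out.txt already exists
	}
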
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash;
+// if subPath is not empty, it will be created recursively with mode 777 rwx rwx rwx
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize((subPath))
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// Rewrite string to remove non-standard path characters
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}

+	return string(target)
+}
+
+// Transform characters with accents into plain forms
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, string(s))
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// Check if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// Check if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
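
FileContainsBytes above funnels an opened file through this buffered scanner; a small sketch (illustrative, not vendored code; afero.WriteFile is again assumed):

	package main

	import (
		"fmt"

		"github.com/spf13/afero"
	)

	func main() {
		fs := afero.NewMemMapFs()
		afero.WriteFile(fs, "hello.txt", []byte("hello, world"), 0644)

		found, _ := afero.FileContainsBytes(fs, "hello.txt", []byte("world"))
		fmt.Println(found) // true
	}
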
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. +func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index 2377896..863585b 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -19,7 +19,7 @@ import ( // ToTimeE casts an empty interface to time.Time. func ToTimeE(i interface{}) (tim time.Time, err error) { i = indirect(i) - jww.DEBUG.Println("ToTimeE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToTimeE called on type:", reflect.TypeOf(i)) switch s := i.(type) { case time.Time: @@ -38,7 +38,7 @@ func ToTimeE(i interface{}) (tim time.Time, err error) { // ToDurationE casts an empty interface to time.Duration. 
func ToDurationE(i interface{}) (d time.Duration, err error) { i = indirect(i) - jww.DEBUG.Println("ToDurationE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToDurationE called on type:", reflect.TypeOf(i)) switch s := i.(type) { case time.Duration: @@ -61,7 +61,7 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { // ToBoolE casts an empty interface to a bool. func ToBoolE(i interface{}) (bool, error) { i = indirect(i) - jww.DEBUG.Println("ToBoolE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToBoolE called on type:", reflect.TypeOf(i)) switch b := i.(type) { case bool: @@ -83,7 +83,7 @@ func ToBoolE(i interface{}) (bool, error) { // ToFloat64E casts an empty interface to a float64. func ToFloat64E(i interface{}) (float64, error) { i = indirect(i) - jww.DEBUG.Println("ToFloat64E called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToFloat64E called on type:", reflect.TypeOf(i)) switch s := i.(type) { case float64: @@ -114,7 +114,7 @@ func ToFloat64E(i interface{}) (float64, error) { // ToInt64E casts an empty interface to an int64. func ToInt64E(i interface{}) (int64, error) { i = indirect(i) - jww.DEBUG.Println("ToInt64E called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToInt64E called on type:", reflect.TypeOf(i)) switch s := i.(type) { case int64: @@ -150,7 +150,7 @@ func ToInt64E(i interface{}) (int64, error) { // ToIntE casts an empty interface to an int. func ToIntE(i interface{}) (int, error) { i = indirect(i) - jww.DEBUG.Println("ToIntE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToIntE called on type:", reflect.TypeOf(i)) switch s := i.(type) { case int: @@ -225,7 +225,7 @@ func indirectToStringerOrError(a interface{}) interface{} { // ToStringE casts an empty interface to a string. func ToStringE(i interface{}) (string, error) { i = indirectToStringerOrError(i) - jww.DEBUG.Println("ToStringE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToStringE called on type:", reflect.TypeOf(i)) switch s := i.(type) { case string: @@ -263,7 +263,7 @@ func ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an empty interface to a map[string]string. func ToStringMapStringE(i interface{}) (map[string]string, error) { - jww.DEBUG.Println("ToStringMapStringE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToStringMapStringE called on type:", reflect.TypeOf(i)) var m = map[string]string{} @@ -292,7 +292,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an empty interface to a map[string][]string. func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - jww.DEBUG.Println("ToStringMapStringSliceE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToStringMapStringSliceE called on type:", reflect.TypeOf(i)) var m = map[string][]string{} @@ -348,7 +348,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an empty interface to a map[string]bool. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - jww.DEBUG.Println("ToStringMapBoolE called on type:", reflect.TypeOf(i)) + jww.TRACE.Println("ToStringMapBoolE called on type:", reflect.TypeOf(i)) var m = map[string]bool{} @@ -372,7 +372,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an empty interface to a map[string]interface{}. 
 func ToStringMapE(i interface{}) (map[string]interface{}, error) {
-	jww.DEBUG.Println("ToStringMapE called on type:", reflect.TypeOf(i))
+	jww.TRACE.Println("ToStringMapE called on type:", reflect.TypeOf(i))

 	var m = map[string]interface{}{}

@@ -391,7 +391,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) {

 // ToSliceE casts an empty interface to a []interface{}.
 func ToSliceE(i interface{}) ([]interface{}, error) {
-	jww.DEBUG.Println("ToSliceE called on type:", reflect.TypeOf(i))
+	jww.TRACE.Println("ToSliceE called on type:", reflect.TypeOf(i))

 	var s []interface{}

@@ -413,7 +413,7 @@ func ToSliceE(i interface{}) ([]interface{}, error) {

 // ToStringSliceE casts an empty interface to a []string.
 func ToStringSliceE(i interface{}) ([]string, error) {
-	jww.DEBUG.Println("ToStringSliceE called on type:", reflect.TypeOf(i))
+	jww.TRACE.Println("ToStringSliceE called on type:", reflect.TypeOf(i))

 	var a []string

@@ -440,7 +440,7 @@ func ToStringSliceE(i interface{}) ([]string, error) {

 // ToIntSliceE casts an empty interface to a []int.
 func ToIntSliceE(i interface{}) ([]int, error) {
-	jww.DEBUG.Println("ToIntSliceE called on type:", reflect.TypeOf(i))
+	jww.TRACE.Println("ToIntSliceE called on type:", reflect.TypeOf(i))

 	if i == nil {
 		return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 965df13..eb143d7 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -419,20 +419,26 @@ func (f *FlagSet) PrintDefaults() {
 	fmt.Fprintf(f.out(), "%s", usages)
 }

-// isZeroValue guesses whether the string represents the zero
-// value for a flag. It is not accurate but in practice works OK.
-func isZeroValue(value string) bool {
-	switch value {
-	case "false":
-		return true
-	case "<nil>":
-		return true
-	case "":
-		return true
-	case "0":
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == ""
+	case *intSliceValue, *stringSliceValue:
+		return f.DefValue == "[]"
+	default:
 		return true
 	}
-	return false
 }

 // UnquoteUsage extracts a back-quoted name from the usage
@@ -455,22 +461,19 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
 			break // Only one back quote; use type name.
 		}
 	}
-	// No explicit name, so use type if we can find one.
-	name = "value"
-	switch flag.Value.(type) {
-	case boolFlag:
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
 		name = ""
-	case *durationValue:
-		name = "duration"
-	case *float64Value:
+	case "float64":
 		name = "float"
-	case *intValue, *int64Value:
+	case "int64":
 		name = "int"
-	case *stringValue:
-		name = "string"
-	case *uintValue, *uint64Value:
+	case "uint64":
 		name = "uint"
 	}
+
 	return
 }

@@ -519,7 +522,7 @@ func (f *FlagSet) FlagUsages() string {
 		}

 		line += usage
-		if !isZeroValue(flag.DefValue) {
+		if !flag.defaultIsZeroValue() {
 			if flag.Value.Type() == "string" {
 				line += fmt.Sprintf(" (default %q)", flag.DefValue)
 			} else {
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
index b53648b..927a440 100644
--- a/vendor/github.com/spf13/pflag/string_slice.go
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -1,6 +1,7 @@
 package pflag

 import (
+	"bytes"
 	"encoding/csv"
 	"fmt"
 	"strings"
@@ -21,10 +22,17 @@ func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
 	return ssv
 }

-func (s *stringSliceValue) Set(val string) error {
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
 	stringReader := strings.NewReader(val)
 	csvReader := csv.NewReader(stringReader)
-	v, err := csvReader.Read()
+	return csvReader.Read()
+}
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
 	if err != nil {
 		return err
 	}
@@ -41,7 +49,13 @@ func (s *stringSliceValue) Type() string {
 	return "stringSlice"
 }

-func (s *stringSliceValue) String() string { return "[" + strings.Join(*s.value, ",") + "]" }
+func (s *stringSliceValue) String() string {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	w.Write(*s.value)
+	w.Flush()
+	return "[" + strings.TrimSuffix(b.String(), fmt.Sprintln()) + "]"
+}

 func stringSliceConv(sval string) (interface{}, error) {
 	sval = strings.Trim(sval, "[]")
@@ -49,8 +63,7 @@ func stringSliceConv(sval string) (interface{}, error) {
 	if len(sval) == 0 {
 		return []string{}, nil
 	}
-	v := strings.Split(sval, ",")
-	return v, nil
+	return readAsCSV(sval)
 }

 // GetStringSlice return the []string value of a flag with the given name
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
index cad242d..4ebd8dd 100644
--- a/vendor/github.com/spf13/viper/README.md
+++ b/vendor/github.com/spf13/viper/README.md
@@ -10,7 +10,7 @@ Many Go projects are built using Viper including:
 * [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
 * [Docker Notary](https://github.com/docker/Notary)
 * [BloomApi](https://www.bloomapi.com/)
-* [DOIt](https://github.com/bryanl/doit)
+* [doctl](https://github.com/digitalocean/doctl)

 [![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
index 0cc4553..fe6cb45 100644
--- a/vendor/github.com/spf13/viper/util.go
+++ b/vendor/github.com/spf13/viper/util.go
@@ -21,9 +21,9 @@ import (
 	"strings"
 	"unicode"

-	"github.com/BurntSushi/toml"
 	"github.com/hashicorp/hcl"
 	"github.com/magiconair/properties"
+	toml "github.com/pelletier/go-toml"
 	"github.com/spf13/cast"
 	jww "github.com/spf13/jwalterweatherman"
 	"gopkg.in/yaml.v2"
@@ -77,7 +77,7 @@ func absPathify(inPath string) string {

 // Check if File / Directory Exists
 func exists(path string) (bool, error) {
-	_, err := os.Stat(path)
+	_, err := v.fs.Stat(path)
 	if err == nil {
 		return true, nil
 	}
@@ -155,9 +155,14 @@
 		}

 	case "toml":
-		if _, err := toml.Decode(buf.String(), &c); err != nil {
+		tree, err := toml.LoadReader(buf)
+		if err != nil {
 			return ConfigParseError{err}
 		}
+		tmap := tree.ToMap()
+		for k, v := range tmap {
+			c[k] = v
+		}

 	case "properties", "props", "prop":
 		var p *properties.Properties
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index a2633b0..f17790e 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -23,7 +23,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"path/filepath"
@@ -33,6 +32,7 @@ import (
 	"github.com/fsnotify/fsnotify"
 	"github.com/mitchellh/mapstructure"
+	"github.com/spf13/afero"
 	"github.com/spf13/cast"
 	jww "github.com/spf13/jwalterweatherman"
 	"github.com/spf13/pflag"
@@ -132,6 +132,9 @@ type Viper struct {
 	// A set of paths to look for the config file in
 	configPaths []string

+	// The filesystem to read config from.
+	fs afero.Fs
+
 	// A set of remote providers to search for the configuration
 	remoteProviders []*defaultRemoteProvider

@@ -161,6 +164,7 @@ func New() *Viper {
 	v := new(Viper)
 	v.keyDelim = "."
 	v.configName = "config"
+	v.fs = afero.NewOsFs()
 	v.config = make(map[string]interface{})
 	v.override = make(map[string]interface{})
 	v.defaults = make(map[string]interface{})
@@ -446,6 +450,11 @@ func (v *Viper) SetTypeByDefaultValue(enable bool) {
 	v.typeByDefValue = enable
 }

+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+	return v
+}
+
 // Viper is essentially repository for configurations
 // Get can retrieve any value given the key to use
 // Get has the behavior of returning the value associated with the first
@@ -551,6 +560,12 @@ func (v *Viper) GetInt(key string) int {
 	return cast.ToInt(v.Get(key))
 }

+// Returns the value associated with the key as an int64
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+func (v *Viper) GetInt64(key string) int64 {
+	return cast.ToInt64(v.Get(key))
+}
+
 // Returns the value associated with the key as a float64
 func GetFloat64(key string) float64 { return v.GetFloat64(key) }
 func (v *Viper) GetFloat64(key string) float64 {
@@ -660,8 +675,8 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) (err error) {
 	return v.BindFlagValues(pflagValueSet{flags})
 }

-// Bind a specific key to a pflag (as used by cobra)
-// Example(where serverCmd is a Cobra instance):
+// Bind a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
 //
 //	 serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
 //	 Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
@@ -930,7 +945,7 @@ func (v *Viper) ReadInConfig() error {
 		return UnsupportedConfigError(v.getConfigType())
 	}

-	file, err := ioutil.ReadFile(v.getConfigFile())
+	file, err := afero.ReadFile(v.fs, v.getConfigFile())
 	if err != nil {
 		return err
 	}
@@ -948,7 +963,7 @@ func (v *Viper) MergeInConfig() error {
 		return UnsupportedConfigError(v.getConfigType())
 	}

-	file, err := ioutil.ReadFile(v.getConfigFile())
+	file, err := afero.ReadFile(v.fs, v.getConfigFile())
 	if err != nil {
 		return err
 	}
@@ -1202,12 +1217,19 @@ func (v *Viper) AllSettings() map[string]interface{} {
 	return m
 }

+// Set the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+func (v *Viper) SetFs(fs afero.Fs) {
+	v.fs = fs
+}
+
 // Name for the config file.
 // Does not include extension.
 func SetConfigName(in string) { v.SetConfigName(in) }
 func (v *Viper) SetConfigName(in string) {
 	if in != "" {
 		v.configName = in
+		v.configFile = ""
 	}
 }
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
index 6671c98..6d709b5 100644
--- a/vendor/golang.org/x/crypto/ssh/channel.go
+++ b/vendor/golang.org/x/crypto/ssh/channel.go
@@ -67,6 +67,8 @@ type Channel interface {
 	// boolean, otherwise the return value will be false. Channel
 	// requests are out-of-band messages so they may be sent even
 	// if the data stream is closed or blocked by flow control.
+	// If the channel is closed before a reply is returned, io.EOF
+	// is returned.
 	SendRequest(name string, wantReply bool, payload []byte) (bool, error)

 	// Stderr returns an io.ReadWriter that writes to this channel
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
index 979d919..e786f2f 100644
--- a/vendor/golang.org/x/crypto/ssh/connection.go
+++ b/vendor/golang.org/x/crypto/ssh/connection.go
@@ -23,7 +23,6 @@ func (e *OpenChannelError) Error() string {
 // ConnMetadata holds metadata for the connection.
 type ConnMetadata interface {
 	// User returns the user ID for this connection.
-	// It is empty if no authentication is used.
 	User() string

 	// SessionID returns the sesson hash, also denoted by H.
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index e73a1c1..37df1b3 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -284,7 +284,6 @@ userAuthLoop:
 		switch userAuthReq.Method {
 		case "none":
 			if config.NoClientAuth {
-				s.user = ""
 				authErr = nil
 			}
 		case "password":
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
index cb34cc2..b139412 100644
--- a/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -55,11 +55,11 @@ const (
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
 	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
-		cc, err := p.t.dialClientConn(addr)
+		const singleUse = true
+		cc, err := p.t.dialClientConn(addr, singleUse)
 		if err != nil {
 			return nil, err
 		}
-		cc.singleUse = true
 		return cc, nil
 	}
 	p.mu.Lock()
@@ -104,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
 // run in its own goroutine.
func (c *dialCall) dial(addr string) { - c.res, c.err = c.p.t.dialClientConn(addr) + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) close(c.done) c.p.mu.Lock() diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 71a4e29..20fd762 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: type StreamError struct { StreamID uint32 Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} } func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 981d407..c9b09bb 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -594,6 +594,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { var ( errStreamID = errors.New("invalid stream ID") errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") ) func validStreamIDOrZero(streamID uint32) bool { @@ -607,18 +608,40 @@ func validStreamID(streamID uint32) bool { // WriteData writes a DATA frame. // // It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - // TODO: ignoring padding for now. will add when somebody cares. + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } + if len(pad) > 255 { + return errPadLength + } var flags Flags if endStream { flags |= FlagDataEndStream } + if pad != nil { + flags |= FlagDataPadded + } f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...)
return f.endWrite() } @@ -840,7 +863,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, ConnectionError(ErrCodeProtocol) } - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } return &WindowUpdateFrame{ FrameHeader: fh, @@ -921,7 +944,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { } } if len(p)-int(padLength) <= 0 { - return nil, StreamError{fh.StreamID, ErrCodeProtocol} + return nil, streamError(fh.StreamID, ErrCodeProtocol) } hf.headerFragBuf = p[:len(p)-int(padLength)] return hf, nil @@ -1396,6 +1419,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { hdec.SetEmitEnabled(true) hdec.SetMaxStringLength(fr.maxHeaderStringLen()) hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && logFrameReads { + log.Printf("http2: decoded hpack field %+v", hf) + } if !httplex.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } @@ -1454,11 +1480,17 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } if invalid != nil { fr.errDetail = invalid - return nil, StreamError{mh.StreamID, ErrCodeProtocol} + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} } if err := mh.checkPseudos(); err != nil { fr.errDetail = err - return nil, StreamError{mh.StreamID, ErrCodeProtocol} + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} } return mh, nil } diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 0173aed..401923b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -13,7 +13,8 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. -package http2 +// +package http2 // import "golang.org/x/net/http2" import ( "bufio" @@ -349,3 +350,13 @@ func (s *sorter) SortStrings(ss []string) { sort.Sort(s) s.v = save } + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be a non-empty string starting with '/', and not +// start with two slashes. +// For now this is only used as a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +func validPseudoPath(v string) bool { + return len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/') +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index f368738..8206fa7 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -922,7 +922,7 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // state here anyway, after telling the peer // we're hanging up on them.
st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream - errCancel := StreamError{st.id, ErrCodeCancel} + errCancel := streamError(st.id, ErrCodeCancel) sc.resetStream(errCancel) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) @@ -1133,7 +1133,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { return nil } if !st.flow.add(int32(f.Increment)) { - return StreamError{f.StreamID, ErrCodeFlowControl} + return streamError(f.StreamID, ErrCodeFlowControl) } default: // connection-level flow control if !sc.flow.add(int32(f.Increment)) { @@ -1159,7 +1159,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { if st != nil { st.gotReset = true st.cancelCtx() - sc.closeStream(st, StreamError{f.StreamID, f.ErrCode}) + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) } return nil } @@ -1176,6 +1176,10 @@ func (sc *serverConn) closeStream(st *stream, err error) { } delete(sc.streams, st.id) if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + p.CloseWithError(err) } st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc @@ -1277,6 +1281,8 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { func (sc *serverConn) processData(f *DataFrame) error { sc.serveG.check() + data := f.Data() + // "If a DATA frame is received whose stream is not in "open" // or "half closed (local)" state, the recipient MUST respond // with a stream error (Section 5.4.2) of type STREAM_CLOSED." @@ -1288,32 +1294,55 @@ func (sc *serverConn) processData(f *DataFrame) error { // the http.Handler returned, so it's done reading & // done writing). Try to stop the client from sending // more DATA. - return StreamError{id, ErrCodeStreamClosed} + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + return streamError(id, ErrCodeStreamClosed) } if st.body == nil { panic("internal error: should have a body in this state") } - data := f.Data() // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return StreamError{id, ErrCodeStreamClosed} + return streamError(id, ErrCodeStreamClosed) } - if len(data) > 0 { + if f.Length > 0 { // Check whether the client has flow control quota. 
- if int(st.inflow.available()) < len(data) { - return StreamError{id, ErrCodeFlowControl} + if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) } - st.inflow.take(int32(len(data))) - wrote, err := st.body.Write(data) - if err != nil { - return StreamError{id, ErrCodeStreamClosed} + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) } - if wrote != len(data) { - panic("internal error: bad Writer") + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) } - st.bodyBytes += int64(len(data)) } if f.StreamEnded() { st.endStream() @@ -1417,14 +1446,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // REFUSED_STREAM." if sc.unackedSettings == 0 { // They should know better. - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } // Assume it's a network race, where they just haven't // received our last SETTINGS update. But actually // this can't happen yet, because we don't yet provide // a way for users to adjust server parameters at // runtime. - return StreamError{st.id, ErrCodeRefusedStream} + return streamError(st.id, ErrCodeRefusedStream) } rw, req, err := sc.newWriterAndRequest(st, f) @@ -1458,11 +1487,11 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { } st.gotTrailerHeader = true if !f.StreamEnded() { - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } if len(f.PseudoFields()) > 0 { - return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } if st.trailer != nil { for _, hf := range f.RegularFields() { @@ -1471,7 +1500,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. 
- return StreamError{st.id, ErrCodeProtocol} + return streamError(st.id, ErrCodeProtocol) } st.trailer[key] = append(st.trailer[key], hf.Value) } @@ -1532,7 +1561,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res isConnect := method == "CONNECT" if isConnect { if path != "" || scheme != "" || authority == "" { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } } else if method == "" || path == "" || (scheme != "https" && scheme != "http") { @@ -1546,13 +1575,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res // "All HTTP/2 requests MUST include exactly one valid // value for the :method, :scheme, and :path // pseudo-header fields" - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } bodyOpen := !f.StreamEnded() if method == "HEAD" && bodyOpen { // HEAD requests can't have bodies - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } var tlsState *tls.ConnectionState // nil if not scheme https @@ -1610,7 +1639,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res var err error url_, err = url.ParseRequestURI(path) if err != nil { - return nil, nil, StreamError{f.StreamID, ErrCodeProtocol} + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) } requestURI = path } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index de3f5fe..bcd27ae 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -16,6 +16,7 @@ import ( "io" "io/ioutil" "log" + "math" "net" "net/http" "sort" @@ -148,27 +149,28 @@ type ClientConn struct { readerDone chan struct{} // closed on error readerErr error // set before readerDone is closed - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - - // Settings from peer: + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow flow // our conn-level flow control quota (cs.flow is per stream) + inflow flow // peer's conn-level flow control + closed bool + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + nextStreamID uint32 + bw *bufio.Writer + br *bufio.Reader + fr *Framer + lastActive time.Time + // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 initialWindowSize uint32 - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte + + hbuf bytes.Buffer // HPACK encoder writes into this + henc *hpack.Encoder + freeBuf [][]byte wmu sync.Mutex // held while writing; acquire AFTER mu if holding both werr error // first write error that has occurred @@ -339,7 +341,7 @@ func 
shouldRetryRequest(req *http.Request, err error) bool { return err == errClientConnUnusable } -func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -348,7 +350,7 @@ func (t *Transport) dialClientConn(addr string) (*ClientConn, error) { if err != nil { return nil, err } - return t.NewClientConn(tconn) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -409,14 +411,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - if VerboseLogs { - t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr()) - } - if _, err := c.Write(clientPreface); err != nil { - t.vlogf("client preface write error: %v", err) - return nil, err - } + return t.newClientConn(c, false) +} +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -426,7 +424,13 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { initialWindowSize: 65535, // spec default maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) } + cc.cond = sync.NewCond(&cc.mu) cc.flow.add(int32(initialWindowSize)) @@ -454,6 +458,8 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } + + cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) cc.inflow.add(transportDefaultConnFlow + initialWindowSize) @@ -462,33 +468,6 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return nil, cc.werr } - // Read the obligatory SETTINGS frame - f, err := cc.fr.ReadFrame() - if err != nil { - return nil, err - } - sf, ok := f.(*SettingsFrame) - if !ok { - return nil, fmt.Errorf("expected settings frame, got: %T", f) - } - cc.fr.WriteSettingsAck() - cc.bw.Flush() - - sf.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE? - t.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - go cc.readLoop() return cc, nil } @@ -521,7 +500,7 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { } return cc.goAway == nil && !cc.closed && int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < 2147483647 + cc.nextStreamID < math.MaxInt32 } func (cc *ClientConn) closeIfIdle() { @@ -531,9 +510,13 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? 
Just Close: cc.mu.Unlock() + if VerboseLogs { + cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) + } cc.tconn.Close() } @@ -639,15 +622,18 @@ func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { // We have a body but a zero content length. Test to see if // it's actually zero or just unset. var buf [1]byte - n, rerr := io.ReadFull(body, buf[:]) + n, rerr := body.Read(buf[:]) if rerr != nil && rerr != io.EOF { return errorReader{rerr}, -1 } if n == 1 { // Oh, guess there is data in this Body Reader after all. // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our + // Stitch the Body back together again, re-attaching our // consumed byte. + if rerr == io.EOF { + return bytes.NewReader(buf[:]), 1 + } return io.MultiReader(bytes.NewReader(buf[:]), body), -1 } // Body is actually zero bytes. @@ -747,30 +733,34 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { bodyWritten := false ctx := reqContext(req) + handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, nil + } + for { select { case re := <-readLoopResCh: - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil + return handleReadLoopResponse(re) case <-respHeaderTimer: cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { @@ -804,6 +794,12 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // forgetStreamID. return nil, cs.resetErr case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } if err != nil { return nil, err } @@ -908,10 +904,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( err = cc.fr.WriteData(cs.ID, sentEnd, data) if err == nil { // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or opt-out? - // Use some heuristic on the body type? Nagel-like timers? - // Based on 'n'? 
Only last chunk of this for loop, unless flow control - tokens are low? For now, always: + // Most requests won't need this. Make this opt-in or + // opt-out? Use some heuristic on the body type? Nagle-like + // timers? Based on 'n'? Only last chunk of this for loop, + // unless flow control tokens are low? For now, always. + // If we change this, see comment below. err = cc.bw.Flush() } cc.wmu.Unlock() @@ -921,28 +918,33 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( } } + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + defer cc.mu.Unlock() + trls = cc.encodeTrailers(req) + } + cc.wmu.Lock() - if !sentEnd { - var trls []byte - if hasTrailers { - cc.mu.Lock() - trls = cc.encodeTrailers(req) - cc.mu.Unlock() - } + defer cc.wmu.Unlock() - // Avoid forgetting to send an END_STREAM if the encoded - // trailers are 0 bytes. Both results produce and END_STREAM. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) } if ferr := cc.bw.Flush(); ferr != nil && err == nil { err = ferr } - cc.wmu.Unlock() - return err } @@ -996,6 +998,22 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail host = req.URL.Host } + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + // Check for any invalid headers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) @@ -1018,7 +1036,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail cc.writeHeader(":authority", host) cc.writeHeader(":method", req.Method) if req.Method != "CONNECT" { - cc.writeHeader(":path", req.URL.RequestURI()) + cc.writeHeader(":path", path) cc.writeHeader(":scheme", "https") } if trailers != "" { @@ -1188,6 +1206,14 @@ func (e GoAwayError) Error() string { e.LastStreamID, e.ErrCode, e.DebugData) } +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + func (rl *clientConnReadLoop) cleanup() { cc := rl.cc defer cc.tconn.Close() @@ -1199,16 +1225,14 @@ func (rl *clientConnReadLoop) cleanup() { // gotten a response yet.
err := cc.readerErr cc.mu.Lock() - if err == io.EOF { - if cc.goAway != nil { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else { - err = io.ErrUnexpectedEOF + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF } for _, cs := range rl.activeRes { cs.bufPipe.CloseWithError(err) @@ -1228,15 +1252,20 @@ func (rl *clientConnReadLoop) cleanup() { func (rl *clientConnReadLoop) run() error { cc := rl.cc rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a reply + gotReply := false // ever saw a HEADERS reply + gotSettings := false for { f, err := cc.fr.ReadFrame() if err != nil { - cc.vlogf("Transport readFrame error: (%T) %v", err, err) + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - rl.endStreamError(cs, cc.fr.errDetail) + cs.cc.writeStreamReset(cs.ID, se.Code, err) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) } continue } else if err != nil { @@ -1245,6 +1274,13 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport received %s", summarizeFrame(f)) } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } maybeIdle := false // whether frame might transition us to idle switch f := f.(type) { @@ -1273,6 +1309,9 @@ func (rl *clientConnReadLoop) run() error { cc.logf("Transport: unhandled response frame type %T", f) } if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } return err } if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { @@ -1522,10 +1561,27 @@ var errClosedResponseBody = errors.New("http2: response body closed") func (b transportResponseBody) Close() error { cs := b.cs - if cs.bufPipe.Err() != io.EOF { - // TODO: write test for this - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() } + cs.bufPipe.BreakWithError(errClosedResponseBody) return nil } @@ -1533,6 +1589,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() if cs == nil { cc.mu.Lock() neverSent := cc.nextStreamID @@ -1546,10 +1603,22 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // TODO: be stricter here? only silently ignore things which // we canceled, but not things which were closed normally // by the peer? Tough without accumulating too much state. 
+ + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } return nil } - if data := f.Data(); len(data) > 0 { - if cs.bufPipe.b == nil { + if f.Length > 0 { + if len(data) > 0 && cs.bufPipe.b == nil { // Data frame after it's already closed? cc.logf("http2: Transport received DATA frame for closed stream; closing connection") return ConnectionError(ErrCodeProtocol) @@ -1557,17 +1626,30 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { // Check connection-level flow control. cc.mu.Lock() - if cs.inflow.available() >= int32(len(data)) { - cs.inflow.take(int32(len(data))) + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) } else { cc.mu.Unlock() return ConnectionError(ErrCodeFlowControl) } + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + cs.inflow.add(pad) + cc.inflow.add(pad) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(pad)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) + cc.bw.Flush() + cc.wmu.Unlock() + } cc.mu.Unlock() - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err + if len(data) > 0 { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } } } @@ -1596,6 +1678,11 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } + + select { + case cs.resc <- resAndError{err: err}: + default: + } } func (cs *clientStream) copyTrailers() { @@ -1623,18 +1710,39 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc := rl.cc cc.mu.Lock() defer cc.mu.Unlock() - return f.ForeachSetting(func(s Setting) error { + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { switch s.ID { case SettingMaxFrameSize: cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val case SettingInitialWindowSize: - // TODO: error if this is too large. + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } - // TODO: adjust flow control of still-open + // Adjust flow control of currently-open // frames by the difference of the old initial // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + cc.initialWindowSize = s.Val default: // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. @@ -1642,6 +1750,16 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { } return nil }) + if err != nil { + return err + } + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + cc.fr.WriteSettingsAck() + cc.bw.Flush() + return cc.werr } func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { @@ -1678,7 +1796,7 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { // which closes this, so there // isn't a race. 
default: - err := StreamError{cs.ID, f.ErrCode} + err := streamError(cs.ID, f.ErrCode) cs.resetErr = err close(cs.peerReset) cs.bufPipe.CloseWithError(err) @@ -1715,8 +1833,10 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { } func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: do something with err? send it as a debug frame to the peer? - // But that's only in GOAWAY. Invent a new frame type? Is there one already? + // TODO: map err to more interesting error codes, once the + // HTTP community comes up with some. But currently for + // RST_STREAM there's no equivalent to GOAWAY frame's debug + // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) cc.bw.Flush() diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0d51417..1643c08 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,6 +1,7 @@ # OAuth2 for Go [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index b952362..565d731 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -14,10 +14,10 @@ import ( "path/filepath" "runtime" + "cloud.google.com/go/compute/metadata" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // DefaultClient returns an HTTP Client that uses the diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 4e96fb6..82399b0 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -21,9 +21,9 @@ import ( "strings" "time" + "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // Endpoint is Google's OAuth 2.0 endpoint. diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 2343443..683d2d2 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -2,8 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package jws provides encoding and decoding utilities for -// signed JWS messages. +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. package jws // import "golang.org/x/oauth2/jws" import ( @@ -64,7 +72,7 @@ func (c *ClaimSet) encode() (string, error) { } if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Marshal private claim set and then append it to b. @@ -82,7 +90,7 @@ func (c *ClaimSet) encode() (string, error) { } b[len(b)-1] = ',' // Replace closing curly brace with a comma. 
b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Header represents the header for the signed JWS payloads. @@ -102,7 +110,7 @@ func (h *Header) encode() (string, error) { if err != nil { return "", err } - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Decode decodes a claim set from a JWS payload. @@ -113,7 +121,7 @@ func Decode(payload string) (*ClaimSet, error) { // TODO(jbd): Provide more context about the error. return nil, errors.New("jws: invalid token received") } - decoded, err := base64Decode(s[1]) + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) if err != nil { return nil, err } @@ -140,7 +148,7 @@ func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("%s.%s", ss, base64Encode(sig)), nil + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil } // Encode encodes a signed JWS with provided header and claim set. @@ -163,7 +171,7 @@ func Verify(token string, key *rsa.PublicKey) error { } signedContent := parts[0] + "." + parts[1] - signatureString, err := base64Decode(parts[2]) + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) if err != nil { return err } @@ -172,23 +180,3 @@ func Verify(token string, key *rsa.PublicKey) error { h.Write([]byte(signedContent)) return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) } - -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. -func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 1: - s += "===" - case 2: - s += "==" - case 3: - s += "=" - } - return base64.URLEncoding.DecodeString(s) -} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index a682896..7b06bfe 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -21,6 +21,8 @@ import ( // NoContext is the default context you should supply if not using // your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. var NoContext = context.TODO() // RegisterBrokenAuthHeaderProvider registers an OAuth2 server @@ -37,6 +39,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) { // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). type Config struct { // ClientID is the application's ID. ClientID string @@ -295,7 +299,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { c, err := internal.ContextClient(ctx) if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} + return &http.Client{Transport: internal.ErrorTransport{Err: err}} } return c } diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/text/PATENTS b/vendor/golang.org/x/text/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 0000000..ca917d2 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,338 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. + +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { + sz := w.Size + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + defer w.buf.Reset() + return WriteGo(out, pkg, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. + sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. +func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + // See golang.org/issue/13145.
+ const arbitraryCutoff = 16 + if v.Len() > arbitraryCutoff { + w.printf("var %s %s = ", name, typeName(x)) + } else { + w.printf("const %s %s = ", name, typeName(x)) + } + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // Print "" +\n, if a string does not start on its own line. + b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes\n", len(s)) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p := 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. 
+func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. +func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 0000000..dfaa278 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,226 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. 
+// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") + // Allow an environment variable to specify the local directory. + // go generate doesn't allow specifying arguments; this is a useful + // alternative to specifying a local mirror. + localDir = flag.String("local", + os.Getenv("UNICODE_DIR"), + "directory containing local data files; for debugging only.") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// This file was generated by go generate; DO NOT EDIT + +package %s + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// CLDRVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +// IsLocal reports whether the user specified a local directory. +func IsLocal() bool { + return *localDir != "" +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. +func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
+func Open(urlRoot, subdir, path string) io.ReadCloser { + if *localDir != "" { + path = filepath.FromSlash(path) + if f, err := os.Open(filepath.Join(*localDir, subdir, path)); err == nil { + return f + } + } + return get(urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + if *localDir != "" { + path = filepath.FromSlash(path) + f, err := os.Open(filepath.Join(*localDir, path)) + if err != nil { + log.Fatal(err) + } + return f + } + return get(*url, path) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 0000000..397b975 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package triegen + +// This file defines Compacter and its implementations. + +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. + return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 0000000..8d9f120 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. 
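For reference, a Compacter other than the built-in simpleCompacter might look like the sketch below: a hypothetical encoding for value blocks whose 64 entries are all identical, stored as one uint64 each. The uniformCompacter type and the uniformLookup helper are invented names, not part of this patch; Print emits both the data table and the helper function that Handler names, which the generated lookupValue calls as uniformLookup(n, b).

    package tablegen

    import (
    	"fmt"
    	"io"

    	"golang.org/x/text/internal/triegen"
    )

    // uniformCompacter stores blocks whose 64 values are all identical as a
    // single value each. Hypothetical sketch for illustration.
    type uniformCompacter struct {
    	vals []uint64
    }

    var _ triegen.Compacter = (*uniformCompacter)(nil)

    func (c *uniformCompacter) Size(v []uint64) (sz int, ok bool) {
    	for _, x := range v[1:] {
    		if x != v[0] {
    			return 0, false
    		}
    	}
    	return 8, true // 8 bytes instead of 64 * ValueSize
    }

    func (c *uniformCompacter) Store(v []uint64) uint32 {
    	h := uint32(len(c.vals))
    	c.vals = append(c.vals, v[0])
    	return h
    }

    func (c *uniformCompacter) Print(w io.Writer) error {
    	// Emit the data and the lookup helper named by Handler.
    	_, err := fmt.Fprintf(w,
    		"var uniformValues = %#v\n\nfunc uniformLookup(n uint32, b byte) uint64 { return uniformValues[n] }\n",
    		c.vals)
    	return err
    }

    func (c *uniformCompacter) Handler() string {
    	return "uniformLookup"
    }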
+ b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. 
+var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 0000000..adb0108 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package triegen implements a code generator for a trie for associating
+// unsigned integer values with UTF-8 encoded runes.
+//
+// Many of the go.text packages use tries for storing per-rune information. A
+// trie is especially useful if many of the runes have the same value. If this
+// is the case, many blocks can be expected to be shared allowing for
+// information on many runes to be stored in little space.
+//
+// As most of the lookups are done directly on []byte slices, the tries use the
+// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
+// runes and contributes a little bit to better performance. It also naturally
+// provides a fast path for ASCII.
+//
+// Space is also an issue. There are many code points defined in Unicode and as
+// a result tables can get quite large. So every byte counts. The triegen
+// package automatically chooses the smallest integer values to represent the
+// tables. Compacters allow further compression of the trie by allowing for
+// alternative representations of individual trie blocks.
+//
+// triegen allows generating multiple tries as a single structure. This is
+// useful when, for example, one wants to generate tries for several languages
+// that have a lot of values in common. Some existing libraries for
+// internationalization store all per-language data as a dynamically loadable
+// chunk. The go.text packages are designed with the assumption that the user
+// typically wants to compile in support for all supported languages, in line
+// with the approach common to Go to create a single standalone binary. The
+// multi-root trie approach can give significant storage savings in this
+// scenario.
+//
+// triegen generates both tables and code. The code is optimized to use the
+// automatically chosen data types. The following code is generated for a Trie
+// or multiple Tries named "foo":
+//	- type fooTrie
+//		The trie type.
+//
+//	- func newFooTrie(x int) *fooTrie
+//		Trie constructor, where x is the index of the trie passed to Gen.
+//
+//	- func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
+//		The lookup method, where uintX is automatically chosen.
+//
+//	- func lookupString, lookupUnsafe and lookupStringUnsafe
+//		Variants of the above.
+//
+//	- var fooValues and fooIndex and any tables generated by Compacters.
+//		The core trie data.
+//
+//	- var fooTrieHandles
+//		Indexes of starter blocks in case of multiple trie roots.
+//
+// It is recommended that users test the generated trie by checking the returned
+// value for every rune. Such exhaustive tests are possible as the number of
+// runes in Unicode is limited.
+package triegen // import "golang.org/x/text/internal/triegen"
+
+// TODO: Arguably, the internally optimized data types would not have to be
+// exposed in the generated API. We could also investigate not generating the
+// code, but using it through a package. We would have to investigate the impact
+// on performance of making such a change, though. For packages like
+// unicode/norm, small changes like this could tank performance.
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash/crc64"
+	"io"
+	"log"
+	"unicode/utf8"
+)
+
+// builder builds a set of tries for associating values with runes. The set of
+// tries can share common index and value blocks.
+type builder struct {
+	Name string
+
+	// ValueType is the type of the trie values looked up.
+	ValueType string
+
+	// ValueSize is the byte size of the ValueType.
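A minimal generator using this API might look like the following sketch; the width property, the file name and the package name are invented for the example.

    package main

    import (
    	"bytes"
    	"log"
    	"unicode"

    	"golang.org/x/text/internal/gen"
    	"golang.org/x/text/internal/triegen"
    )

    // width is a stand-in for whatever per-rune property is being tabulated;
    // zero is the default value and is never stored explicitly.
    func width(r rune) uint64 {
    	if unicode.Is(unicode.Han, r) {
    		return 2
    	}
    	return 0
    }

    func main() {
    	t := triegen.NewTrie("width")
    	// Zero values are skipped and invalid runes only panic for non-zero
    	// values, so the full range can be walked without excluding
    	// surrogates (see Insert below).
    	for r := rune(0); r <= unicode.MaxRune; r++ {
    		t.Insert(r, width(r))
    	}
    	w := &bytes.Buffer{}
    	sz, err := t.Gen(w)
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("width trie: %d bytes", sz)
    	gen.WriteGoFile("width_tables.go", "main", w.Bytes())
    }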
+ ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. +func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. 
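Multiple tries and extra Compacters come together in Gen. A short sketch, reusing the hypothetical uniformCompacter from the earlier aside (the trie names here are likewise invented):

    package tablegen

    import (
    	"io"

    	"golang.org/x/text/internal/triegen"
    )

    // genCase generates one shared structure for two hypothetical tries so
    // that common index and value blocks are stored only once; the
    // uniformCompacter sketch is registered as an extra block encoding.
    func genCase(w io.Writer, lower, upper *triegen.Trie) (size int, err error) {
    	return triegen.Gen(w, "case", []*triegen.Trie{lower, upper},
    		triegen.Compact(&uniformCompacter{}))
    }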
+func NewTrie(name string) *Trie {
+	return &Trie{
+		&node{
+			children: make([]*node, blockSize),
+			values:   make([]uint64, utf8.RuneSelf),
+		},
+		hiddenTrie{Name: name},
+	}
+}
+
+// Gen is a convenience wrapper around the Gen func passing t as the only trie
+// and uses the name passed to NewTrie. It returns the size of the generated
+// tables.
+func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
+	return Gen(w, t.Name, []*Trie{t}, opts...)
+}
+
+// node is a node of the intermediate trie structure.
+type node struct {
+	// children holds this node's children. It is always of length 64.
+	// A child node may be nil.
+	children []*node
+
+	// values contains the values of this node. If it is non-nil, this node is
+	// either a root or leaf node:
+	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
+	// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
+	values []uint64
+
+	index nodeIndex
+}
+
+// Insert associates value with the given rune. Insert will panic if a non-zero
+// value is passed for an invalid rune.
+func (t *Trie) Insert(r rune, value uint64) {
+	if value == 0 {
+		return
+	}
+	s := string(r)
+	if []rune(s)[0] != r && value != 0 {
+		// Note: The UCD tables will always assign what amounts to a zero value
+		// to a surrogate. Allowing a zero value for an illegal rune allows
+		// users to iterate over [0..MaxRune] without having to explicitly
+		// exclude surrogates, which would be tedious.
+		panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
+	}
+	if len(s) == 1 {
+		// It is a root node value (ASCII).
+		t.root.values[s[0]] = value
+		return
+	}
+
+	n := t.root
+	for ; len(s) > 1; s = s[1:] {
+		if n.children == nil {
+			n.children = make([]*node, blockSize)
+		}
+		p := s[0] % blockSize
+		c := n.children[p]
+		if c == nil {
+			c = &node{}
+			n.children[p] = c
+		}
+		if len(s) > 2 && c.values != nil {
+			log.Fatalf("triegen: insert(%U): found internal node with values", r)
+		}
+		n = c
+	}
+	if n.values == nil {
+		n.values = make([]uint64, blockSize)
+	}
+	if n.children != nil {
+		log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
+	}
+	n.values[s[0]-0x80] = value
+}
+
+// Size returns the number of bytes the generated trie will take to store. It
+// needs to be exported as it is used in the templates.
+func (b *builder) Size() int {
+	// Index blocks.
+	sz := len(b.IndexBlocks) * blockSize * b.IndexSize
+
+	// Skip the first compaction, which represents the normal value blocks, as
+	// its totalSize does not account for the ASCII blocks, which are managed
+	// separately.
+	sz += len(b.ValueBlocks) * blockSize * b.ValueSize
+	for _, c := range b.Compactions[1:] {
+		sz += c.totalSize
+	}
+
+	// TODO: this computation does not account for the fixed overhead of using
+	// a compaction, either code or data. As for data, though, the typical
+	// overhead of data is in the order of bytes (2 bytes for cases). Further,
+	// the savings of using a compaction should anyway be substantial for it to
+	// be worth it.
+
+	// For multi-root tries, we also need to account for the handles.
+	if len(b.Trie) > 1 {
+		sz += 2 * b.IndexSize * len(b.Trie)
+	}
+	return sz
+}
+
+func (b *builder) build() {
+	// Compute the sizes of the values.
+	var vmax uint64
+	for _, t := range b.Trie {
+		vmax = maxValue(t.root, vmax)
+	}
+	b.ValueType, b.ValueSize = getIntType(vmax)
+
+	// Compute all block allocations.
+	// TODO: first compute the ASCII blocks for all tries and then the other
+	// nodes.
ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. 
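Stepping out of the listing for a moment: the exhaustive test recommended in the package documentation could look like the sketch below, written against the hypothetical width generator above. newWidthTrie, lookup and width are the identifiers that generator would produce; they are assumptions here, not part of this patch.

    package tablegen

    import (
    	"testing"
    	"unicode"
    	"unicode/utf8"
    )

    // TestWidthTrie compares the generated trie against the reference
    // function for every valid rune, as the package doc recommends.
    func TestWidthTrie(t *testing.T) {
    	trie := newWidthTrie(0) // assumed generated constructor
    	for r := rune(0); r <= unicode.MaxRune; r++ {
    		if !utf8.ValidRune(r) {
    			continue // surrogates have no UTF-8 encoding
    		}
    		b := []byte(string(r))
    		v, sz := trie.lookup(b)
    		if sz != len(b) || uint64(v) != width(r) {
    			t.Errorf("%U: got (%d, %d); want (%d, %d)", r, v, sz, width(r), len(b))
    		}
    	}
    }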
+	hash := uint64(0)
+	if n.children != nil || n.values != nil {
+		hasher := crc64.New(crcTable)
+		for _, c := range n.children {
+			var v uint64
+			if c != nil {
+				v = b.computeOffsets(c, false)
+			}
+			binary.Write(hasher, binary.BigEndian, v)
+		}
+		binary.Write(hasher, binary.BigEndian, n.values)
+		hash = hasher.Sum64()
+	}
+
+	if first {
+		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
+	}
+
+	// Compacters don't apply to internal nodes.
+	if n.children != nil {
+		v, ok := b.indexBlockIdx[hash]
+		if !ok {
+			v = len(b.IndexBlocks) - blockOffset
+			b.IndexBlocks = append(b.IndexBlocks, n)
+			b.indexBlockIdx[hash] = v
+		}
+		n.index = nodeIndex{0, v}
+	} else {
+		h, ok := b.valueBlockIdx[hash]
+		if !ok {
+			bestI, bestSize := 0, blockSize*b.ValueSize
+			for i, c := range b.Compactions[1:] {
+				if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
+					bestI, bestSize = i+1, sz
+				}
+			}
+			c := &b.Compactions[bestI]
+			c.totalSize += bestSize
+			v := c.c.Store(n.values)
+			if c.maxHandle < v {
+				c.maxHandle = v
+			}
+			h = nodeIndex{bestI, int(v)}
+			b.valueBlockIdx[hash] = h
+		}
+		n.index = h
+	}
+	return hash
+}
diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go
new file mode 100644
index 0000000..2b0d1a1
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/ucd/ucd.go
@@ -0,0 +1,363 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ucd provides a parser for Unicode Character Database files, the
+// format of which is defined in http://www.unicode.org/reports/tr44/. See
+// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
+//
+// It currently does not support substitutions of missing fields.
+package ucd // import "golang.org/x/text/internal/ucd"
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// UnicodeData.txt fields.
+const (
+	CodePoint = iota
+	Name
+	GeneralCategory
+	CanonicalCombiningClass
+	BidiClass
+	DecompMapping
+	DecimalValue
+	DigitValue
+	NumericValue
+	BidiMirrored
+	Unicode1Name
+	ISOComment
+	SimpleUppercaseMapping
+	SimpleLowercaseMapping
+	SimpleTitlecaseMapping
+)
+
+// Parse calls f for each entry in the given reader of a UCD file. It will close
+// the reader upon return. It will call log.Fatal if any error occurred.
+//
+// This implements the most common usage pattern of using Parser.
+func Parse(r io.ReadCloser, f func(p *Parser)) {
+	defer r.Close()
+
+	p := New(r)
+	for p.Next() {
+		f(p)
+	}
+	if err := p.Err(); err != nil {
+		r.Close() // os.Exit will cause defers not to be called.
+		log.Fatal(err)
+	}
+}
+
+// An Option is used to configure a Parser.
+type Option func(p *Parser)
+
+func keepRanges(p *Parser) {
+	p.keepRanges = true
+}
+
+var (
+	// KeepRanges prevents the expansion of ranges. The raw ranges can be
+	// obtained by calling Range(0) on the parser.
+	KeepRanges Option = keepRanges
+)
+
+// The Part option registers a handler for lines starting with a '@'. The text
+// after a '@' is available as the first field. Comments are handled as usual.
+func Part(f func(p *Parser)) Option {
+	return func(p *Parser) {
+		p.partHandler = f
+	}
+}
+
+// A Parser parses Unicode Character Database (UCD) files.
+type Parser struct {
+	scanner *bufio.Scanner
+
+	keepRanges bool // Don't expand rune ranges in field 0.
+ + err error + comment []byte + field [][]byte + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) +} + +func (p *Parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *Parser) getField(i int) []byte { + if i >= len(p.field) { + p.setError(fmt.Errorf("ucd: index of field %d out of bounds", i)) + return nil + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = nil + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() { + b := p.scanner.Bytes() + if len(b) == 0 || b[0] == '#' { + continue + } + + // Parse line + if i := bytes.IndexByte(b, '#'); i != -1 { + p.comment = bytes.TrimSpace(b[i+1:]) + b = b[:i] + } + if b[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, bytes.TrimSpace(b[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = nil + continue + } + for { + i := bytes.IndexByte(b, ';') + if i == -1 { + p.field = append(p.field, bytes.TrimSpace(b)) + break + } + p.field = append(p.field, bytes.TrimSpace(b[:i])) + b = b[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err()) + return false +} + +func parseRune(b []byte) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(string(b), 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(b []byte) rune { + x, err := parseRune(b) + p.setError(err) + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(b []byte) { + if b = bytes.TrimSpace(b); len(b) > 0 { + runes = append(runes, p.parseRune(b)) + } + } + for b := p.getField(i); ; { + i := bytes.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
+func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := bytes.Index(b, []byte("..")); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange) + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. + ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange) + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + b := p.getField(i) + for s, v := range bools { + if bstrEq(b, s) { + return v + } + } + p.setError(strconv.ErrSyntax) + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err) + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err) + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err) + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. 
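Taken together, the typical consumption pattern of this parser is short. A sketch combining it with the gen package from earlier in this patch (the tallied property is illustrative):

    package main

    import (
    	"fmt"

    	"golang.org/x/text/internal/gen"
    	"golang.org/x/text/internal/ucd"
    )

    func main() {
    	// Tally general categories in UnicodeData.txt. Parse expands the
    	// legacy <..., First>/<..., Last> ranges, so the callback sees every
    	// code point individually.
    	counts := map[string]int{}
    	ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) {
    		counts[p.String(ucd.GeneralCategory)]++
    	})
    	fmt.Println("uppercase letters:", counts["Lu"])
    }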
+func (p *Parser) Enum(i int, enum ...string) string { + b := p.getField(i) + for _, s := range enum { + if bstrEq(b, s) { + return s + } + } + p.setError(errUndefinedEnum) + return "" +} + +func bstrEq(b []byte, s string) bool { + if len(b) != len(s) { + return false + } + for i, c := range b { + if c != s[i] { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go new file mode 100644 index 0000000..0c2f730 --- /dev/null +++ b/vendor/golang.org/x/text/transform/transform.go @@ -0,0 +1,661 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transform provides reader and writer wrappers that transform the +// bytes passing through as well as various transformations. Example +// transformations provided by other packages include normalization and +// conversion between character sets. +package transform // import "golang.org/x/text/transform" + +import ( + "bytes" + "errors" + "io" + "unicode/utf8" +) + +var ( + // ErrShortDst means that the destination buffer was too short to + // receive all of the transformed bytes. + ErrShortDst = errors.New("transform: short destination buffer") + + // ErrShortSrc means that the source buffer has insufficient data to + // complete the transformation. + ErrShortSrc = errors.New("transform: short source buffer") + + // errInconsistentByteCount means that Transform returned success (nil + // error) but also returned nSrc inconsistent with the src argument. + errInconsistentByteCount = errors.New("transform: inconsistent byte count returned") + + // errShortInternal means that an internal buffer is not large enough + // to make progress and the Transform operation must be aborted. + errShortInternal = errors.New("transform: short internal buffer") +) + +// Transformer transforms bytes. +type Transformer interface { + // Transform writes to dst the transformed bytes read from src, and + // returns the number of dst bytes written and src bytes read. The + // atEOF argument tells whether src represents the last bytes of the + // input. + // + // Callers should always process the nDst bytes produced and account + // for the nSrc bytes consumed before considering the error err. + // + // A nil error means that all of the transformed bytes (whether freshly + // transformed from src or left over from previous Transform calls) + // were written to dst. A nil error can be returned regardless of + // whether atEOF is true. If err is nil then nSrc must equal len(src); + // the converse is not necessarily true. + // + // ErrShortDst means that dst was too short to receive all of the + // transformed bytes. ErrShortSrc means that src had insufficient data + // to complete the transformation. If both conditions apply, then + // either error may be returned. Other than the error conditions listed + // here, implementations are free to report other errors that arise. + Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) + + // Reset resets the state and allows a Transformer to be reused. + Reset() +} + +// NopResetter can be embedded by implementations of Transformer to add a nop +// Reset method. +type NopResetter struct{} + +// Reset implements the Reset method of the Transformer interface. +func (NopResetter) Reset() {} + +// Reader wraps another io.Reader by transforming the bytes read. 
+type Reader struct {
+	r   io.Reader
+	t   Transformer
+	err error
+
+	// dst[dst0:dst1] contains bytes that have been transformed by t but
+	// not yet copied out via Read.
+	dst        []byte
+	dst0, dst1 int
+
+	// src[src0:src1] contains bytes that have been read from r but not
+	// yet transformed through t.
+	src        []byte
+	src0, src1 int
+
+	// transformComplete is whether the transformation is complete,
+	// regardless of whether or not it was successful.
+	transformComplete bool
+}
+
+const defaultBufSize = 4096
+
+// NewReader returns a new Reader that wraps r by transforming the bytes read
+// via t. It calls Reset on t.
+func NewReader(r io.Reader, t Transformer) *Reader {
+	t.Reset()
+	return &Reader{
+		r:   r,
+		t:   t,
+		dst: make([]byte, defaultBufSize),
+		src: make([]byte, defaultBufSize),
+	}
+}
+
+// Read implements the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	n, err := 0, error(nil)
+	for {
+		// Copy out any transformed bytes and return the final error if we are done.
+		if r.dst0 != r.dst1 {
+			n = copy(p, r.dst[r.dst0:r.dst1])
+			r.dst0 += n
+			if r.dst0 == r.dst1 && r.transformComplete {
+				return n, r.err
+			}
+			return n, nil
+		} else if r.transformComplete {
+			return 0, r.err
+		}
+
+		// Try to transform some source bytes, or to flush the transformer if we
+		// are out of source bytes. We do this even if r.r.Read returned an error.
+		// As the io.Reader documentation says, "process the n > 0 bytes returned
+		// before considering the error".
+		if r.src0 != r.src1 || r.err != nil {
+			r.dst0 = 0
+			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
+			r.src0 += n
+
+			switch {
+			case err == nil:
+				if r.src0 != r.src1 {
+					r.err = errInconsistentByteCount
+				}
+				// The Transform call was successful; we are complete if we
+				// cannot read more bytes into src.
+				r.transformComplete = r.err != nil
+				continue
+			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
+				// Make room in dst by copying out, and try again.
+				continue
+			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
+				// Read more bytes into src via the code below, and try again.
+			default:
+				r.transformComplete = true
+				// The reader error (r.err) takes precedence over the
+				// transformer error (err) unless r.err is nil or io.EOF.
+				if r.err == nil || r.err == io.EOF {
+					r.err = err
+				}
+				continue
+			}
+		}
+
+		// Move any untransformed source bytes to the start of the buffer
+		// and read more bytes.
+		if r.src0 != 0 {
+			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
+		}
+		n, r.err = r.r.Read(r.src[r.src1:])
+		r.src1 += n
+	}
+}
+
+// TODO: implement ReadByte (and ReadRune??).
+
+// Writer wraps another io.Writer by transforming the bytes written to it.
+// The user needs to call Close to flush unwritten bytes that may
+// be buffered.
+type Writer struct {
+	w   io.Writer
+	t   Transformer
+	dst []byte
+
+	// src[:n] contains bytes that have not yet passed through t.
+	src []byte
+	n   int
+}
+
+// NewWriter returns a new Writer that wraps w by transforming the bytes written
+// via t. It calls Reset on t.
+func NewWriter(w io.Writer, t Transformer) *Writer {
+	t.Reset()
+	return &Writer{
+		w:   w,
+		t:   t,
+		dst: make([]byte, defaultBufSize),
+		src: make([]byte, defaultBufSize),
+	}
+}
+
+// Write implements the io.Writer interface. If there are not enough
+// bytes available to complete a Transform, the bytes will be buffered
+// for the next write. Call Close to convert the remaining bytes.
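To illustrate the contract spelled out above, here is a minimal Transformer sketch: a one-to-one byte mapping that never needs ErrShortSrc and reports ErrShortDst when the destination fills up. upperASCII is an invented example type, not part of this patch.

    package main

    import (
    	"fmt"

    	"golang.org/x/text/transform"
    )

    // upperASCII uppercases ASCII letters and passes all other bytes through
    // unchanged. UTF-8 continuation bytes are >= 0x80, so modifying only the
    // bytes 'a'..'z' is safe on multi-byte runes.
    type upperASCII struct{ transform.NopResetter }

    func (upperASCII) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
    	n := copy(dst, src)
    	for i := 0; i < n; i++ {
    		if c := dst[i]; 'a' <= c && c <= 'z' {
    			dst[i] = c - 'a' + 'A'
    		}
    	}
    	if n < len(src) {
    		err = transform.ErrShortDst
    	}
    	return n, n, err
    }

    func main() {
    	s, _, _ := transform.String(upperASCII{}, "gopher")
    	fmt.Println(s) // GOPHER
    }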
+func (w *Writer) Write(data []byte) (n int, err error) {
+	src := data
+	if w.n > 0 {
+		// Append bytes from data to the last remainder.
+		// TODO: limit the amount copied on first try.
+		n = copy(w.src[w.n:], data)
+		w.n += n
+		src = w.src[:w.n]
+	}
+	for {
+		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
+		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
+			return n, werr
+		}
+		src = src[nSrc:]
+		if w.n == 0 {
+			n += nSrc
+		} else if len(src) <= n {
+			// Enough bytes from w.src have been consumed. We make src point
+			// to data instead to reduce the copying.
+			w.n = 0
+			n -= len(src)
+			src = data[n:]
+			if n < len(data) && (err == nil || err == ErrShortSrc) {
+				continue
+			}
+		}
+		switch err {
+		case ErrShortDst:
+			// This error is okay as long as we are making progress.
+			if nDst > 0 || nSrc > 0 {
+				continue
+			}
+		case ErrShortSrc:
+			if len(src) < len(w.src) {
+				m := copy(w.src, src)
+				// If w.n > 0, bytes from data were already copied to w.src and n
+				// was already set to the number of bytes consumed.
+				if w.n == 0 {
+					n += m
+				}
+				w.n = m
+				err = nil
+			} else if nDst > 0 || nSrc > 0 {
+				// Not enough buffer to store the remainder. Keep processing as
+				// long as there is progress. Without this case, transforms that
+				// require a lookahead larger than the buffer may result in an
+				// error. This is not something one may expect to be common in
+				// practice, but it may occur when buffers are set to small
+				// sizes during testing.
+				continue
+			}
+		case nil:
+			if w.n > 0 {
+				err = errInconsistentByteCount
+			}
+		}
+		return n, err
+	}
+}
+
+// Close implements the io.Closer interface.
+func (w *Writer) Close() error {
+	src := w.src[:w.n]
+	for {
+		nDst, nSrc, err := w.t.Transform(w.dst, src, true)
+		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
+			return werr
+		}
+		if err != ErrShortDst {
+			return err
+		}
+		src = src[nSrc:]
+	}
+}
+
+type nop struct{ NopResetter }
+
+func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	n := copy(dst, src)
+	if n < len(src) {
+		err = ErrShortDst
+	}
+	return n, n, err
+}
+
+type discard struct{ NopResetter }
+
+func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return 0, len(src), nil
+}
+
+var (
+	// Discard is a Transformer for which all Transform calls succeed
+	// by consuming all bytes and writing nothing.
+	Discard Transformer = discard{}
+
+	// Nop is a Transformer that copies src to dst.
+	Nop Transformer = nop{}
+)
+
+// chain is a sequence of links. A chain with N Transformers has N+1 links and
+// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
+// buffers given to chain.Transform and the middle N-1 buffers are intermediate
+// buffers owned by the chain. The i'th link transforms bytes from the i'th
+// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
+// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
+type chain struct {
+	link []link
+	err  error
+	// errStart is the index at which the error occurred plus 1. Processing
+	// will resume at this level on the next call to Transform. As long as
+	// errStart > 0, chain will not consume any more source bytes.
+	errStart int
+}
+
+func (c *chain) fatalError(errIndex int, err error) {
+	if i := errIndex + 1; i > c.errStart {
+		c.errStart = i
+		c.err = err
+	}
+}
+
+type link struct {
+	t Transformer
+	// b[p:n] holds the bytes to be transformed by t.
+ b []byte + p int + n int +} + +func (l *link) src() []byte { + return l.b[l.p:l.n] +} + +func (l *link) dst() []byte { + return l.b[l.n:] +} + +// Chain returns a Transformer that applies t in sequence. +func Chain(t ...Transformer) Transformer { + if len(t) == 0 { + return nop{} + } + c := &chain{link: make([]link, len(t)+1)} + for i, tt := range t { + c.link[i].t = tt + } + // Allocate intermediate buffers. + b := make([][defaultBufSize]byte, len(t)-1) + for i := range b { + c.link[i+1].b = b[i][:] + } + return c +} + +// Reset resets the state of Chain. It calls Reset on all the Transformers. +func (c *chain) Reset() { + for i, l := range c.link { + if l.t != nil { + l.t.Reset() + } + c.link[i].p, c.link[i].n = 0, 0 + } +} + +// Transform applies the transformers of c in sequence. +func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + // Set up src and dst in the chain. + srcL := &c.link[0] + dstL := &c.link[len(c.link)-1] + srcL.b, srcL.p, srcL.n = src, 0, len(src) + dstL.b, dstL.n = dst, 0 + var lastFull, needProgress bool // for detecting progress + + // i is the index of the next Transformer to apply, for i in [low, high]. + // low is the lowest index for which c.link[low] may still produce bytes. + // high is the highest index for which c.link[high] has a Transformer. + // The error returned by Transform determines whether to increase or + // decrease i. We try to completely fill a buffer before converting it. + for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; { + in, out := &c.link[i], &c.link[i+1] + nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i) + out.n += nDst + in.p += nSrc + if i > 0 && in.p == in.n { + in.p, in.n = 0, 0 + } + needProgress, lastFull = lastFull, false + switch err0 { + case ErrShortDst: + // Process the destination buffer next. Return if we are already + // at the high index. + if i == high { + return dstL.n, srcL.p, ErrShortDst + } + if out.n != 0 { + i++ + // If the Transformer at the next index is not able to process any + // source bytes there is nothing that can be done to make progress + // and the bytes will remain unprocessed. lastFull is used to + // detect this and break out of the loop with a fatal error. + lastFull = true + continue + } + // The destination buffer was too small, but is completely empty. + // Return a fatal error as this transformation can never complete. + c.fatalError(i, errShortInternal) + case ErrShortSrc: + if i == 0 { + // Save ErrShortSrc in err. All other errors take precedence. + err = ErrShortSrc + break + } + // Source bytes were depleted before filling up the destination buffer. + // Verify we made some progress, move the remaining bytes to the errStart + // and try to get more source bytes. + if needProgress && nSrc == 0 || in.n-in.p == len(in.b) { + // There were not enough source bytes to proceed while the source + // buffer cannot hold any more bytes. Return a fatal error as this + // transformation can never complete. + c.fatalError(i, errShortInternal) + break + } + // in.b is an internal buffer and we can make progress. + in.p, in.n = 0, copy(in.b, in.src()) + fallthrough + case nil: + // if i == low, we have depleted the bytes at index i or any lower levels. + // In that case we increase low and i. In all other cases we decrease i to + // fetch more bytes before proceeding to the next index. 
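As a usage aside, a chain is built with Chain and then used like any other Transformer; the sketch below pipes a Reader through a chain of two transformers (three links, one intermediate buffer) using only transformers defined in this package.

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"
    	"unicode"

    	"golang.org/x/text/transform"
    )

    func main() {
    	// Strip all whitespace, then pass the result through Nop unchanged.
    	t := transform.Chain(transform.RemoveFunc(unicode.IsSpace), transform.Nop)
    	r := transform.NewReader(strings.NewReader("a b\tc\nd"), t)
    	b, err := ioutil.ReadAll(r)
    	fmt.Printf("%s %v\n", b, err) // abcd <nil>
    }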
+ if i > low { + i-- + continue + } + default: + c.fatalError(i, err0) + } + // Exhausted level low or fatal error: increase low and continue + // to process the bytes accepted so far. + i++ + low = i + } + + // If c.errStart > 0, this means we found a fatal error. We will clear + // all upstream buffers. At this point, no more progress can be made + // downstream, as Transform would have bailed while handling ErrShortDst. + if c.errStart > 0 { + for i := 1; i < c.errStart; i++ { + c.link[i].p, c.link[i].n = 0, 0 + } + err, c.errStart, c.err = c.err, 0, nil + } + return dstL.n, srcL.p, err +} + +// RemoveFunc returns a Transformer that removes from the input all runes r for +// which f(r) is true. Illegal bytes in the input are replaced by RuneError. +func RemoveFunc(f func(r rune) bool) Transformer { + return removeF(f) +} + +type removeF func(r rune) bool + +func (removeF) Reset() {} + +// Transform implements the Transformer interface. +func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] { + + if r = rune(src[0]); r < utf8.RuneSelf { + sz = 1 + } else { + r, sz = utf8.DecodeRune(src) + + if sz == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src) { + err = ErrShortSrc + break + } + // We replace illegal bytes with RuneError. Not doing so might + // otherwise turn a sequence of invalid UTF-8 into valid UTF-8. + // The resulting byte sequence may subsequently contain runes + // for which t(r) is true that were passed unnoticed. + if !t(r) { + if nDst+3 > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], "\uFFFD") + } + nSrc++ + continue + } + } + + if !t(r) { + if nDst+sz > len(dst) { + err = ErrShortDst + break + } + nDst += copy(dst[nDst:], src[:sz]) + } + nSrc += sz + } + return +} + +// grow returns a new []byte that is longer than b, and copies the first n bytes +// of b to the start of the new slice. +func grow(b []byte, n int) []byte { + m := len(b) + if m <= 32 { + m = 64 + } else if m <= 256 { + m *= 2 + } else { + m += m >> 1 + } + buf := make([]byte, m) + copy(buf, b[:n]) + return buf +} + +const initialBufSize = 128 + +// String returns a string with the result of converting s[:n] using t, where +// n <= len(s). If err == nil, n will be len(s). It calls Reset on t. +func String(t Transformer, s string) (result string, n int, err error) { + t.Reset() + if s == "" { + // Fast path for the common case for empty input. Results in about a + // 86% reduction of running time for BenchmarkStringLowerEmpty. + if _, _, err := t.Transform(nil, nil, true); err == nil { + return "", 0, nil + } + } + + // Allocate only once. Note that both dst and src escape when passed to + // Transform. + buf := [2 * initialBufSize]byte{} + dst := buf[:initialBufSize:initialBufSize] + src := buf[initialBufSize : 2*initialBufSize] + + // The input string s is transformed in multiple chunks (starting with a + // chunk size of initialBufSize). nDst and nSrc are per-chunk (or + // per-Transform-call) indexes, pDst and pSrc are overall indexes. + nDst, nSrc := 0, 0 + pDst, pSrc := 0, 0 + + // pPrefix is the length of a common prefix: the first pPrefix bytes of the + // result will equal the first pPrefix bytes of s. It is not guaranteed to + // be the largest such value, but if pPrefix, len(result) and len(s) are + // all equal after the final transform (i.e. calling Transform with atEOF + // being true returned nil error) then we don't need to allocate a new + // result string. 
+	pPrefix := 0
+	for {
+		// Invariant: pDst == pPrefix && pSrc == pPrefix.
+
+		n := copy(src, s[pSrc:])
+		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
+		pDst += nDst
+		pSrc += nSrc
+
+		// TODO: let transformers implement an optional Spanner interface, akin
+		// to norm's QuickSpan. This would even allow us to avoid any allocation.
+		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
+			break
+		}
+		pPrefix = pSrc
+		if err == ErrShortDst {
+			// A buffer can only be short if a transformer modifies its input.
+			break
+		} else if err == ErrShortSrc {
+			if nSrc == 0 {
+				// No progress was made.
+				break
+			}
+			// Equal so far and !atEOF, so continue checking.
+		} else if err != nil || pPrefix == len(s) {
+			return string(s[:pPrefix]), pPrefix, err
+		}
+	}
+	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.
+
+	// We have transformed the first pSrc bytes of the input s to become pDst
+	// transformed bytes. Those transformed bytes are discontiguous: the first
+	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
+	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
+	// that they become one contiguous slice: dst[:pDst].
+	if pPrefix != 0 {
+		newDst := dst
+		if pDst > len(newDst) {
+			newDst = make([]byte, len(s)+nDst-nSrc)
+		}
+		copy(newDst[pPrefix:pDst], dst[:nDst])
+		copy(newDst[:pPrefix], s[:pPrefix])
+		dst = newDst
+	}
+
+	// Prevent duplicate Transform calls with atEOF being true at the end of
+	// the input. Also return if we have an unrecoverable error.
+	if (err == nil && pSrc == len(s)) ||
+		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
+		return string(dst[:pDst]), pSrc, err
+	}
+
+	// Transform the remaining input, growing dst and src buffers as necessary.
+	for {
+		n := copy(src, s[pSrc:])
+		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
+		pDst += nDst
+		pSrc += nSrc
+
+		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
+		// make progress. This may avoid excessive allocations.
+		if err == ErrShortDst {
+			if nDst == 0 {
+				dst = grow(dst, pDst)
+			}
+		} else if err == ErrShortSrc {
+			if nSrc == 0 {
+				src = grow(src, 0)
+			}
+		} else if err != nil || pSrc == len(s) {
+			return string(dst[:pDst]), pSrc, err
+		}
+	}
+}
+
+// Bytes returns a new byte slice with the result of converting b[:n] using t,
+// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
+func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
+	return doAppend(t, 0, make([]byte, len(b)), b)
+}
+
+// Append appends the result of converting src[:n] using t to dst, where
+// n <= len(src). If err == nil, n will be len(src). It calls Reset on t.
+func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
+	if len(dst) == cap(dst) {
+		n := len(src) + len(dst) // It is okay for this to be 0.
+		b := make([]byte, n)
+		dst = b[:copy(b, dst)]
+	}
+	return doAppend(t, len(dst), dst[:cap(dst)], src)
+}
+
+func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
+	t.Reset()
+	pSrc := 0
+	for {
+		nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
+		pDst += nDst
+		pSrc += nSrc
+		if err != ErrShortDst {
+			return dst[:pDst], pSrc, err
+		}
+
+		// Grow the destination buffer, but do not grow as long as we can make
+		// progress. This may avoid excessive allocations.
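The String, Bytes and Append helpers are the usual entry points for one-shot conversions. A small sketch using RemoveFunc from this package:

    package main

    import (
    	"fmt"
    	"unicode"

    	"golang.org/x/text/transform"
    )

    func main() {
    	// Strip combining marks from a decomposed string. The common-prefix
    	// optimization above means unchanged input avoids an allocation.
    	strip := transform.RemoveFunc(unicode.IsMark)
    	s, n, err := transform.String(strip, "cafe\u0301")
    	fmt.Println(s, n, err) // cafe 6 <nil>
    }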
+		if nDst == 0 {
+			dst = grow(dst, pDst)
+		}
+	}
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go
new file mode 100644
index 0000000..2182179
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/base.go
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cldr provides a parser for LDML and related XML formats.
+// This package is intended to be used by the table generation tools
+// for the various internationalization-related packages.
+// As the XML types are generated from the CLDR DTD, and as the CLDR standard
+// is periodically amended, this package may change considerably over time.
+// This mostly means that data may appear and disappear between versions.
+// That is, old code should keep compiling for newer versions, but data
+// may have moved or changed.
+// CLDR version 22 is the first version supported by this package.
+// Older versions may not work.
+package cldr
+
+import (
+	"encoding/xml"
+	"regexp"
+	"strconv"
+)
+
+// Elem is implemented by every XML element.
+type Elem interface {
+	setEnclosing(Elem)
+	setName(string)
+	enclosing() Elem
+
+	GetCommon() *Common
+}
+
+type hidden struct {
+	CharData string `xml:",chardata"`
+	Alias    *struct {
+		Common
+		Source string `xml:"source,attr"`
+		Path   string `xml:"path,attr"`
+	} `xml:"alias"`
+	Def *struct {
+		Common
+		Choice string `xml:"choice,attr,omitempty"`
+		Type   string `xml:"type,attr,omitempty"`
+	} `xml:"default"`
+}
+
+// Common holds several of the most common attributes and sub elements
+// of an XML element.
+type Common struct {
+	XMLName         xml.Name
+	name            string
+	enclElem        Elem
+	Type            string `xml:"type,attr,omitempty"`
+	Reference       string `xml:"reference,attr,omitempty"`
+	Alt             string `xml:"alt,attr,omitempty"`
+	ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
+	Draft           string `xml:"draft,attr,omitempty"`
+	hidden
+}
+
+// Default returns the default type to select from the enclosed list
+// or "" if no default value is specified.
+func (e *Common) Default() string {
+	if e.Def == nil {
+		return ""
+	}
+	if e.Def.Choice != "" {
+		return e.Def.Choice
+	} else if e.Def.Type != "" {
+		// Type is still used by the default element in collation.
+		return e.Def.Type
+	}
+	return ""
+}
+
+// GetCommon returns e. It is provided such that Common implements Elem.
+func (e *Common) GetCommon() *Common {
+	return e
+}
+
+// Data returns the character data accumulated for this element.
+func (e *Common) Data() string {
+	e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
+	return e.CharData
+}
+
+func (e *Common) setName(s string) {
+	e.name = s
+}
+
+func (e *Common) enclosing() Elem {
+	return e.enclElem
+}
+
+func (e *Common) setEnclosing(en Elem) {
+	e.enclElem = en
+}
+
+// Escape characters that can be escaped without further escaping the string.
+var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
+
+// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
+// It assumes the input string is correctly formatted.
+func replaceUnicode(s string) string {
+	if s[1] == '#' {
+		r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
+		return string(rune(r))
+	}
+	r, _, _, _ := strconv.UnquoteChar(s, 0)
+	return string(r)
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go
new file mode 100644
index 0000000..ea3fe13
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go
@@ -0,0 +1,130 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run makexml.go -output xml.go
+
+// Package cldr provides a parser for LDML and related XML formats.
+// This package is intended to be used by the table generation tools
+// for the various internationalization-related packages.
+// As the XML types are generated from the CLDR DTD, and as the CLDR standard
+// is periodically amended, this package may change considerably over time.
+// This mostly means that data may appear and disappear between versions.
+// That is, old code should keep compiling for newer versions, but data
+// may have moved or changed.
+// CLDR version 22 is the first version supported by this package.
+// Older versions may not work.
+package cldr // import "golang.org/x/text/unicode/cldr"
+
+import (
+	"fmt"
+	"sort"
+)
+
+// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
+type CLDR struct {
+	parent   map[string][]string
+	locale   map[string]*LDML
+	resolved map[string]*LDML
+	bcp47    *LDMLBCP47
+	supp     *SupplementalData
+}
+
+func makeCLDR() *CLDR {
+	return &CLDR{
+		parent:   make(map[string][]string),
+		locale:   make(map[string]*LDML),
+		resolved: make(map[string]*LDML),
+		bcp47:    &LDMLBCP47{},
+		supp:     &SupplementalData{},
+	}
+}
+
+// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
+func (cldr *CLDR) BCP47() *LDMLBCP47 {
+	return cldr.bcp47
+}
+
+// Draft indicates the draft level of an element.
+type Draft int
+
+const (
+	Approved Draft = iota
+	Contributed
+	Provisional
+	Unconfirmed
+)
+
+var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
+
+// ParseDraft returns the Draft value corresponding to the given string. The
+// empty string corresponds to Approved.
+func ParseDraft(level string) (Draft, error) {
+	if level == "" {
+		return Approved, nil
+	}
+	for i, s := range drafts {
+		if level == s {
+			return Unconfirmed - Draft(i), nil
+		}
+	}
+	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
+}
+
+func (d Draft) String() string {
+	return drafts[len(drafts)-1-int(d)]
+}
+
+// SetDraftLevel sets which draft levels to include in the evaluated LDML.
+// Any draft element for which the draft level is higher than lev will be excluded.
+// If multiple draft levels are available for a single element, the one with the
+// lowest draft level will be selected, unless preferDraft is true, in which case
+// the highest draft will be chosen.
+// It is assumed that the underlying LDML is canonicalized.
+func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
+	// TODO: implement
+	cldr.resolved = make(map[string]*LDML)
+}
+
+// RawLDML returns the LDML XML for loc in unresolved form.
+// loc must be one of the strings returned by Locales.
+func (cldr *CLDR) RawLDML(loc string) *LDML {
+	return cldr.locale[loc]
+}
+
+// LDML returns the fully resolved LDML XML for loc, which must be one of
+// the strings returned by Locales.
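+//
+// A minimal usage sketch (an editorial addition, not upstream documentation;
+// "de" is an assumed locale name and must be among the strings returned by
+// Locales):
+//
+//	ldml, err := cldr.LDML("de")
+//	if err != nil {
+//		// the locale could not be resolved
+//	}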
+func (cldr *CLDR) LDML(loc string) (*LDML, error) {
+	return cldr.resolve(loc)
+}
+
+// Supplemental returns the parsed supplemental data. If no such data was parsed,
+// nil is returned.
+func (cldr *CLDR) Supplemental() *SupplementalData {
+	return cldr.supp
+}
+
+// Locales returns the locales for which there exist files.
+// Valid sublocales for which there is no file are not included.
+// The root locale is always sorted first.
+func (cldr *CLDR) Locales() []string {
+	loc := []string{"root"}
+	hasRoot := false
+	for l := range cldr.locale {
+		if l == "root" {
+			hasRoot = true
+			continue
+		}
+		loc = append(loc, l)
+	}
+	sort.Strings(loc[1:])
+	if !hasRoot {
+		return loc[1:]
+	}
+	return loc
+}
+
+// Get returns the element reached by following the XPath path from e.
+func Get(e Elem, path string) (res Elem, err error) {
+	return walkXPath(e, path)
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go
new file mode 100644
index 0000000..80ee28d
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/collate.go
@@ -0,0 +1,359 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+	"bufio"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// RuleProcessor can be passed to Collation's Process method, which
+// parses the rules and calls the respective method for each rule found.
+type RuleProcessor interface {
+	Reset(anchor string, before int) error
+	Insert(level int, str, context, extend string) error
+	Index(id string)
+}
+
+const (
+	// cldrIndex is a Unicode-reserved sentinel value used to mark the start
+	// of a grouping within an index.
+	// We ignore any rule that starts with this rune.
+	// See http://unicode.org/reports/tr35/#Collation_Elements for details.
+	cldrIndex = "\uFDD0"
+
+	// specialAnchor is the format in which to represent logical reset positions,
+	// such as "first tertiary ignorable".
+	specialAnchor = "<%s/>"
+)
+
+// Process parses the rules for the tailorings of this collation
+// and calls the respective methods of p for each rule found.
+func (c Collation) Process(p RuleProcessor) (err error) {
+	if len(c.Cr) > 0 {
+		if len(c.Cr) > 1 {
+			return fmt.Errorf("multiple cr elements, want 0 or 1")
+		}
+		return processRules(p, c.Cr[0].Data())
+	}
+	if c.Rules.Any != nil {
+		return c.processXML(p)
+	}
+	return errors.New("no tailoring data")
+}
+
+// processRules parses rules in the Collation Rule Syntax defined in
+// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
+func processRules(p RuleProcessor, s string) (err error) {
+	chk := func(s string, e error) string {
+		if err == nil {
+			err = e
+		}
+		return s
+	}
+	i := 0 // Save the line number for use after the loop.
+	scanner := bufio.NewScanner(strings.NewReader(s))
+	for ; scanner.Scan() && err == nil; i++ {
+		for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
+			level := 5
+			var ch byte
+			switch ch, s = s[0], s[1:]; ch {
+			case '&': // followed by <anchor> or '[' <key> ']'
+				if s = skipSpace(s); consume(&s, '[') {
+					s = chk(parseSpecialAnchor(p, s))
+				} else {
+					s = chk(parseAnchor(p, 0, s))
+				}
+			case '<': // sort relation '<'{1,4}, optionally followed by '*'.
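+				// Illustration drawn from UTS #35 (an editorial note, not an
+				// upstream comment): in the rule "&a < b << c <<< d", "<"
+				// marks a primary difference, "<<" secondary, and "<<<"
+				// tertiary, while "&a <* bcd" abbreviates "&a <b <c <d".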
+				for level = 1; consume(&s, '<'); level++ {
+				}
+				if level > 4 {
+					err = fmt.Errorf("level %d > 4", level)
+				}
+				fallthrough
+			case '=': // identity relation, optionally followed by *.
+				if consume(&s, '*') {
+					s = chk(parseSequence(p, level, s))
+				} else {
+					s = chk(parseOrder(p, level, s))
+				}
+			default:
+				chk("", fmt.Errorf("illegal operator %q", ch))
+				break
+			}
+		}
+	}
+	if chk("", scanner.Err()); err != nil {
+		return fmt.Errorf("%d: %v", i, err)
+	}
+	return nil
+}
+
+// parseSpecialAnchor parses the anchor syntax, which is either of the form
+//	['before' <level>] <anchor>
+// or
+//	[