aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml3
-rw-r--r--README.md49
-rw-r--r--cmd/cashierd/main.go14
-rw-r--r--example-server.conf8
-rw-r--r--server/config/config.go70
-rw-r--r--server/helpers/vault/vault.go55
-rw-r--r--server/store/store.go4
-rw-r--r--server/util/util.go (renamed from server/certutil/util.go)2
-rw-r--r--server/util/util_test.go (renamed from server/certutil/util_test.go)2
-rw-r--r--server/wkfs/s3fs/s3.go (renamed from server/fs/s3.go)2
-rw-r--r--server/wkfs/vaultfs/vault.go91
-rw-r--r--vendor/github.com/fatih/structs/LICENSE21
-rw-r--r--vendor/github.com/fatih/structs/README.md163
-rw-r--r--vendor/github.com/fatih/structs/field.go141
-rw-r--r--vendor/github.com/fatih/structs/structs.go579
-rw-r--r--vendor/github.com/fatih/structs/tags.go32
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/LICENSE363
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/README.md30
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go53
-rw-r--r--vendor/github.com/hashicorp/go-cleanhttp/doc.go20
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/LICENSE363
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/Makefile8
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/README.md43
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/doc.go9
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/rootcerts.go103
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go12
-rw-r--r--vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go48
-rw-r--r--vendor/github.com/hashicorp/hcl/Makefile1
-rw-r--r--vendor/github.com/hashicorp/hcl/appveyor.yml3
-rw-r--r--vendor/github.com/hashicorp/hcl/decoder.go59
-rw-r--r--vendor/github.com/hashicorp/vault/LICENSE363
-rw-r--r--vendor/github.com/hashicorp/vault/api/SPEC.md611
-rw-r--r--vendor/github.com/hashicorp/vault/api/auth.go11
-rw-r--r--vendor/github.com/hashicorp/vault/api/auth_token.go223
-rw-r--r--vendor/github.com/hashicorp/vault/api/client.go416
-rw-r--r--vendor/github.com/hashicorp/vault/api/help.go25
-rw-r--r--vendor/github.com/hashicorp/vault/api/logical.go176
-rw-r--r--vendor/github.com/hashicorp/vault/api/request.go71
-rw-r--r--vendor/github.com/hashicorp/vault/api/response.go72
-rw-r--r--vendor/github.com/hashicorp/vault/api/secret.go68
-rw-r--r--vendor/github.com/hashicorp/vault/api/ssh.go38
-rw-r--r--vendor/github.com/hashicorp/vault/api/ssh_agent.go257
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys.go11
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_audit.go114
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_auth.go87
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_capabilities.go43
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_generate_root.go77
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_init.go54
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_leader.go20
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_lease.go48
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_mounts.go142
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_policy.go95
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_rekey.go202
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_rotate.go30
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_seal.go59
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_stepdown.go10
-rw-r--r--vendor/github.com/hashicorp/vault/helper/compressutil/compress.go159
-rw-r--r--vendor/github.com/hashicorp/vault/helper/jsonutil/json.go99
-rw-r--r--vendor/github.com/mitchellh/go-homedir/LICENSE21
-rw-r--r--vendor/github.com/mitchellh/go-homedir/README.md14
-rw-r--r--vendor/github.com/mitchellh/go-homedir/homedir.go137
-rw-r--r--vendor/github.com/sethgrid/pester/LICENSE.md21
-rw-r--r--vendor/github.com/sethgrid/pester/README.md126
-rw-r--r--vendor/github.com/sethgrid/pester/main.go423
-rw-r--r--vendor/vendor.json86
65 files changed, 6708 insertions, 52 deletions
diff --git a/.travis.yml b/.travis.yml
index 0567d0b..413fb89 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,10 +4,9 @@ services:
- mongodb
env:
- - GO15VENDOREXPERIMENT=1 MYSQL_TEST_CONFIG="mysql:user:passwd:localhost" MONGO_TEST_CONFIG="mongo:user:passwd:localhost"
+ - MYSQL_TEST_CONFIG="mysql:user:passwd:localhost" MONGO_TEST_CONFIG="mongo:user:passwd:localhost"
go:
- - 1.6.3
- 1.7
- tip
diff --git a/README.md b/README.md
index e7a5c79..8227449 100644
--- a/README.md
+++ b/README.md
@@ -15,6 +15,7 @@
- [Provider-specific options](#provider-specific-options)
- [ssh](#ssh)
- [aws](#aws)
+ - [vault](#vault)
- [Usage](#usage)
- [Using cashier](#using-cashier)
- [Configuring SSH](#configuring-ssh)
@@ -79,25 +80,34 @@ docker run -it --rm -p 10000:10000 --name cashier -v $(pwd):/cashier nsheridan/c
# Requirements
## Server
-Go 1.6 or later. May work with earlier versions but not tested.
+Go 1.7 or later. May work with earlier versions.
## Client
-OpenSSH 5.6 or newer.
-A working SSH agent.
-I have only tested this on Linux & OSX.
+- OpenSSH 5.6 or newer.
+- A working SSH agent.
+
+Note: I have only tested this on Linux & OSX.
# Configuration
Configuration is divided into different sections: `server`, `auth`, `ssh`, and `aws`.
+## A note on files:
+For any option that takes a file path as a parameter (e.g. SSH signing key, TLS key, TLS cert), the path can be one of:
+
+- A relative or absolute filesystem path e.g. `/data/ssh_signing_key`, `tls/server.key`.
+- An AWS S3 bucket + object path starting with `/s3/` e.g. `/s3/my-bucket/ssh_signing_key`. You should add an [aws](#aws) config as needed.
+- A Google GCS bucket + object path starting with `/gcs/` e.g. `/gcs/my-bucket/ssh_signing_key`.
+- A [Vault](https://www.vaultproject.io) path + key starting with `/vault/` e.g. `/vault/secret/cashier/ssh_signing_key`. You should add a [vault](#vault) config as needed.
+
## server
- `use_tls` : boolean. If this is set then `tls_key` and `tls_cert` are required.
-- `tls_key` : string. Path to the TLS key.
-- `tls_cert` : string. Path to the TLS cert.
+- `tls_key` : string. Path to the TLS key. See the [note](#a-note-on-files) on files above.
+- `tls_cert` : string. Path to the TLS cert. See the [note](#a-note-on-files) on files above.
- `address` : string. IP address to listen on. If unset the server listens on all addresses.
- `port` : int. Port to listen on.
- `user` : string. User to which the server drops privileges to.
-- `cookie_secret`: string. Authentication key for the session cookie.
-- `csrf_secret`: string. Authentication key for CSRF protection.
+- `cookie_secret`: string. Authentication key for the session cookie. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/cookie_secret`.
+- `csrf_secret`: string. Authentication key for CSRF protection. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/csrf_secret`.
- `http_logfile`: string. Path to the HTTP request log. Logs are written in the [Common Log Format](https://en.wikipedia.org/wiki/Common_Log_Format). If not set logs are written to stderr.
- `datastore`: string. Datastore connection string. See [Datastore](#datastore).
@@ -131,8 +141,8 @@ Note that dbinit has no support for replica sets.
## auth
- `provider` : string. Name of the oauth provider. Valid providers are currently "google" and "github".
-- `oauth_client_id` : string. Oauth Client ID.
-- `oauth_client_secret` : string. Oauth secret.
+- `oauth_client_id` : string. Oauth Client ID. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/oauth_client_id`.
+- `oauth_client_secret` : string. Oauth secret. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/oauth_client_secret`.
- `oauth_callback_url` : string. URL that the Oauth provider will redirect to after user authorisation. The path is hardcoded to `"/auth/callback"` in the source.
- `provider_opts` : object. Additional options for the provider.
- `users_whitelist` : array of strings. Optional list of whitelisted usernames. If missing, all users of your current domain/organization are allowed to authenticate against cashierd. For Google auth a user is an email address. For GitHub auth a user is a GitHub username.
@@ -153,27 +163,34 @@ auth {
}
```
+Supported options:
+
+
| Provider | Option | Notes |
|---------:|-------------:|----------------------------------------------------------------------------------------------------------------------------------------|
| Google | domain | If this is unset then you must whitelist individual email addresses using `users_whitelist`. |
| Github | organization | If this is unset then you must whitelist individual users using `users_whitelist`. The oauth client and secrets should be issued by the specified organization. |
-Supported options:
-
## ssh
-- `signing_key`: string. Path to the signing ssh private key you created earlier. This can be a S3 or GCS path using `/s3/<bucket>/<path/to/key>` or `/gcs/<bucket>/<path/to/key>` as appropriate. For S3 you should add an [aws](#aws) config as needed.
+- `signing_key`: string. Path to the signing ssh private key you created earlier. See the [note](#a-note-on-files) on files above.
- `additional_principals`: array of string. By default certificates will have one principal set - the username portion of the requester's email address. If `additional_principals` is set, these will be added to the certificate e.g. if your production machines use shared user accounts.
- `max_age`: string. If set the server will not issue certificates with an expiration value longer than this, regardless of what the client requests. Must be a valid Go [`time.Duration`](https://golang.org/pkg/time/#ParseDuration) string.
- `permissions`: array of string. Actions the certificate can perform. See the [`-O` option to `ssh-keygen(1)`](http://man.openbsd.org/OpenBSD-current/man1/ssh-keygen.1) for a complete list.
## aws
-AWS configuration is only needed for accessing signing keys stored on S3, and isn't required even then.
+AWS configuration is only needed for accessing signing keys stored on S3, and isn't strictly required even then.
The S3 client can be configured using any of [the usual AWS-SDK means](https://github.com/aws/aws-sdk-go/wiki/configuring-sdk) - environment variables, IAM roles etc.
It's strongly recommended that signing keys stored on S3 be locked down to specific IAM roles and encrypted using KMS.
- `region`: string. AWS region the bucket resides in, e.g. `us-east-1`.
-- `access_key`: string. AWS Access Key ID.
-- `secret_key`: string. AWS Secret Key.
+- `access_key`: string. AWS Access Key ID. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/aws_access_key`.
+- `secret_key`: string. AWS Secret Key. This can be a secret stored in a [vault](https://www.vaultproject.io/) using the form `/vault/path/key` e.g. `/vault/secret/cashier/aws_secret_key`.
+
+## vault
+Vault support is currently a work-in-progress.
+
+- `address`: string. URL to the vault server.
+- `token`: string. Auth token for the vault.
# Usage
Cashier comes in two parts, a [cli](cmd/cashier) and a [server](cmd/cashierd).
diff --git a/cmd/cashierd/main.go b/cmd/cashierd/main.go
index 563f4fd..de8b45f 100644
--- a/cmd/cashierd/main.go
+++ b/cmd/cashierd/main.go
@@ -28,13 +28,14 @@ import (
"github.com/nsheridan/cashier/server/auth"
"github.com/nsheridan/cashier/server/auth/github"
"github.com/nsheridan/cashier/server/auth/google"
- "github.com/nsheridan/cashier/server/certutil"
"github.com/nsheridan/cashier/server/config"
- "github.com/nsheridan/cashier/server/fs"
"github.com/nsheridan/cashier/server/signer"
"github.com/nsheridan/cashier/server/static"
"github.com/nsheridan/cashier/server/store"
"github.com/nsheridan/cashier/server/templates"
+ "github.com/nsheridan/cashier/server/util"
+ "github.com/nsheridan/cashier/server/wkfs/s3fs"
+ "github.com/nsheridan/cashier/server/wkfs/vaultfs"
"github.com/sid77/drop"
)
@@ -167,7 +168,7 @@ func signHandler(a *appContext, w http.ResponseWriter, r *http.Request) (int, er
}
json.NewEncoder(w).Encode(&lib.SignResponse{
Status: "ok",
- Response: certutil.GetPublicKey(cert),
+ Response: util.GetPublicKey(cert),
})
return http.StatusOK, nil
}
@@ -333,7 +334,10 @@ func main() {
log.Fatal(err)
}
- fs.Register(config.AWS)
+ // Register well-known filesystems.
+ s3fs.Register(config.AWS)
+ vaultfs.Register(config.Vault)
+
signer, err := signer.New(config.SSH)
if err != nil {
log.Fatal(err)
@@ -378,7 +382,7 @@ func main() {
case "github":
authprovider, err = github.New(config.Auth)
default:
- log.Fatalln("Unknown provider %s", config.Auth.Provider)
+ log.Fatalf("Unknown provider %s\n", config.Auth.Provider)
}
if err != nil {
log.Fatal(err)
diff --git a/example-server.conf b/example-server.conf
index fcb6558..9a20c9d 100644
--- a/example-server.conf
+++ b/example-server.conf
@@ -32,10 +32,16 @@ ssh {
permissions = ["permit-pty", "permit-X11-forwarding", "permit-agent-forwarding", "permit-port-forwarding", "permit-user-rc"] # Permissions associated with a certificate
}
-# Optional AWS config. if an aws config is present, the signing key can be read from S3 using the syntax `/s3/bucket/path/to/signing.key`.
+# Optional AWS config. If an AWS config is present, then files (e.g. signing key or tls cert) can be read from S3 using the syntax `/s3/bucket/path/to/signing.key`.
# These can also be set configured using the standard aws-sdk environment variables, IAM roles etc. https://github.com/aws/aws-sdk-go/wiki/configuring-sdk
aws {
region = "eu-west-1"
access_key = "abcdef"
secret_key = "xyz123"
}
+
+# Optional Vault config. If a vault config is present, then files (e.g. signing key or tls cert) can be read from a vault server using the syntax `/vault/secret/service/key_name`.
+vault {
+ address = "https://127.0.0.1:8200"
+ token = "83f01274-c6f0-4dae-aab9-13a6fc62772e"
+}
diff --git a/server/config/config.go b/server/config/config.go
index 3587e9f..9678f6d 100644
--- a/server/config/config.go
+++ b/server/config/config.go
@@ -7,6 +7,7 @@ import (
"strconv"
"github.com/hashicorp/go-multierror"
+ "github.com/nsheridan/cashier/server/helpers/vault"
"github.com/spf13/viper"
)
@@ -16,6 +17,7 @@ type Config struct {
Auth *Auth `mapstructure:"auth"`
SSH *SSH `mapstructure:"ssh"`
AWS *AWS `mapstructure:"aws"`
+ Vault *Vault `mapstructure:"vault"`
}
// unmarshalled holds the raw config.
@@ -24,6 +26,7 @@ type unmarshalled struct {
Auth []Auth `mapstructure:"auth"`
SSH []SSH `mapstructure:"ssh"`
AWS []AWS `mapstructure:"aws"`
+ Vault []Vault `mapstructure:"vault"`
}
// Server holds the configuration specific to the web server and sessions.
@@ -66,21 +69,31 @@ type AWS struct {
SecretKey string `mapstructure:"secret_key"`
}
+// Vault holds Hashicorp Vault configuration.
+type Vault struct {
+ Address string `mapstructure:"address"`
+ Token string `mapstructure:"token"`
+}
+
func verifyConfig(u *unmarshalled) error {
var err error
if len(u.SSH) == 0 {
- err = multierror.Append(errors.New("missing ssh config block"))
+ err = multierror.Append(errors.New("missing ssh config section"))
}
if len(u.Auth) == 0 {
- err = multierror.Append(errors.New("missing auth config block"))
+ err = multierror.Append(errors.New("missing auth config section"))
}
if len(u.Server) == 0 {
- err = multierror.Append(errors.New("missing server config block"))
+ err = multierror.Append(errors.New("missing server config section"))
}
if len(u.AWS) == 0 {
// AWS config is optional
u.AWS = append(u.AWS, AWS{})
}
+ if len(u.Vault) == 0 {
+ // Vault config is optional
+ u.Vault = append(u.Vault, Vault{})
+ }
return err
}
@@ -106,6 +119,53 @@ func setFromEnv(u *unmarshalled) {
}
}
+func setFromVault(u *unmarshalled) error {
+ if len(u.Vault) == 0 || u.Vault[0].Token == "" || u.Vault[0].Address == "" {
+ return nil
+ }
+ v, err := vault.NewClient(u.Vault[0].Address, u.Vault[0].Token)
+ if err != nil {
+ return err
+ }
+ get := func(value string) (string, error) {
+ if value[:7] == "/vault/" {
+ return v.Read(value)
+ }
+ return value, nil
+ }
+ if len(u.Auth) > 0 {
+ u.Auth[0].OauthClientID, err = get(u.Auth[0].OauthClientID)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ u.Auth[0].OauthClientSecret, err = get(u.Auth[0].OauthClientSecret)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ }
+ if len(u.Server) > 0 {
+ u.Server[0].CSRFSecret, err = get(u.Server[0].CSRFSecret)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ u.Server[0].CookieSecret, err = get(u.Server[0].CookieSecret)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ }
+ if len(u.AWS) > 0 {
+ u.AWS[0].AccessKey, err = get(u.AWS[0].AccessKey)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ u.AWS[0].SecretKey, err = get(u.AWS[0].SecretKey)
+ if err != nil {
+ err = multierror.Append(err)
+ }
+ }
+ return err
+}
+
// ReadConfig parses a JSON configuration file into a Config struct.
func ReadConfig(r io.Reader) (*Config, error) {
u := &unmarshalled{}
@@ -118,6 +178,9 @@ func ReadConfig(r io.Reader) (*Config, error) {
return nil, err
}
setFromEnv(u)
+ if err := setFromVault(u); err != nil {
+ return nil, err
+ }
if err := verifyConfig(u); err != nil {
return nil, err
}
@@ -126,5 +189,6 @@ func ReadConfig(r io.Reader) (*Config, error) {
Auth: &u.Auth[0],
SSH: &u.SSH[0],
AWS: &u.AWS[0],
+ Vault: &u.Vault[0],
}, nil
}
diff --git a/server/helpers/vault/vault.go b/server/helpers/vault/vault.go
new file mode 100644
index 0000000..bec18b9
--- /dev/null
+++ b/server/helpers/vault/vault.go
@@ -0,0 +1,55 @@
+package vault
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/vault/api"
+)
+
+// NewClient returns a new vault client.
+func NewClient(address, token string) (*Client, error) {
+ config := &api.Config{
+ Address: address,
+ }
+ client, err := api.NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+ client.SetToken(token)
+ return &Client{
+ vault: client,
+ }, nil
+}
+
+func parseName(name string) (path, key string) {
+ name = strings.TrimPrefix(name, "/vault/")
+ i := strings.LastIndex(name, "/")
+ if i < 0 {
+ return name, ""
+ }
+ return name[:i], name[i+1:]
+}
+
+// Client is a simple client for vault.
+type Client struct {
+ vault *api.Client
+}
+
+// Read returns a secret for a given path and key of the form `/vault/secret/path/key`.
+// If the requested key cannot be read the original string is returned along with an error.
+func (c *Client) Read(value string) (string, error) {
+ p, k := parseName(value)
+ data, err := c.vault.Logical().Read(p)
+ if err != nil {
+ return value, err
+ }
+ if data == nil {
+ return value, fmt.Errorf("no such key %s", k)
+ }
+ secret, ok := data.Data[k]
+ if !ok {
+ return value, fmt.Errorf("no such key %s", k)
+ }
+ return secret.(string), nil
+}
diff --git a/server/store/store.go b/server/store/store.go
index a846bda..c039d3c 100644
--- a/server/store/store.go
+++ b/server/store/store.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/crypto/ssh"
- "github.com/nsheridan/cashier/server/certutil"
+ "github.com/nsheridan/cashier/server/util"
)
// CertStorer records issued certs in a persistent store for audit and
@@ -40,6 +40,6 @@ func parseCertificate(cert *ssh.Certificate) *CertRecord {
Principals: cert.ValidPrincipals,
CreatedAt: parseTime(cert.ValidAfter),
Expires: parseTime(cert.ValidBefore),
- Raw: certutil.GetPublicKey(cert),
+ Raw: util.GetPublicKey(cert),
}
}
diff --git a/server/certutil/util.go b/server/util/util.go
index eb1900b..10f5eca 100644
--- a/server/certutil/util.go
+++ b/server/util/util.go
@@ -1,4 +1,4 @@
-package certutil
+package util
import "golang.org/x/crypto/ssh"
diff --git a/server/certutil/util_test.go b/server/util/util_test.go
index df42b90..d294d86 100644
--- a/server/certutil/util_test.go
+++ b/server/util/util_test.go
@@ -1,4 +1,4 @@
-package certutil
+package util
import (
"testing"
diff --git a/server/fs/s3.go b/server/wkfs/s3fs/s3.go
index e16e7d6..a71d874 100644
--- a/server/fs/s3.go
+++ b/server/wkfs/s3fs/s3.go
@@ -1,4 +1,4 @@
-package fs
+package s3fs
import (
"bytes"
diff --git a/server/wkfs/vaultfs/vault.go b/server/wkfs/vaultfs/vault.go
new file mode 100644
index 0000000..6f11057
--- /dev/null
+++ b/server/wkfs/vaultfs/vault.go
@@ -0,0 +1,91 @@
+package vaultfs
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path"
+ "time"
+
+ "github.com/nsheridan/cashier/server/config"
+ "github.com/nsheridan/cashier/server/helpers/vault"
+ "go4.org/wkfs"
+)
+
+// Register the /vault/ filesystem as a well-known filesystem.
+func Register(vc *config.Vault) {
+ client, err := vault.NewClient(vc.Address, vc.Token)
+ if err != nil {
+ registerBrokenFS(err)
+ return
+ }
+ wkfs.RegisterFS("/vault/", &vaultFS{
+ client: client,
+ })
+}
+
+func registerBrokenFS(err error) {
+ wkfs.RegisterFS("/vault/", &vaultFS{
+ err: err,
+ })
+}
+
+type vaultFS struct {
+ err error
+ client *vault.Client
+}
+
+// Open opens the named file for reading.
+func (fs *vaultFS) Open(name string) (wkfs.File, error) {
+ secret, err := fs.client.Read(name)
+ if err != nil {
+ return nil, err
+ }
+ return &file{
+ name: name,
+ Reader: bytes.NewReader([]byte(secret)),
+ }, nil
+}
+
+func (fs *vaultFS) Stat(name string) (os.FileInfo, error) { return fs.Lstat(name) }
+func (fs *vaultFS) Lstat(name string) (os.FileInfo, error) {
+ secret, err := fs.client.Read(name)
+ if err != nil {
+ return nil, err
+ }
+ return &statInfo{
+ name: path.Base(name),
+ size: int64(len(secret)),
+ }, nil
+}
+
+func (fs *vaultFS) MkdirAll(path string, perm os.FileMode) error { return nil }
+
+func (fs *vaultFS) OpenFile(name string, flag int, perm os.FileMode) (wkfs.FileWriter, error) {
+ return nil, errors.New("not implemented")
+}
+
+type statInfo struct {
+ name string
+ size int64
+ isDir bool
+ modtime time.Time
+}
+
+func (si *statInfo) IsDir() bool { return si.isDir }
+func (si *statInfo) ModTime() time.Time { return si.modtime }
+func (si *statInfo) Mode() os.FileMode { return 0644 }
+func (si *statInfo) Name() string { return path.Base(si.name) }
+func (si *statInfo) Size() int64 { return si.size }
+func (si *statInfo) Sys() interface{} { return nil }
+
+type file struct {
+ name string
+ *bytes.Reader
+}
+
+func (*file) Close() error { return nil }
+func (f *file) Name() string { return path.Base(f.name) }
+func (f *file) Stat() (os.FileInfo, error) {
+ return nil, errors.New("Stat not implemented on /vault/ files")
+}
diff --git a/vendor/github.com/fatih/structs/LICENSE b/vendor/github.com/fatih/structs/LICENSE
new file mode 100644
index 0000000..34504e4
--- /dev/null
+++ b/vendor/github.com/fatih/structs/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Fatih Arslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE. \ No newline at end of file
diff --git a/vendor/github.com/fatih/structs/README.md b/vendor/github.com/fatih/structs/README.md
new file mode 100644
index 0000000..44e0100
--- /dev/null
+++ b/vendor/github.com/fatih/structs/README.md
@@ -0,0 +1,163 @@
+# Structs [![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/fatih/structs) [![Build Status](http://img.shields.io/travis/fatih/structs.svg?style=flat-square)](https://travis-ci.org/fatih/structs) [![Coverage Status](http://img.shields.io/coveralls/fatih/structs.svg?style=flat-square)](https://coveralls.io/r/fatih/structs)
+
+Structs contains various utilities to work with Go (Golang) structs. It was
+initially used by me to convert a struct into a `map[string]interface{}`. With
+time I've added other utilities for structs. It's basically a high level
+package based on primitives from the reflect package. Feel free to add new
+functions or improve the existing code.
+
+## Install
+
+```bash
+go get github.com/fatih/structs
+```
+
+## Usage and Examples
+
+Just like the standard lib `strings`, `bytes` and co packages, `structs` has
+many global functions to manipulate or organize your struct data. Lets define
+and declare a struct:
+
+```go
+type Server struct {
+ Name string `json:"name,omitempty"`
+ ID int
+ Enabled bool
+ users []string // not exported
+ http.Server // embedded
+}
+
+server := &Server{
+ Name: "gopher",
+ ID: 123456,
+ Enabled: true,
+}
+```
+
+```go
+// Convert a struct to a map[string]interface{}
+// => {"Name":"gopher", "ID":123456, "Enabled":true}
+m := structs.Map(server)
+
+// Convert the values of a struct to a []interface{}
+// => ["gopher", 123456, true]
+v := structs.Values(server)
+
+// Convert the names of a struct to a []string
+// (see "Names methods" for more info about fields)
+n := structs.Names(server)
+
+// Convert the values of a struct to a []*Field
+// (see "Field methods" for more info about fields)
+f := structs.Fields(server)
+
+// Return the struct name => "Server"
+n := structs.Name(server)
+
+// Check if any field of a struct is initialized or not.
+h := structs.HasZero(server)
+
+// Check if all fields of a struct is initialized or not.
+z := structs.IsZero(server)
+
+// Check if server is a struct or a pointer to struct
+i := structs.IsStruct(server)
+```
+
+### Struct methods
+
+The structs functions can be also used as independent methods by creating a new
+`*structs.Struct`. This is handy if you want to have more control over the
+structs (such as retrieving a single Field).
+
+```go
+// Create a new struct type:
+s := structs.New(server)
+
+m := s.Map() // Get a map[string]interface{}
+v := s.Values() // Get a []interface{}
+f := s.Fields() // Get a []*Field
+n := s.Names() // Get a []string
+f := s.Field(name) // Get a *Field based on the given field name
+f, ok := s.FieldOk(name) // Get a *Field based on the given field name
+n := s.Name() // Get the struct name
+h := s.HasZero() // Check if any field is initialized
+z := s.IsZero() // Check if all fields are initialized
+```
+
+### Field methods
+
+We can easily examine a single Field for more detail. Below you can see how we
+get and interact with various field methods:
+
+
+```go
+s := structs.New(server)
+
+// Get the Field struct for the "Name" field
+name := s.Field("Name")
+
+// Get the underlying value, value => "gopher"
+value := name.Value().(string)
+
+// Set the field's value
+name.Set("another gopher")
+
+// Get the field's kind, kind => "string"
+name.Kind()
+
+// Check if the field is exported or not
+if name.IsExported() {
+ fmt.Println("Name field is exported")
+}
+
+// Check if the value is a zero value, such as "" for string, 0 for int
+if !name.IsZero() {
+ fmt.Println("Name is initialized")
+}
+
+// Check if the field is an anonymous (embedded) field
+if !name.IsEmbedded() {
+ fmt.Println("Name is not an embedded field")
+}
+
+// Get the Field's tag value for tag name "json", tag value => "name,omitempty"
+tagValue := name.Tag("json")
+```
+
+Nested structs are supported too:
+
+```go
+addrField := s.Field("Server").Field("Addr")
+
+// Get the value for addr
+a := addrField.Value().(string)
+
+// Or get all fields
+httpServer := s.Field("Server").Fields()
+```
+
+We can also get a slice of Fields from the Struct type to iterate over all
+fields. This is handy if you wish to examine all fields:
+
+```go
+s := structs.New(server)
+
+for _, f := range s.Fields() {
+ fmt.Printf("field name: %+v\n", f.Name())
+
+ if f.IsExported() {
+ fmt.Printf("value : %+v\n", f.Value())
+ fmt.Printf("is zero : %+v\n", f.IsZero())
+ }
+}
+```
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * [Cihangir Savas](https://github.com/cihangir)
+
+## License
+
+The MIT License (MIT) - see LICENSE.md for more details
diff --git a/vendor/github.com/fatih/structs/field.go b/vendor/github.com/fatih/structs/field.go
new file mode 100644
index 0000000..e697832
--- /dev/null
+++ b/vendor/github.com/fatih/structs/field.go
@@ -0,0 +1,141 @@
+package structs
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var (
+ errNotExported = errors.New("field is not exported")
+ errNotSettable = errors.New("field is not settable")
+)
+
+// Field represents a single struct field that encapsulates high level
+// functions around the field.
+type Field struct {
+ value reflect.Value
+ field reflect.StructField
+ defaultTag string
+}
+
+// Tag returns the value associated with key in the tag string. If there is no
+// such key in the tag, Tag returns the empty string.
+func (f *Field) Tag(key string) string {
+ return f.field.Tag.Get(key)
+}
+
+// Value returns the underlying value of the field. It panics if the field
+// is not exported.
+func (f *Field) Value() interface{} {
+ return f.value.Interface()
+}
+
+// IsEmbedded returns true if the given field is an anonymous field (embedded)
+func (f *Field) IsEmbedded() bool {
+ return f.field.Anonymous
+}
+
+// IsExported returns true if the given field is exported.
+func (f *Field) IsExported() bool {
+ return f.field.PkgPath == ""
+}
+
+// IsZero returns true if the given field is not initialized (has a zero value).
+// It panics if the field is not exported.
+func (f *Field) IsZero() bool {
+ zero := reflect.Zero(f.value.Type()).Interface()
+ current := f.Value()
+
+ return reflect.DeepEqual(current, zero)
+}
+
+// Name returns the name of the given field
+func (f *Field) Name() string {
+ return f.field.Name
+}
+
+// Kind returns the fields kind, such as "string", "map", "bool", etc ..
+func (f *Field) Kind() reflect.Kind {
+ return f.value.Kind()
+}
+
+// Set sets the field to given value v. It returns an error if the field is not
+// settable (not addressable or not exported) or if the given value's type
+// doesn't match the fields type.
+func (f *Field) Set(val interface{}) error {
+ // we can't set unexported fields, so be sure this field is exported
+ if !f.IsExported() {
+ return errNotExported
+ }
+
+ // do we get here? not sure...
+ if !f.value.CanSet() {
+ return errNotSettable
+ }
+
+ given := reflect.ValueOf(val)
+
+ if f.value.Kind() != given.Kind() {
+ return fmt.Errorf("wrong kind. got: %s want: %s", given.Kind(), f.value.Kind())
+ }
+
+ f.value.Set(given)
+ return nil
+}
+
+// Zero sets the field to its zero value. It returns an error if the field is not
+// settable (not addressable or not exported).
+func (f *Field) Zero() error {
+ zero := reflect.Zero(f.value.Type()).Interface()
+ return f.Set(zero)
+}
+
+// Fields returns a slice of Fields. This is particular handy to get the fields
+// of a nested struct . A struct tag with the content of "-" ignores the
+// checking of that particular field. Example:
+//
+// // Field is ignored by this package.
+// Field *http.Request `structs:"-"`
+//
+// It panics if field is not exported or if field's kind is not struct
+func (f *Field) Fields() []*Field {
+ return getFields(f.value, f.defaultTag)
+}
+
+// Field returns the field from a nested struct. It panics if the nested struct
+// is not exported or if the field was not found.
+func (f *Field) Field(name string) *Field {
+ field, ok := f.FieldOk(name)
+ if !ok {
+ panic("field not found")
+ }
+
+ return field
+}
+
+// FieldOk returns the field from a nested struct. The boolean returns whether
+// the field was found (true) or not (false).
+func (f *Field) FieldOk(name string) (*Field, bool) {
+ value := &f.value
+ // value must be settable so we need to make sure it holds the address of the
+ // variable and not a copy, so we can pass the pointer to strctVal instead of a
+ // copy (which is not assigned to any variable, hence not settable).
+ // see "https://blog.golang.org/laws-of-reflection#TOC_8."
+ if f.value.Kind() != reflect.Ptr {
+ a := f.value.Addr()
+ value = &a
+ }
+ v := strctVal(value.Interface())
+ t := v.Type()
+
+ field, ok := t.FieldByName(name)
+ if !ok {
+ return nil, false
+ }
+
+ return &Field{
+ field: field,
+ value: v.FieldByName(name),
+ }, true
+}
diff --git a/vendor/github.com/fatih/structs/structs.go b/vendor/github.com/fatih/structs/structs.go
new file mode 100644
index 0000000..06da620
--- /dev/null
+++ b/vendor/github.com/fatih/structs/structs.go
@@ -0,0 +1,579 @@
+// Package structs contains various utility functions to work with structs.
+package structs
+
+import (
+ "fmt"
+
+ "reflect"
+)
+
+var (
+ // DefaultTagName is the default tag name for struct fields which provides
+ // a more granular way to tweak certain structs. Look up the necessary
+ // functions for more info.
+ DefaultTagName = "structs" // struct's field default tag name
+)
+
+// Struct encapsulates a struct type to provide several high level functions
+// around the struct.
+type Struct struct {
+ raw interface{}
+ value reflect.Value
+ TagName string
+}
+
+// New returns a new *Struct with the struct s. It panics if the s's kind is
+// not struct.
+func New(s interface{}) *Struct {
+ return &Struct{
+ raw: s,
+ value: strctVal(s),
+ TagName: DefaultTagName,
+ }
+}
+
+// Map converts the given struct to a map[string]interface{}, where the keys
+// of the map are the field names and the values of the map the associated
+// values of the fields. The default key string is the struct field name but
+// can be changed in the struct field's tag value. The "structs" key in the
+// struct's field tag value is the key name. Example:
+//
+// // Field appears in map as key "myName".
+// Name string `structs:"myName"`
+//
+// A tag value with the content of "-" ignores that particular field. Example:
+//
+// // Field is ignored by this package.
+// Field bool `structs:"-"`
+//
+// A tag value with the content of "string" uses the stringer to get the value. Example:
+//
+// // The value will be output of Animal's String() func.
+// // Map will panic if Animal does not implement String().
+// Field *Animal `structs:"field,string"`
+//
+// A tag value with the option of "flatten" used in a struct field is to flatten its fields
+// in the output map. Example:
+//
+// // The FieldStruct's fields will be flattened into the output map.
+// FieldStruct time.Time `structs:",flatten"`
+//
+// A tag value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+// // Field is not processed further by this package.
+// Field time.Time `structs:"myName,omitnested"`
+// Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field if
+// the field value is empty. Example:
+//
+// // Field appears in map as key "myName", but the field is
+// // skipped if empty.
+// Field string `structs:"myName,omitempty"`
+//
+// // Field appears in map as key "Field" (the default), but
+// // the field is skipped if empty.
+// Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected.
+func (s *Struct) Map() map[string]interface{} {
+ out := make(map[string]interface{})
+ s.FillMap(out)
+ return out
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func (s *Struct) FillMap(out map[string]interface{}) {
+ if out == nil {
+ return
+ }
+
+ fields := s.structFields()
+
+ for _, field := range fields {
+ name := field.Name
+ val := s.value.FieldByName(name)
+ isSubStruct := false
+ var finalVal interface{}
+
+ tagName, tagOpts := parseTag(field.Tag.Get(s.TagName))
+ if tagName != "" {
+ name = tagName
+ }
+
+ // if the value is a zero value and the field is marked as omitempty do
+ // not include
+ if tagOpts.Has("omitempty") {
+ zero := reflect.Zero(val.Type()).Interface()
+ current := val.Interface()
+
+ if reflect.DeepEqual(current, zero) {
+ continue
+ }
+ }
+
+ if !tagOpts.Has("omitnested") {
+ finalVal = s.nested(val)
+
+ v := reflect.ValueOf(val.Interface())
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Map, reflect.Struct:
+ isSubStruct = true
+ }
+ } else {
+ finalVal = val.Interface()
+ }
+
+ if tagOpts.Has("string") {
+ s, ok := val.Interface().(fmt.Stringer)
+ if ok {
+ out[name] = s.String()
+ }
+ continue
+ }
+
+ if isSubStruct && (tagOpts.Has("flatten")) {
+ for k := range finalVal.(map[string]interface{}) {
+ out[k] = finalVal.(map[string]interface{})[k]
+ }
+ } else {
+ out[name] = finalVal
+ }
+ }
+}
+
+// Values converts the given s struct's field values to a []interface{}. A
+// struct tag with the content of "-" ignores that particular field.
+// Example:
+//
+// // Field is ignored by this package.
+// Field int `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+// // Fields is not processed further by this package.
+// Field time.Time `structs:",omitnested"`
+// Field *http.Request `structs:",omitnested"`
+//
+// A tag value with the option of "omitempty" ignores that particular field and
+// is not added to the values if the field value is empty. Example:
+//
+// // Field is skipped if empty
+// Field string `structs:",omitempty"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected.
+func (s *Struct) Values() []interface{} {
+ fields := s.structFields()
+
+ var t []interface{}
+
+ for _, field := range fields {
+ val := s.value.FieldByName(field.Name)
+
+ _, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+ // if the value is a zero value and the field is marked as omitempty do
+ // not include
+ if tagOpts.Has("omitempty") {
+ zero := reflect.Zero(val.Type()).Interface()
+ current := val.Interface()
+
+ if reflect.DeepEqual(current, zero) {
+ continue
+ }
+ }
+
+ if tagOpts.Has("string") {
+ s, ok := val.Interface().(fmt.Stringer)
+ if ok {
+ t = append(t, s.String())
+ }
+ continue
+ }
+
+ if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+ // look out for embedded structs, and convert them to a
+ // []interface{} to be added to the final values slice
+ for _, embeddedVal := range Values(val.Interface()) {
+ t = append(t, embeddedVal)
+ }
+ } else {
+ t = append(t, val.Interface())
+ }
+ }
+
+ return t
+}
+
+// Fields returns a slice of Fields. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+// // Field is ignored by this package.
+// Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Fields() []*Field {
+ return getFields(s.value, s.TagName)
+}
+
+// Names returns a slice of field names. A struct tag with the content of "-"
+// ignores the checking of that particular field. Example:
+//
+// // Field is ignored by this package.
+// Field bool `structs:"-"`
+//
+// It panics if s's kind is not struct.
+func (s *Struct) Names() []string {
+ fields := getFields(s.value, s.TagName)
+
+ names := make([]string, len(fields))
+
+ for i, field := range fields {
+ names[i] = field.Name()
+ }
+
+ return names
+}
+
+func getFields(v reflect.Value, tagName string) []*Field {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ t := v.Type()
+
+ var fields []*Field
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+
+ if tag := field.Tag.Get(tagName); tag == "-" {
+ continue
+ }
+
+ f := &Field{
+ field: field,
+ value: v.FieldByName(field.Name),
+ }
+
+ fields = append(fields, f)
+
+ }
+
+ return fields
+}
+
+// Field returns a new Field struct that provides several high level functions
+// around a single struct field entity. It panics if the field is not found.
+func (s *Struct) Field(name string) *Field {
+ f, ok := s.FieldOk(name)
+ if !ok {
+ panic("field not found")
+ }
+
+ return f
+}
+
+// FieldOk returns a new Field struct that provides several high level functions
+// around a single struct field entity. The boolean returns true if the field
+// was found.
+func (s *Struct) FieldOk(name string) (*Field, bool) {
+ t := s.value.Type()
+
+ field, ok := t.FieldByName(name)
+ if !ok {
+ return nil, false
+ }
+
+ return &Field{
+ field: field,
+ value: s.value.FieldByName(name),
+ defaultTag: s.TagName,
+ }, true
+}
+
+// IsZero returns true if all fields in a struct is a zero value (not
+// initialized) A struct tag with the content of "-" ignores the checking of
+// that particular field. Example:
+//
+// // Field is ignored by this package.
+// Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+// // Field is not processed further by this package.
+// Field time.Time `structs:"myName,omitnested"`
+// Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected. It panics if s's kind is not struct.
+func (s *Struct) IsZero() bool {
+ fields := s.structFields()
+
+ for _, field := range fields {
+ val := s.value.FieldByName(field.Name)
+
+ _, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+ if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+ ok := IsZero(val.Interface())
+ if !ok {
+ return false
+ }
+
+ continue
+ }
+
+ // zero value of the given field, such as "" for string, 0 for int
+ zero := reflect.Zero(val.Type()).Interface()
+
+ // current value of the given field
+ current := val.Interface()
+
+ if !reflect.DeepEqual(current, zero) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// HasZero returns true if a field in a struct is not initialized (zero value).
+// A struct tag with the content of "-" ignores the checking of that particular
+// field. Example:
+//
+// // Field is ignored by this package.
+// Field bool `structs:"-"`
+//
+// A value with the option of "omitnested" stops iterating further if the type
+// is a struct. Example:
+//
+// // Field is not processed further by this package.
+// Field time.Time `structs:"myName,omitnested"`
+// Field *http.Request `structs:",omitnested"`
+//
+// Note that only exported fields of a struct can be accessed, non exported
+// fields will be neglected. It panics if s's kind is not struct.
+func (s *Struct) HasZero() bool {
+ fields := s.structFields()
+
+ for _, field := range fields {
+ val := s.value.FieldByName(field.Name)
+
+ _, tagOpts := parseTag(field.Tag.Get(s.TagName))
+
+ if IsStruct(val.Interface()) && !tagOpts.Has("omitnested") {
+ ok := HasZero(val.Interface())
+ if ok {
+ return true
+ }
+
+ continue
+ }
+
+ // zero value of the given field, such as "" for string, 0 for int
+ zero := reflect.Zero(val.Type()).Interface()
+
+ // current value of the given field
+ current := val.Interface()
+
+ if reflect.DeepEqual(current, zero) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Name returns the structs's type name within its package. For more info refer
+// to Name() function.
+func (s *Struct) Name() string {
+ return s.value.Type().Name()
+}
+
+// structFields returns the exported struct fields for a given s struct. This
+// is a convenient helper method to avoid duplicate code in some of the
+// functions.
+func (s *Struct) structFields() []reflect.StructField {
+ t := s.value.Type()
+
+ var f []reflect.StructField
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ // we can't access the value of unexported fields
+ if field.PkgPath != "" {
+ continue
+ }
+
+ // don't check if it's omitted
+ if tag := field.Tag.Get(s.TagName); tag == "-" {
+ continue
+ }
+
+ f = append(f, field)
+ }
+
+ return f
+}
+
+func strctVal(s interface{}) reflect.Value {
+ v := reflect.ValueOf(s)
+
+ // if pointer, get the underlying element
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ if v.Kind() != reflect.Struct {
+ panic("not struct")
+ }
+
+ return v
+}
+
+// Map converts the given struct to a map[string]interface{}. For more info
+// refer to Struct types Map() method. It panics if s's kind is not struct.
+func Map(s interface{}) map[string]interface{} {
+ return New(s).Map()
+}
+
+// FillMap is the same as Map. Instead of returning the output, it fills the
+// given map.
+func FillMap(s interface{}, out map[string]interface{}) {
+ New(s).FillMap(out)
+}
+
+// Values converts the given struct to a []interface{}. For more info refer to
+// Struct types Values() method. It panics if s's kind is not struct.
+func Values(s interface{}) []interface{} {
+ return New(s).Values()
+}
+
+// Fields returns a slice of *Field. For more info refer to Struct types
+// Fields() method. It panics if s's kind is not struct.
+func Fields(s interface{}) []*Field {
+ return New(s).Fields()
+}
+
+// Names returns a slice of field names. For more info refer to Struct types
+// Names() method. It panics if s's kind is not struct.
+func Names(s interface{}) []string {
+ return New(s).Names()
+}
+
+// IsZero returns true if all fields is equal to a zero value. For more info
+// refer to Struct types IsZero() method. It panics if s's kind is not struct.
+func IsZero(s interface{}) bool {
+ return New(s).IsZero()
+}
+
+// HasZero returns true if any field is equal to a zero value. For more info
+// refer to Struct types HasZero() method. It panics if s's kind is not struct.
+func HasZero(s interface{}) bool {
+ return New(s).HasZero()
+}
+
+// IsStruct returns true if the given variable is a struct or a pointer to
+// struct.
+func IsStruct(s interface{}) bool {
+ v := reflect.ValueOf(s)
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ // uninitialized zero value of a struct
+ if v.Kind() == reflect.Invalid {
+ return false
+ }
+
+ return v.Kind() == reflect.Struct
+}
+
+// Name returns the structs's type name within its package. It returns an
+// empty string for unnamed types. It panics if s's kind is not struct.
+func Name(s interface{}) string {
+ return New(s).Name()
+}
+
+// nested retrieves recursively all types for the given value and returns the
+// nested value.
+func (s *Struct) nested(val reflect.Value) interface{} {
+ var finalVal interface{}
+
+ v := reflect.ValueOf(val.Interface())
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ n := New(val.Interface())
+ n.TagName = s.TagName
+ m := n.Map()
+
+ // do not add the converted value if there are no exported fields, ie:
+ // time.Time
+ if len(m) == 0 {
+ finalVal = val.Interface()
+ } else {
+ finalVal = m
+ }
+ case reflect.Map:
+ v := val.Type().Elem()
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ // only iterate over struct types, ie: map[string]StructType,
+ // map[string][]StructType,
+ if v.Kind() == reflect.Struct ||
+ (v.Kind() == reflect.Slice && v.Elem().Kind() == reflect.Struct) {
+ m := make(map[string]interface{}, val.Len())
+ for _, k := range val.MapKeys() {
+ m[k.String()] = s.nested(val.MapIndex(k))
+ }
+ finalVal = m
+ break
+ }
+
+ // TODO(arslan): should this be optional?
+ finalVal = val.Interface()
+ case reflect.Slice, reflect.Array:
+ if val.Type().Kind() == reflect.Interface {
+ finalVal = val.Interface()
+ break
+ }
+
+ // TODO(arslan): should this be optional?
+ // do not iterate of non struct types, just pass the value. Ie: []int,
+ // []string, co... We only iterate further if it's a struct.
+ // i.e []foo or []*foo
+ if val.Type().Elem().Kind() != reflect.Struct &&
+ !(val.Type().Elem().Kind() == reflect.Ptr &&
+ val.Type().Elem().Elem().Kind() == reflect.Struct) {
+ finalVal = val.Interface()
+ break
+ }
+
+ slices := make([]interface{}, val.Len(), val.Len())
+ for x := 0; x < val.Len(); x++ {
+ slices[x] = s.nested(val.Index(x))
+ }
+ finalVal = slices
+ default:
+ finalVal = val.Interface()
+ }
+
+ return finalVal
+}
diff --git a/vendor/github.com/fatih/structs/tags.go b/vendor/github.com/fatih/structs/tags.go
new file mode 100644
index 0000000..8859341
--- /dev/null
+++ b/vendor/github.com/fatih/structs/tags.go
@@ -0,0 +1,32 @@
+package structs
+
+import "strings"
+
+// tagOptions contains a slice of tag options
+type tagOptions []string
+
+// Has returns true if the given option is available in tagOptions
+func (t tagOptions) Has(opt string) bool {
+ for _, tagOpt := range t {
+ if tagOpt == opt {
+ return true
+ }
+ }
+
+ return false
+}
+
+// parseTag splits a struct field's tag into its name and a list of options
+// which comes after a name. A tag is in the form of: "name,option1,option2".
+// The name can be omitted.
+func parseTag(tag string) (string, tagOptions) {
+ // tag is one of followings:
+ // ""
+ // "name"
+ // "name,opt"
+ // "name,opt,opt2"
+ // ",opt"
+
+ res := strings.Split(tag, ",")
+ return res[0], res[1:]
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md
new file mode 100644
index 0000000..036e531
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md
@@ -0,0 +1,30 @@
+# cleanhttp
+
+Functions for accessing "clean" Go http.Client values
+
+-------------
+
+The Go standard library contains a default `http.Client` called
+`http.DefaultClient`. It is a common idiom in Go code to start with
+`http.DefaultClient` and tweak it as necessary, and in fact, this is
+encouraged; from the `http` package documentation:
+
+> The Client's Transport typically has internal state (cached TCP connections),
+so Clients should be reused instead of created as needed. Clients are safe for
+concurrent use by multiple goroutines.
+
+Unfortunately, this is a shared value, and it is not uncommon for libraries to
+assume that they are free to modify it at will. With enough dependencies, it
+can be very easy to encounter strange problems and race conditions due to
+manipulation of this shared value across libraries and goroutines (clients are
+safe for concurrent use, but writing values to the client struct itself is not
+protected).
+
+Making things worse is the fact that a bare `http.Client` will use a default
+`http.Transport` called `http.DefaultTransport`, which is another global value
+that behaves the same way. So it is not simply enough to replace
+`http.DefaultClient` with `&http.Client{}`.
+
+This repository provides some simple functions to get a "clean" `http.Client`
+-- one that uses the same default values as the Go standard library, but
+returns a client that does not share any state with other clients.
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 0000000..f4596d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,53 @@
+package cleanhttp
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// DefaultTransport returns a new http.Transport with the same default values
+// as http.DefaultTransport, but with idle connections and keepalives disabled.
+func DefaultTransport() *http.Transport {
+ transport := DefaultPooledTransport()
+ transport.DisableKeepAlives = true
+ transport.MaxIdleConnsPerHost = -1
+ return transport
+}
+
+// DefaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func DefaultPooledTransport() *http.Transport {
+ transport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ DisableKeepAlives: false,
+ MaxIdleConnsPerHost: 1,
+ }
+ return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+ return &http.Client{
+ Transport: DefaultTransport(),
+ }
+}
+
+// DefaultPooledClient returns a new http.Client with the same default values
+// as http.Client, but with a shared Transport. Do not use this function
+// for transient clients as it can leak file descriptors over time. Only use
+// this for clients that will be re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+ return &http.Client{
+ Transport: DefaultPooledTransport(),
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 0000000..0584109
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
new file mode 100644
index 0000000..c3989e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile
@@ -0,0 +1,8 @@
+TEST?=./...
+
+test:
+ go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
+ go vet $(TEST)
+ go test $(TEST) -race
+
+.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
new file mode 100644
index 0000000..f5abffc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/README.md
@@ -0,0 +1,43 @@
+# rootcerts
+
+Functions for loading root certificates for TLS connections.
+
+-----
+
+Go's standard library `crypto/tls` provides a common mechanism for configuring
+TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
+of certificates for the client to use as a trust store when verifying server
+certificates.
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set. This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+ tlsConfig := &tls.Config{}
+ err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+ CAFile: os.Getenv("MYAPP_CAFILE"),
+ CAPath: os.Getenv("MYAPP_CAPATH"),
+ })
+ if err != nil {
+ return nil, err
+ }
+ c := cleanhttp.DefaultClient()
+ t := cleanhttp.DefaultTransport()
+ t.TLSClientConfig = tlsConfig
+ c.Transport = t
+ return c, nil
+}
+```
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 0000000..b55cc62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 0000000..aeb30ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,103 @@
+package rootcerts
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When both
+// CAFile and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+ // CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+ // precedence over CAPath.
+ CAFile string
+
+ // CAPath is a path to a directory populated with PEM-encoded certificates.
+ CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+ if t == nil {
+ return nil
+ }
+ pool, err := LoadCACerts(c)
+ if err != nil {
+ return err
+ }
+ t.RootCAs = pool
+ return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+ if c == nil {
+ c = &Config{}
+ }
+ if c.CAFile != "" {
+ return LoadCAFile(c.CAFile)
+ }
+ if c.CAPath != "" {
+ return LoadCAPath(c.CAPath)
+ }
+
+ return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("Error loading CA File: %s", err)
+ }
+
+ ok := pool.AppendCertsFromPEM(pem)
+ if !ok {
+ return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+ }
+
+ return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() {
+ return nil
+ }
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("Error loading file from CAPath: %s", err)
+ }
+
+ ok := pool.AppendCertsFromPEM(pem)
+ if !ok {
+ return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+ }
+
+ return nil
+ }
+
+ err := filepath.Walk(caPath, walkFn)
+ if err != nil {
+ return nil, err
+ }
+
+ return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 0000000..66b1472
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 0000000..a9a0406
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+ "crypto/x509"
+ "os/exec"
+ "path"
+
+ "github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin to work around golang.org/issue/14514.
+func LoadSystemCAs() (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+
+ for _, keychain := range certKeychains() {
+ err := addCertsFromKeychain(pool, keychain)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+ cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+ data, err := cmd.Output()
+ if err != nil {
+ return err
+ }
+
+ pool.AppendCertsFromPEM(data)
+
+ return nil
+}
+
+func certKeychains() []string {
+ keychains := []string{
+ "/System/Library/Keychains/SystemRootCertificates.keychain",
+ "/Library/Keychains/System.keychain",
+ }
+ home, err := homedir.Dir()
+ if err == nil {
+ loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+ keychains = append(keychains, loginKeychain)
+ }
+ return keychains
+}
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
index ad404a8..84fd743 100644
--- a/vendor/github.com/hashicorp/hcl/Makefile
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -6,6 +6,7 @@ fmt: generate
go fmt ./...
test: generate
+ go get -t ./...
go test $(TEST) $(TESTARGS)
generate:
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
index e70f03b..3c8cdf8 100644
--- a/vendor/github.com/hashicorp/hcl/appveyor.yml
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -12,5 +12,8 @@ install:
go version
go env
+
+ go get -t ./...
+
build_script:
- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index a6938fe..c8a077d 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -409,7 +409,6 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
if result.Kind() == reflect.Interface {
result = result.Elem()
}
-
// Create the slice if it isn't nil
resultType := result.Type()
resultElemType := resultType.Elem()
@@ -443,6 +442,12 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
// Decode
val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
if err := d.decode(fieldName, item, val); err != nil {
return err
}
@@ -455,6 +460,57 @@ func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value)
return nil
}
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to properly decode.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+ // we need to un-flatten the ast enough to decode
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
@@ -606,6 +662,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// match (only object with the field), then we decode it exactly.
// If it is a prefix match, then we decode the matches.
filter := list.Filter(fieldName)
+
prefixMatches := filter.Children()
matches := filter.Elem()
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/vault/api/SPEC.md b/vendor/github.com/hashicorp/vault/api/SPEC.md
new file mode 100644
index 0000000..15345f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/SPEC.md
@@ -0,0 +1,611 @@
+FORMAT: 1A
+
+# vault
+
+The Vault API gives you full access to the Vault project.
+
+If you're browsing this API specification in GitHub or in raw
+format, please excuse some of the odd formatting. This document
+is in api-blueprint format that is read by viewers such as
+Apiary.
+
+## Sealed vs. Unsealed
+
+Whenever an individual Vault server is started, it is started
+in the _sealed_ state. In this state, it knows where its data
+is located, but the data is encrypted and Vault doesn't have the
+encryption keys to access it. Before Vault can operate, it must
+be _unsealed_.
+
+**Note:** Sealing/unsealing has no relationship to _authentication_
+which is separate and still required once the Vault is unsealed.
+
+Instead of being sealed with a single key, we utilize
+[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
+to shard a key into _n_ parts such that _t_ parts are required
+to reconstruct the original key, where `t <= n`. This means that
+Vault itself doesn't know the original key, and no single person
+has the original key (unless `n = 1`, or `t` parts are given to
+a single person).
+
+Unsealing is done via an unauthenticated
+[unseal API](#reference/seal/unseal/unseal). This API takes a single
+master shard and progresses the unsealing process. Once all shards
+are given, the Vault is either unsealed or resets the unsealing
+process if the key was invalid.
+
+The entire seal/unseal state is server-wide. This allows multiple
+distinct operators to use the unseal API (or more likely the
+`vault unseal` command) from separate computers/networks and never
+have to transmit their key in order to unseal the vault in a
+distributed fashion.
+
+## Transport
+
+The API is expected to be accessed over a TLS connection at
+all times, with a valid certificate that is verified by a well
+behaved client.
+
+## Authentication
+
+Once the Vault is unsealed, every other operation requires
+authentication. There are multiple methods for authentication
+that can be enabled (see
+[authentication](#reference/authentication)).
+
+Authentication is done with the login endpoint. The login endpoint
+returns an access token that is set as the `X-Vault-Token` header.
+
+## Help
+
+To retrieve the help for any API within Vault, including mounted
+backends, credential providers, etc. then append `?help=1` to any
+URL. If you have valid permission to access the path, then the help text
+will be returned with the following structure:
+
+ {
+ "help": "help text"
+ }
+
+## Error Response
+
+A common JSON structure is always returned to return errors:
+
+ {
+ "errors": [
+ "message",
+ "another message"
+ ]
+ }
+
+This structure will be sent down for any non-20x HTTP status.
+
+## HTTP Status Codes
+
+The following HTTP status codes are used throughout the API.
+
+- `200` - Success with data.
+- `204` - Success, no data returned.
+- `400` - Invalid request, missing or invalid data.
+- `403` - Forbidden, your authentication details are either
+ incorrect or you don't have access to this feature.
+- `404` - Invalid path. This can both mean that the path truly
+ doesn't exist or that you don't have permission to view a
+ specific path. We use 404 in some cases to avoid state leakage.
+- `429` - Rate limit exceeded. Try again after waiting some period
+ of time.
+- `500` - Internal server error. An internal error has occurred,
+ try again later. If the error persists, report a bug.
+- `503` - Vault is down for maintenance or is currently sealed.
+ Try again later.
+
+# Group Initialization
+
+## Initialization [/sys/init]
+### Initialization Status [GET]
+Returns the status of whether the vault is initialized or not. The
+vault doesn't have to be unsealed for this operation.
+
++ Response 200 (application/json)
+
+ {
+ "initialized": true
+ }
+
+### Initialize [POST]
+Initialize the vault. This is an unauthenticated request to initially
+setup a new vault. Although this is unauthenticated, it is still safe:
+data cannot be in vault prior to initialization, and any future
+authentication will fail if you didn't initialize it yourself.
+Additionally, once initialized, a vault cannot be reinitialized.
+
+This API is the only time Vault will ever be aware of your keys, and
+the only time the keys will ever be returned in one unit. Care should
+be taken to ensure that the output of this request is never logged,
+and that the keys are properly distributed.
+
+The response also contains the initial root token that can be used
+as authentication in order to initially configure Vault once it is
+unsealed. Just as with the unseal keys, this is the only time Vault is
+ever aware of this token.
+
++ Request (application/json)
+
+ {
+ "secret_shares": 5,
+        "secret_threshold": 3
+ }
+
++ Response 200 (application/json)
+
+ {
+ "keys": ["one", "two", "three"],
+ "root_token": "foo"
+ }
+
+# Group Seal/Unseal
+
+## Seal Status [/sys/seal-status]
+### Seal Status [GET]
+Returns the status of whether the vault is currently
+sealed or not, as well as the progress of unsealing.
+
+The response has the following attributes:
+
+- sealed (boolean) - If true, the vault is sealed. Otherwise,
+ it is unsealed.
+- t (int) - The "t" value for the master key, or the number
+ of shards needed total to unseal the vault.
+- n (int) - The "n" value for the master key, or the total
+ number of shards of the key distributed.
+- progress (int) - The number of master key shards that have
+ been entered so far towards unsealing the vault.
+
++ Response 200 (application/json)
+
+ {
+ "sealed": true,
+ "t": 3,
+ "n": 5,
+ "progress": 1
+ }
+
+## Seal [/sys/seal]
+### Seal [PUT]
+Seal the vault.
+
+Sealing the vault locks Vault from any future operations on any
+secrets or system configuration until the vault is once again
+unsealed. Internally, sealing throws away the keys to access the
+encrypted vault data, so Vault is unable to access the data without
+unsealing to get the encryption keys.
+
++ Response 204
+
+## Unseal [/sys/unseal]
+### Unseal [PUT]
+Unseal the vault.
+
+Unseal the vault by entering a portion of the master key. The
+response object will tell you if the unseal is complete or
+only partial.
+
+If the vault is already unsealed, this does nothing. It is
+not an error, the return value just says the vault is unsealed.
+Due to the architecture of Vault, we cannot validate whether
+any portion of the unseal key given is valid until all keys
+are inputted, therefore unsealing an already unsealed vault
+is still a success even if the input key is invalid.
+
++ Request (application/json)
+
+ {
+ "key": "value"
+ }
+
++ Response 200 (application/json)
+
+ {
+ "sealed": true,
+ "t": 3,
+ "n": 5,
+ "progress": 1
+ }
+
+# Group Authentication
+
+## List Auth Methods [/sys/auth]
+### List all auth methods [GET]
+Lists all available authentication methods.
+
+This returns the name of the authentication method as well as
+a human-friendly long-form help text for the method that can be
+shown to the user as documentation.
+
++ Response 200 (application/json)
+
+ {
+ "token": {
+ "type": "token",
+ "description": "Token authentication"
+ },
+ "oauth": {
+ "type": "oauth",
+ "description": "OAuth authentication"
+ }
+ }
+
+## Single Auth Method [/sys/auth/{id}]
+
++ Parameters
+ + id (required, string) ... The ID of the auth method.
+
+### Enable an auth method [PUT]
+Enables an authentication method.
+
+The body of the request depends on the authentication method
+being used. Please reference the documentation for the specific
+authentication method you're enabling in order to determine what
+parameters you must give it.
+
+If an authentication method is already enabled, then this can be
+used to change the configuration, including even the type of
+the configuration.
+
++ Request (application/json)
+
+ {
+ "type": "type",
+ "key": "value",
+ "key2": "value2"
+ }
+
++ Response 204
+
+### Disable an auth method [DELETE]
+Disables an authentication method. Previously authenticated sessions
+are immediately invalidated.
+
++ Response 204
+
+# Group Policies
+
+Policies are named permission sets that identities returned by
+credential stores are bound to. This separates _authentication_
+from _authorization_.
+
+## Policies [/sys/policy]
+### List all Policies [GET]
+
+List all the policies.
+
++ Response 200 (application/json)
+
+ {
+ "policies": ["root"]
+ }
+
+## Single Policy [/sys/policy/{id}]
+
++ Parameters
+ + id (required, string) ... The name of the policy
+
+### Upsert [PUT]
+
+Create or update a policy with the given ID.
+
++ Request (application/json)
+
+ {
+ "rules": "HCL"
+ }
+
++ Response 204
+
+### Delete [DELETE]
+
+Delete a policy with the given ID. Any identities bound to this
+policy will immediately become "deny all" despite already being
+authenticated.
+
++ Response 204
+
+# Group Mounts
+
+Logical backends are mounted at _mount points_, similar to
+filesystems. This allows you to mount the "aws" logical backend
+at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo`
+for example. This enables multiple logical backends to be enabled.
+
+## Mounts [/sys/mounts]
+### List all mounts [GET]
+
+Lists all the active mount points.
+
++ Response 200 (application/json)
+
+ {
+ "aws": {
+ "type": "aws",
+ "description": "AWS"
+ },
+ "pg": {
+ "type": "postgresql",
+ "description": "PostgreSQL dynamic users"
+ }
+ }
+
+## Single Mount [/sys/mounts/{path}]
+### New Mount [POST]
+
+Mount a logical backend to a new path.
+
+Configuration for this new backend is done via the normal
+read/write mechanism once it is mounted.
+
++ Request (application/json)
+
+ {
+ "type": "aws",
+ "description": "EU AWS tokens"
+ }
+
++ Response 204
+
+### Unmount [DELETE]
+
+Unmount a mount point.
+
++ Response 204
+
+## Remount [/sys/remount]
+### Remount [POST]
+
+Move an already-mounted backend to a new path.
+
++ Request (application/json)
+
+ {
+ "from": "aws",
+ "to": "aws-east"
+ }
+
++ Response 204
+
+# Group Audit Backends
+
+Audit backends are responsible for shuttling the audit logs that
+Vault generates to a durable system for future querying. By default,
+audit logs are not stored anywhere.
+
+## Audit Backends [/sys/audit]
+### List Enabled Audit Backends [GET]
+
+List all the enabled audit backends
+
++ Response 200 (application/json)
+
+ {
+ "file": {
+ "type": "file",
+ "description": "Send audit logs to a file",
+ "options": {}
+ }
+ }
+
+## Single Audit Backend [/sys/audit/{path}]
+
++ Parameters
+ + path (required, string) ... The path where the audit backend is mounted
+
+### Enable [PUT]
+
+Enable an audit backend.
+
++ Request (application/json)
+
+ {
+ "type": "file",
+ "description": "send to a file",
+ "options": {
+ "path": "/var/log/vault.audit.log"
+ }
+ }
+
++ Response 204
+
+### Disable [DELETE]
+
+Disable an audit backend.
+
++ Request (application/json)
+
++ Response 204
+
+# Group Secrets
+
+## Generic [/{mount}/{path}]
+
+This group documents the general format of reading and writing
+to Vault. The exact structure of the keyspace is defined by the
+logical backends in use, so documentation related to
+a specific backend should be referenced for details on what keys
+and routes are expected.
+
+The path for examples are `/prefix/path`, but in practice
+these will be defined by the backends that are mounted. For
+example, reading an AWS key might be at the `/aws/root` path.
+These paths are defined by the logical backends.
+
++ Parameters
+ + mount (required, string) ... The mount point for the
+ logical backend. Example: `aws`.
+ + path (optional, string) ... The path within the backend
+ to read or write data.
+
+### Read [GET]
+
+Read data from vault.
+
+The data read from the vault can either be a secret or
+arbitrary configuration data. The type of data returned
+depends on the path, and is defined by the logical backend.
+
+If the return value is a secret, then the return structure
+is a mixture of arbitrary key/value along with the following
+fields which are guaranteed to exist:
+
+- `lease_id` (string) - A unique ID used for renewal and
+ revocation.
+
+- `renewable` (bool) - If true, then this key can be renewed.
+ If a key can't be renewed, then a new key must be requested
+ after the lease duration period.
+
+- `lease_duration` (int) - The time in seconds that a secret is
+ valid for before it must be renewed.
+
+- `lease_duration_max` (int) - The maximum amount of time in
+ seconds that a secret is valid for. This will always be
+ greater than or equal to `lease_duration`. The difference
+ between this and `lease_duration` is an overlap window
+ where multiple keys may be valid.
+
+If the return value is not a secret, then the return structure
+is an arbitrary JSON object.
+
++ Response 200 (application/json)
+
+ {
+ "lease_id": "UUID",
+ "lease_duration": 3600,
+ "key": "value"
+ }
+
+### Write [PUT]
+
+Write data to vault.
+
+The behavior and arguments to the write are defined by
+the logical backend.
+
++ Request (application/json)
+
+ {
+ "key": "value"
+ }
+
++ Response 204
+
+# Group Lease Management
+
+## Renew Key [/sys/renew/{id}]
+
++ Parameters
+ + id (required, string) ... The `lease_id` of the secret
+ to renew.
+
+### Renew [PUT]
+
++ Response 200 (application/json)
+
+ {
+ "lease_id": "...",
+ "lease_duration": 3600,
+ "access_key": "foo",
+ "secret_key": "bar"
+ }
+
+## Revoke Key [/sys/revoke/{id}]
+
++ Parameters
+ + id (required, string) ... The `lease_id` of the secret
+ to revoke.
+
+### Revoke [PUT]
+
++ Response 204
+
+# Group Backend: AWS
+
+## Root Key [/aws/root]
+### Set the Key [PUT]
+
+Set the root key that the logical backend will use to create
+new secrets, IAM policies, etc.
+
++ Request (application/json)
+
+ {
+ "access_key": "key",
+ "secret_key": "key",
+ "region": "us-east-1"
+ }
+
++ Response 204
+
+## Policies [/aws/policies]
+### List Policies [GET]
+
+List all the policies that can be used to create keys.
+
++ Response 200 (application/json)
+
+ [{
+ "name": "root",
+ "description": "Root access"
+ }, {
+ "name": "web-deploy",
+ "description": "Enough permissions to deploy the web app."
+ }]
+
+## Single Policy [/aws/policies/{name}]
+
++ Parameters
+ + name (required, string) ... Name of the policy.
+
+### Read [GET]
+
+Read a policy.
+
++ Response 200 (application/json)
+
+ {
+ "policy": "base64-encoded policy"
+ }
+
+### Upsert [PUT]
+
+Create or update a policy.
+
++ Request (application/json)
+
+ {
+ "policy": "base64-encoded policy"
+ }
+
++ Response 204
+
+### Delete [DELETE]
+
+Delete the policy with the given name.
+
++ Response 204
+
+## Generate Access Keys [/aws/keys/{policy}]
+### Create [GET]
+
+This generates a new keypair for the given policy.
+
++ Parameters
+ + policy (required, string) ... The policy under which to create
+ the key pair.
+
++ Response 200 (application/json)
+
+ {
+ "lease_id": "...",
+ "lease_duration": 3600,
+ "access_key": "foo",
+ "secret_key": "bar"
+ }
diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go
new file mode 100644
index 0000000..da870c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/auth.go
@@ -0,0 +1,11 @@
+package api
+
+// Auth is used to perform credential backend related operations.
+type Auth struct {
+ c *Client
+}
+
+// Auth is used to return the client for credential-backend API calls.
+func (c *Client) Auth() *Auth {
+ return &Auth{c: c}
+}
diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go
new file mode 100644
index 0000000..aff10f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/auth_token.go
@@ -0,0 +1,223 @@
+package api
+
+// TokenAuth is used to perform token backend operations on Vault
+type TokenAuth struct {
+ c *Client
+}
+
+// Token is used to return the client for token-backend API calls
+func (a *Auth) Token() *TokenAuth {
+ return &TokenAuth{c: a.c}
+}
+
+func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) {
+ r := c.c.NewRequest("POST", "/v1/auth/token/create")
+ if err := r.SetJSONBody(opts); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) {
+ r := c.c.NewRequest("POST", "/v1/auth/token/create-orphan")
+ if err := r.SetJSONBody(opts); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) {
+ r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName)
+ if err := r.SetJSONBody(opts); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) Lookup(token string) (*Secret, error) {
+ r := c.c.NewRequest("POST", "/v1/auth/token/lookup")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "token": token,
+ }); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) {
+ r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "accessor": accessor,
+ }); err != nil {
+ return nil, err
+ }
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) LookupSelf() (*Secret, error) {
+ r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self")
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/renew")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "token": token,
+ "increment": increment,
+ }); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self")
+
+ body := map[string]interface{}{"increment": increment}
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+// RevokeAccessor revokes a token associated with the given accessor
+// along with all the child tokens.
+func (c *TokenAuth) RevokeAccessor(accessor string) error {
+ r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "accessor": accessor,
+ }); err != nil {
+ return err
+ }
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// RevokeOrphan revokes a token without revoking the tree underneath it (so
+// child tokens are orphaned rather than revoked)
+func (c *TokenAuth) RevokeOrphan(token string) error {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "token": token,
+ }); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// RevokeSelf revokes the token making the call. The `token` parameter is kept
+// for backwards compatibility but is ignored; only the client's set token has
+// an effect.
+func (c *TokenAuth) RevokeSelf(token string) error {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// RevokeTree is the "normal" revoke operation that revokes the given token and
+// the entire tree underneath -- all of its child tokens, their child tokens,
+// etc.
+func (c *TokenAuth) RevokeTree(token string) error {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/revoke")
+ if err := r.SetJSONBody(map[string]interface{}{
+ "token": token,
+ }); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+// TokenCreateRequest is the options structure for creating a token.
+type TokenCreateRequest struct {
+ ID string `json:"id,omitempty"`
+ Policies []string `json:"policies,omitempty"`
+ Metadata map[string]string `json:"meta,omitempty"`
+ Lease string `json:"lease,omitempty"`
+ TTL string `json:"ttl,omitempty"`
+ ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"`
+ Period string `json:"period,omitempty"`
+ NoParent bool `json:"no_parent,omitempty"`
+ NoDefaultPolicy bool `json:"no_default_policy,omitempty"`
+ DisplayName string `json:"display_name"`
+ NumUses int `json:"num_uses"`
+ Renewable *bool `json:"renewable,omitempty"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
new file mode 100644
index 0000000..4aee40c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -0,0 +1,416 @@
+package api
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-rootcerts"
+ "github.com/sethgrid/pester"
+)
+
+const EnvVaultAddress = "VAULT_ADDR"
+const EnvVaultCACert = "VAULT_CACERT"
+const EnvVaultCAPath = "VAULT_CAPATH"
+const EnvVaultClientCert = "VAULT_CLIENT_CERT"
+const EnvVaultClientKey = "VAULT_CLIENT_KEY"
+const EnvVaultInsecure = "VAULT_SKIP_VERIFY"
+const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
+const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
+const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
+
+// WrappingLookupFunc is a function that, given an HTTP verb and a path,
+// returns an optional string duration to be used for response wrapping (e.g.
+// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/",
+// however, end-of-path forward slashes are not trimmed, so must match your
+// called path precisely.
+type WrappingLookupFunc func(operation, path string) string
+
+// Config is used to configure the creation of the client.
+type Config struct {
+ // Address is the address of the Vault server. This should be a complete
+ // URL such as "http://vault.example.com". If you need a custom SSL
+ // cert or want to enable insecure mode, you need to specify a custom
+ // HttpClient.
+ Address string
+
+ // HttpClient is the HTTP client to use, which will currently always have the
+ // same values as http.DefaultClient. This is used to control redirect behavior.
+ HttpClient *http.Client
+
+ redirectSetup sync.Once
+
+ // MaxRetries controls the maximum number of times to retry when a 5xx error
+ // occurs. Set to 0 or less to disable retrying.
+ MaxRetries int
+}
+
+// TLSConfig contains the parameters needed to configure TLS on the HTTP client
+// used to communicate with Vault.
+type TLSConfig struct {
+ // CACert is the path to a PEM-encoded CA cert file to use to verify the
+ // Vault server SSL certificate.
+ CACert string
+
+ // CAPath is the path to a directory of PEM-encoded CA cert files to verify
+ // the Vault server SSL certificate.
+ CAPath string
+
+ // ClientCert is the path to the certificate for Vault communication
+ ClientCert string
+
+ // ClientKey is the path to the private key for Vault communication
+ ClientKey string
+
+ // TLSServerName, if set, is used to set the SNI host when connecting via
+ // TLS.
+ TLSServerName string
+
+ // Insecure enables or disables SSL verification
+ Insecure bool
+}
+
+// DefaultConfig returns a default configuration for the client. It is
+// safe to modify the return value of this function.
+//
+// The default Address is https://127.0.0.1:8200, but this can be overridden by
+// setting the `VAULT_ADDR` environment variable.
+func DefaultConfig() *Config {
+ config := &Config{
+ Address: "https://127.0.0.1:8200",
+
+ HttpClient: cleanhttp.DefaultClient(),
+ }
+ config.HttpClient.Timeout = time.Second * 60
+ transport := config.HttpClient.Transport.(*http.Transport)
+ transport.TLSHandshakeTimeout = 10 * time.Second
+ transport.TLSClientConfig = &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
+
+ if v := os.Getenv(EnvVaultAddress); v != "" {
+ config.Address = v
+ }
+
+ config.MaxRetries = pester.DefaultClient.MaxRetries
+
+ return config
+}
+
+// ConfigureTLS takes a set of TLS configurations and applies those to the the HTTP client.
+func (c *Config) ConfigureTLS(t *TLSConfig) error {
+
+ if c.HttpClient == nil {
+ return fmt.Errorf("config HTTP Client must be set")
+ }
+
+ var clientCert tls.Certificate
+ foundClientCert := false
+ if t.CACert != "" || t.CAPath != "" || t.ClientCert != "" || t.ClientKey != "" || t.Insecure {
+ if t.ClientCert != "" && t.ClientKey != "" {
+ var err error
+ clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
+ if err != nil {
+ return err
+ }
+ foundClientCert = true
+ } else if t.ClientCert != "" || t.ClientKey != "" {
+ return fmt.Errorf("Both client cert and client key must be provided")
+ }
+ }
+
+ clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
+ rootConfig := &rootcerts.Config{
+ CAFile: t.CACert,
+ CAPath: t.CAPath,
+ }
+ if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
+ return err
+ }
+
+ clientTLSConfig.InsecureSkipVerify = t.Insecure
+
+ if foundClientCert {
+ clientTLSConfig.Certificates = []tls.Certificate{clientCert}
+ }
+ if t.TLSServerName != "" {
+ clientTLSConfig.ServerName = t.TLSServerName
+ }
+
+ return nil
+}
+
+// ReadEnvironment reads configuration information from the
+// environment. If there is an error, no configuration value
+// is updated.
+func (c *Config) ReadEnvironment() error {
+ var envAddress string
+ var envCACert string
+ var envCAPath string
+ var envClientCert string
+ var envClientKey string
+ var envInsecure bool
+ var envTLSServerName string
+ var envMaxRetries *uint64
+
+ // Parse the environment variables
+ if v := os.Getenv(EnvVaultAddress); v != "" {
+ envAddress = v
+ }
+ if v := os.Getenv(EnvVaultMaxRetries); v != "" {
+ maxRetries, err := strconv.ParseUint(v, 10, 32)
+ if err != nil {
+ return err
+ }
+ envMaxRetries = &maxRetries
+ }
+ if v := os.Getenv(EnvVaultCACert); v != "" {
+ envCACert = v
+ }
+ if v := os.Getenv(EnvVaultCAPath); v != "" {
+ envCAPath = v
+ }
+ if v := os.Getenv(EnvVaultClientCert); v != "" {
+ envClientCert = v
+ }
+ if v := os.Getenv(EnvVaultClientKey); v != "" {
+ envClientKey = v
+ }
+ if v := os.Getenv(EnvVaultInsecure); v != "" {
+ var err error
+ envInsecure, err = strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY")
+ }
+ }
+ if v := os.Getenv(EnvVaultTLSServerName); v != "" {
+ envTLSServerName = v
+ }
+
+ // Configure the HTTP clients TLS configuration.
+ t := &TLSConfig{
+ CACert: envCACert,
+ CAPath: envCAPath,
+ ClientCert: envClientCert,
+ ClientKey: envClientKey,
+ TLSServerName: envTLSServerName,
+ Insecure: envInsecure,
+ }
+ if err := c.ConfigureTLS(t); err != nil {
+ return err
+ }
+
+ if envAddress != "" {
+ c.Address = envAddress
+ }
+
+ if envMaxRetries != nil {
+ c.MaxRetries = int(*envMaxRetries) + 1
+ }
+
+ return nil
+}
+
+// Client is the client to the Vault API. Create a client with
+// NewClient.
+type Client struct {
+ addr *url.URL
+ config *Config
+ token string
+ wrappingLookupFunc WrappingLookupFunc
+}
+
+// NewClient returns a new client for the given configuration.
+//
+// If the environment variable `VAULT_TOKEN` is present, the token will be
+// automatically added to the client. Otherwise, you must manually call
+// `SetToken()`.
+func NewClient(c *Config) (*Client, error) {
+ if c == nil {
+ c = DefaultConfig()
+ if err := c.ReadEnvironment(); err != nil {
+ return nil, fmt.Errorf("error reading environment: %v", err)
+ }
+ }
+
+ u, err := url.Parse(c.Address)
+ if err != nil {
+ return nil, err
+ }
+
+ if c.HttpClient == nil {
+ c.HttpClient = DefaultConfig().HttpClient
+ }
+
+ redirFunc := func() {
+ // Ensure redirects are not automatically followed
+ // Note that this is sane for the API client as it has its own
+ // redirect handling logic (and thus also for command/meta),
+ // but in e.g. http_test actual redirect handling is necessary
+ c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ // Returning this value causes the Go net library to not close the
+ // response body and nil out the error. Otherwise pester tries
+ // three times on every redirect because it sees an error from this
+ // function being passed through.
+ return http.ErrUseLastResponse
+ }
+ }
+
+ c.redirectSetup.Do(redirFunc)
+
+ client := &Client{
+ addr: u,
+ config: c,
+ }
+
+ if token := os.Getenv("VAULT_TOKEN"); token != "" {
+ client.SetToken(token)
+ }
+
+ return client, nil
+}
+
+// Sets the address of Vault in the client. The format of address should be
+// "<Scheme>://<Host>:<Port>". Setting this on a client will override the
+// value of VAULT_ADDR environment variable.
+func (c *Client) SetAddress(addr string) error {
+ var err error
+ if c.addr, err = url.Parse(addr); err != nil {
+ return fmt.Errorf("failed to set address: %v", err)
+ }
+
+ return nil
+}
+
+// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
+// for a given operation and path
+func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
+ c.wrappingLookupFunc = lookupFunc
+}
+
+// Token returns the access token being used by this client. It will
+// return the empty string if there is no token set.
+func (c *Client) Token() string {
+ return c.token
+}
+
+// SetToken sets the token directly. This won't perform any auth
+// verification, it simply sets the token properly for future requests.
+func (c *Client) SetToken(v string) {
+ c.token = v
+}
+
+// ClearToken deletes the token if it is set or does nothing otherwise.
+func (c *Client) ClearToken() {
+ c.token = ""
+}
+
+// NewRequest creates a new raw request object to query the Vault server
+// configured for this client. This is an advanced method and generally
+// doesn't need to be called externally.
+func (c *Client) NewRequest(method, path string) *Request {
+ req := &Request{
+ Method: method,
+ URL: &url.URL{
+ Scheme: c.addr.Scheme,
+ Host: c.addr.Host,
+ Path: path,
+ },
+ ClientToken: c.token,
+ Params: make(map[string][]string),
+ }
+
+ var lookupPath string
+ switch {
+ case strings.HasPrefix(path, "/v1/"):
+ lookupPath = strings.TrimPrefix(path, "/v1/")
+ case strings.HasPrefix(path, "v1/"):
+ lookupPath = strings.TrimPrefix(path, "v1/")
+ default:
+ lookupPath = path
+ }
+ if c.wrappingLookupFunc != nil {
+ req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
+ } else {
+ req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
+ }
+
+ return req
+}
+
+// RawRequest performs the raw request given. This request may be against
+// a Vault server not configured with this client. This is an advanced operation
+// that generally won't need to be called externally.
+func (c *Client) RawRequest(r *Request) (*Response, error) {
+ redirectCount := 0
+START:
+ req, err := r.ToHTTP()
+ if err != nil {
+ return nil, err
+ }
+
+ client := pester.NewExtendedClient(c.config.HttpClient)
+ client.Backoff = pester.LinearJitterBackoff
+ client.MaxRetries = c.config.MaxRetries
+
+ var result *Response
+ resp, err := client.Do(req)
+ if resp != nil {
+ result = &Response{Response: resp}
+ }
+ if err != nil {
+ if strings.Contains(err.Error(), "tls: oversized") {
+ err = fmt.Errorf(
+ "%s\n\n"+
+ "This error usually means that the server is running with TLS disabled\n"+
+ "but the client is configured to use TLS. Please either enable TLS\n"+
+ "on the server or run the client with -address set to an address\n"+
+ "that uses the http protocol:\n\n"+
+ " vault <command> -address http://<address>\n\n"+
+ "You can also set the VAULT_ADDR environment variable:\n\n\n"+
+ " VAULT_ADDR=http://<address> vault <command>\n\n"+
+ "where <address> is replaced by the actual address to the server.",
+ err)
+ }
+ return result, err
+ }
+
+ // Check for a redirect, only allowing for a single redirect
+ if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 {
+ // Parse the updated location
+ respLoc, err := resp.Location()
+ if err != nil {
+ return result, err
+ }
+
+ // Ensure a protocol downgrade doesn't happen
+ if req.URL.Scheme == "https" && respLoc.Scheme != "https" {
+ return result, fmt.Errorf("redirect would cause protocol downgrade")
+ }
+
+ // Update the request
+ r.URL = respLoc
+
+ // Reset the request body if any
+ if err := r.ResetJSONBody(); err != nil {
+ return result, err
+ }
+
+ // Retry the request
+ redirectCount++
+ goto START
+ }
+
+ if err := result.Error(); err != nil {
+ return result, err
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go
new file mode 100644
index 0000000..b9ae100
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/help.go
@@ -0,0 +1,25 @@
+package api
+
+import (
+ "fmt"
+)
+
+// Help reads the help information for the given path.
+func (c *Client) Help(path string) (*Help, error) {
+ r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path))
+ r.Params.Add("help", "1")
+ resp, err := c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result Help
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type Help struct {
+ Help string `json:"help"`
+ SeeAlso []string `json:"see_also"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
new file mode 100644
index 0000000..f1cea7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -0,0 +1,176 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+const (
+ wrappedResponseLocation = "cubbyhole/response"
+)
+
+var (
+ // The default TTL that will be used with `sys/wrapping/wrap`, can be
+ // changed
+ DefaultWrappingTTL = "5m"
+
+ // The default function used if no other function is set, which honors the
+ // env var and wraps `sys/wrapping/wrap`
+ DefaultWrappingLookupFunc = func(operation, path string) string {
+ if os.Getenv(EnvVaultWrapTTL) != "" {
+ return os.Getenv(EnvVaultWrapTTL)
+ }
+
+ if (operation == "PUT" || operation == "POST") && path == "sys/wrapping/wrap" {
+ return DefaultWrappingTTL
+ }
+
+ return ""
+ }
+)
+
+// Logical is used to perform logical backend operations on Vault.
+type Logical struct {
+ c *Client
+}
+
+// Logical is used to return the client for logical-backend API calls.
+func (c *Client) Logical() *Logical {
+ return &Logical{c: c}
+}
+
+func (c *Logical) Read(path string) (*Secret, error) {
+ r := c.c.NewRequest("GET", "/v1/"+path)
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if resp != nil && resp.StatusCode == 404 {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *Logical) List(path string) (*Secret, error) {
+ r := c.c.NewRequest("LIST", "/v1/"+path)
+ // Set this for broader compatibility, but we use LIST above to be able to
+ // handle the wrapping lookup function
+ r.Method = "GET"
+ r.Params.Set("list", "true")
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if resp != nil && resp.StatusCode == 404 {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/"+path)
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == 200 {
+ return ParseSecret(resp.Body)
+ }
+
+ return nil, nil
+}
+
+func (c *Logical) Delete(path string) (*Secret, error) {
+ r := c.c.NewRequest("DELETE", "/v1/"+path)
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == 200 {
+ return ParseSecret(resp.Body)
+ }
+
+ return nil, nil
+}
+
+func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
+ var data map[string]interface{}
+ if wrappingToken != "" {
+ data = map[string]interface{}{
+ "token": wrappingToken,
+ }
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/wrapping/unwrap")
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if err != nil && resp.StatusCode != 404 {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK: // New method is supported
+ return ParseSecret(resp.Body)
+ case http.StatusNotFound: // Fall back to old method
+ default:
+ return nil, nil
+ }
+
+ if wrappingToken == "" {
+ origToken := c.c.Token()
+ defer c.c.SetToken(origToken)
+ c.c.SetToken(wrappingToken)
+ }
+
+ secret, err := c.Read(wrappedResponseLocation)
+ if err != nil {
+ return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err)
+ }
+ if secret == nil {
+ return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation)
+ }
+ if secret.Data == nil {
+ return nil, fmt.Errorf("\"data\" not found in wrapping response")
+ }
+ if _, ok := secret.Data["response"]; !ok {
+ return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map")
+ }
+
+ wrappedSecret := new(Secret)
+ buf := bytes.NewBufferString(secret.Data["response"].(string))
+ if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil {
+ return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err)
+ }
+
+ return wrappedSecret, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
new file mode 100644
index 0000000..8f22dd5
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -0,0 +1,71 @@
+package api
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+)
+
+// Request is a raw request configuration structure used to initiate
+// API requests to the Vault server.
+type Request struct {
+ Method string
+ URL *url.URL
+ Params url.Values
+ ClientToken string
+ WrapTTL string
+ Obj interface{}
+ Body io.Reader
+ BodySize int64
+}
+
+// SetJSONBody is used to set a request body that is a JSON-encoded value.
+func (r *Request) SetJSONBody(val interface{}) error {
+ buf := bytes.NewBuffer(nil)
+ enc := json.NewEncoder(buf)
+ if err := enc.Encode(val); err != nil {
+ return err
+ }
+
+ r.Obj = val
+ r.Body = buf
+ r.BodySize = int64(buf.Len())
+ return nil
+}
+
+// ResetJSONBody is used to reset the body for a redirect
+func (r *Request) ResetJSONBody() error {
+ if r.Body == nil {
+ return nil
+ }
+ return r.SetJSONBody(r.Obj)
+}
+
+// ToHTTP turns this request into a valid *http.Request for use with the
+// net/http package.
+func (r *Request) ToHTTP() (*http.Request, error) {
+ // Encode the query parameters
+ r.URL.RawQuery = r.Params.Encode()
+
+ // Create the HTTP request
+ req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.URL.Scheme = r.URL.Scheme
+ req.URL.Host = r.URL.Host
+ req.Host = r.URL.Host
+
+ if len(r.ClientToken) != 0 {
+ req.Header.Set("X-Vault-Token", r.ClientToken)
+ }
+
+ if len(r.WrapTTL) != 0 {
+ req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL)
+ }
+
+ return req, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go
new file mode 100644
index 0000000..7c8ac9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/response.go
@@ -0,0 +1,72 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+// Response is a raw response that wraps an HTTP response.
+type Response struct {
+ *http.Response
+}
+
+// DecodeJSON will decode the response body to a JSON structure. This
+// will consume the response body, but will not close it. Close must
+// still be called.
+func (r *Response) DecodeJSON(out interface{}) error {
+ return jsonutil.DecodeJSONFromReader(r.Body, out)
+}
+
+// Error returns an error response if there is one. If there is an error,
+// this will fully consume the response body, but will not close it. The
+// body must still be closed manually.
+func (r *Response) Error() error {
+ // 200 to 399 are okay status codes
+ if r.StatusCode >= 200 && r.StatusCode < 400 {
+ return nil
+ }
+
+ // We have an error. Let's copy the body into our own buffer first,
+ // so that if we can't decode JSON, we can at least copy it raw.
+ var bodyBuf bytes.Buffer
+ if _, err := io.Copy(&bodyBuf, r.Body); err != nil {
+ return err
+ }
+
+ // Decode the error response if we can. Note that we wrap the bodyBuf
+ // in a bytes.Reader here so that the JSON decoder doesn't move the
+ // read pointer for the original buffer.
+ var resp ErrorResponse
+ if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil {
+ // Ignore the decoding error and just drop the raw response
+ return fmt.Errorf(
+ "Error making API request.\n\n"+
+ "URL: %s %s\n"+
+ "Code: %d. Raw Message:\n\n%s",
+ r.Request.Method, r.Request.URL.String(),
+ r.StatusCode, bodyBuf.String())
+ }
+
+ var errBody bytes.Buffer
+ errBody.WriteString(fmt.Sprintf(
+ "Error making API request.\n\n"+
+ "URL: %s %s\n"+
+ "Code: %d. Errors:\n\n",
+ r.Request.Method, r.Request.URL.String(),
+ r.StatusCode))
+ for _, err := range resp.Errors {
+ errBody.WriteString(fmt.Sprintf("* %s", err))
+ }
+
+ return fmt.Errorf(errBody.String())
+}
+
+// ErrorResponse is the raw structure of errors when they're returned by the
+// HTTP API.
+type ErrorResponse struct {
+ Errors []string
+}
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
new file mode 100644
index 0000000..14924f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -0,0 +1,68 @@
+package api
+
+import (
+ "io"
+ "time"
+
+ "github.com/hashicorp/vault/helper/jsonutil"
+)
+
+// Secret is the structure returned for every secret within Vault.
+type Secret struct {
+ // The request ID that generated this response
+ RequestID string `json:"request_id"`
+
+ LeaseID string `json:"lease_id"`
+ LeaseDuration int `json:"lease_duration"`
+ Renewable bool `json:"renewable"`
+
+ // Data is the actual contents of the secret. The format of the data
+ // is arbitrary and up to the secret backend.
+ Data map[string]interface{} `json:"data"`
+
+ // Warnings contains any warnings related to the operation. These
+ // are not issues that caused the command to fail, but that the
+ // client should be aware of.
+ Warnings []string `json:"warnings"`
+
+ // Auth, if non-nil, means that there was authentication information
+ // attached to this response.
+ Auth *SecretAuth `json:"auth,omitempty"`
+
+ // WrapInfo, if non-nil, means that the initial response was wrapped in the
+ // cubbyhole of the given token (which has a TTL of the given number of
+ // seconds)
+ WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
+}
+
+// SecretWrapInfo contains wrapping information if we have it. If what is
+// contained is an authentication token, the accessor for the token will be
+// available in WrappedAccessor.
+type SecretWrapInfo struct {
+ Token string `json:"token"`
+ TTL int `json:"ttl"`
+ CreationTime time.Time `json:"creation_time"`
+ WrappedAccessor string `json:"wrapped_accessor"`
+}
+
+// SecretAuth is the structure containing auth information if we have it.
+type SecretAuth struct {
+ ClientToken string `json:"client_token"`
+ Accessor string `json:"accessor"`
+ Policies []string `json:"policies"`
+ Metadata map[string]string `json:"metadata"`
+
+ LeaseDuration int `json:"lease_duration"`
+ Renewable bool `json:"renewable"`
+}
+
+// ParseSecret is used to parse a secret value from JSON from an io.Reader.
+func ParseSecret(r io.Reader) (*Secret, error) {
+ // First decode the JSON into a map[string]interface{}
+ var secret Secret
+ if err := jsonutil.DecodeJSONFromReader(r, &secret); err != nil {
+ return nil, err
+ }
+
+ return &secret, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go
new file mode 100644
index 0000000..7c3e56b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/ssh.go
@@ -0,0 +1,38 @@
+package api
+
+import "fmt"
+
+// SSH is used to return a client to invoke operations on SSH backend.
+type SSH struct {
+ c *Client
+ MountPoint string
+}
+
+// SSH returns the client for logical-backend API calls.
+func (c *Client) SSH() *SSH {
+ return c.SSHWithMountPoint(SSHHelperDefaultMountPoint)
+}
+
+// SSHWithMountPoint returns the client with specific SSH mount point.
+func (c *Client) SSHWithMountPoint(mountPoint string) *SSH {
+ return &SSH{
+ c: c,
+ MountPoint: mountPoint,
+ }
+}
+
+// Credential invokes the SSH backend API to create a credential to establish an SSH session.
+func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) {
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role))
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
new file mode 100644
index 0000000..729fd99
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
@@ -0,0 +1,257 @@
+package api
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-rootcerts"
+ "github.com/hashicorp/hcl"
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ // SSHHelperDefaultMountPoint is the default path at which SSH backend will be
+ // mounted in the Vault server.
+ SSHHelperDefaultMountPoint = "ssh"
+
+ // VerifyEchoRequest is the echo request message sent as OTP by the helper.
+ VerifyEchoRequest = "verify-echo-request"
+
+ // VerifyEchoResponse is the echo response message sent as a response to OTP
+ // matching echo request.
+ VerifyEchoResponse = "verify-echo-response"
+)
+
+// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server
+// in order to verify the OTP entered by the user. It contains the path at which
+// SSH backend is mounted at the server.
+type SSHHelper struct {
+ c *Client
+ MountPoint string
+}
+
+// SSHVerifyResponse is a structure representing the fields in Vault server's
+// response.
+type SSHVerifyResponse struct {
+ // Usually empty. If the request OTP is echo request message, this will
+ // be set to the corresponding echo response message.
+ Message string `json:"message" structs:"message" mapstructure:"message"`
+
+ // Username associated with the OTP
+ Username string `json:"username" structs:"username" mapstructure:"username"`
+
+ // IP associated with the OTP
+ IP string `json:"ip" structs:"ip" mapstructure:"ip"`
+
+ // Name of the role against which the OTP was issued
+ RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
+}
+
+// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file.
+type SSHHelperConfig struct {
+ VaultAddr string `hcl:"vault_addr"`
+ SSHMountPoint string `hcl:"ssh_mount_point"`
+ CACert string `hcl:"ca_cert"`
+ CAPath string `hcl:"ca_path"`
+ AllowedCidrList string `hcl:"allowed_cidr_list"`
+ AllowedRoles string `hcl:"allowed_roles"`
+ TLSSkipVerify bool `hcl:"tls_skip_verify"`
+ TLSServerName string `hcl:"tls_server_name"`
+}
+
+// SetTLSParameters sets the TLS parameters for this SSH agent.
+func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) {
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: c.TLSSkipVerify,
+ MinVersion: tls.VersionTLS12,
+ RootCAs: certPool,
+ ServerName: c.TLSServerName,
+ }
+
+ transport := cleanhttp.DefaultTransport()
+ transport.TLSClientConfig = tlsConfig
+ clientConfig.HttpClient.Transport = transport
+}
+
+// Returns true if any of the following conditions are true:
+// * CA cert is configured
+// * CA path is configured
+// * configured to skip certificate verification
+// * TLS server name is configured
+//
+func (c *SSHHelperConfig) shouldSetTLSParameters() bool {
+ return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify
+}
+
+// NewClient returns a new client for the configuration. This client will be used by the
+// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user.
+// If the configuration supplies Vault SSL certificates, then the client will
+// have TLS configured in its transport.
+func (c *SSHHelperConfig) NewClient() (*Client, error) {
+ // Creating a default client configuration for communicating with vault server.
+ clientConfig := DefaultConfig()
+
+ // Pointing the client to the actual address of vault server.
+ clientConfig.Address = c.VaultAddr
+
+ // Check if certificates are provided via config file.
+ if c.shouldSetTLSParameters() {
+ rootConfig := &rootcerts.Config{
+ CAFile: c.CACert,
+ CAPath: c.CAPath,
+ }
+ certPool, err := rootcerts.LoadCACerts(rootConfig)
+ if err != nil {
+ return nil, err
+ }
+ // Enable TLS on the HTTP client information
+ c.SetTLSParameters(clientConfig, certPool)
+ }
+
+ // Creating the client object for the given configuration
+ client, err := NewClient(clientConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding
+// in-memory structure.
+//
+// Vault address is a required parameter.
+// Mount point defaults to "ssh".
+func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil && !os.IsNotExist(err) {
+ return nil, multierror.Prefix(err, "ssh_helper:")
+ }
+ return ParseSSHHelperConfig(string(contents))
+}
+
+// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper
+// configuration.
+func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
+ root, err := hcl.Parse(string(contents))
+ if err != nil {
+ return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err)
+ }
+
+ list, ok := root.Node.(*ast.ObjectList)
+ if !ok {
+ return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object")
+ }
+
+ valid := []string{
+ "vault_addr",
+ "ssh_mount_point",
+ "ca_cert",
+ "ca_path",
+ "allowed_cidr_list",
+ "allowed_roles",
+ "tls_skip_verify",
+ "tls_server_name",
+ }
+ if err := checkHCLKeys(list, valid); err != nil {
+ return nil, multierror.Prefix(err, "ssh_helper:")
+ }
+
+ var c SSHHelperConfig
+ c.SSHMountPoint = SSHHelperDefaultMountPoint
+ if err := hcl.DecodeObject(&c, list); err != nil {
+ return nil, multierror.Prefix(err, "ssh_helper:")
+ }
+
+ if c.VaultAddr == "" {
+ return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'")
+ }
+ return &c, nil
+}
+
+// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend
+// mounted at default path ("ssh").
+func (c *Client) SSHHelper() *SSHHelper {
+ return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint)
+}
+
+// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend
+// mounted at a specific mount point.
+func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper {
+ return &SSHHelper{
+ c: c,
+ MountPoint: mountPoint,
+ }
+}
+
+// Verify verifies if the key provided by user is present in Vault server. The response
+// will contain the IP address and username associated with the OTP. In case the
+// OTP matches the echo request message, instead of searching an entry for the OTP,
+// an echo response message is returned. This feature is used by ssh-helper to verify if
+// it's configured correctly.
+func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
+ data := map[string]interface{}{
+ "otp": otp,
+ }
+ verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint)
+ r := c.c.NewRequest("PUT", verifyPath)
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if secret.Data == nil {
+ return nil, nil
+ }
+
+ var verifyResp SSHVerifyResponse
+ err = mapstructure.Decode(secret.Data, &verifyResp)
+ if err != nil {
+ return nil, err
+ }
+ return &verifyResp, nil
+}
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf(
+ "invalid key '%s' on line %d", key, item.Assign.Line))
+ }
+ }
+
+ return result
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go
new file mode 100644
index 0000000..5fb1118
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys.go
@@ -0,0 +1,11 @@
+package api
+
+// Sys is used to perform system-related operations on Vault.
+type Sys struct {
+ c *Client
+}
+
+// Sys is used to return the client for sys-related API calls.
+func (c *Client) Sys() *Sys {
+ return &Sys{c: c}
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go
new file mode 100644
index 0000000..1ffdef8
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go
@@ -0,0 +1,114 @@
+package api
+
+import (
+ "fmt"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+func (c *Sys) AuditHash(path string, input string) (string, error) {
+ body := map[string]interface{}{
+ "input": input,
+ }
+
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path))
+ if err := r.SetJSONBody(body); err != nil {
+ return "", err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ type d struct {
+ Hash string `json:"hash"`
+ }
+
+ var result d
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return "", err
+ }
+
+ return result.Hash, err
+}
+
+func (c *Sys) ListAudit() (map[string]*Audit, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/audit")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ mounts := map[string]*Audit{}
+ for k, v := range result {
+ switch v.(type) {
+ case map[string]interface{}:
+ default:
+ continue
+ }
+ var res Audit
+ err = mapstructure.Decode(v, &res)
+ if err != nil {
+ return nil, err
+ }
+ // Not a mount, some other api.Secret data
+ if res.Type == "" {
+ continue
+ }
+ mounts[k] = &res
+ }
+
+ return mounts, nil
+}
+
+func (c *Sys) EnableAudit(
+ path string, auditType string, desc string, opts map[string]string) error {
+ body := map[string]interface{}{
+ "type": auditType,
+ "description": desc,
+ "options": opts,
+ }
+
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path))
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (c *Sys) DisableAudit(path string) error {
+ r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path))
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+// Structures for the requests/response are all down here. They aren't
+// individually documented because they map almost directly to the raw HTTP API
+// documentation. Please refer to that documentation for more details.
+
+type Audit struct {
+ Path string
+ Type string
+ Description string
+ Options map[string]string
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
new file mode 100644
index 0000000..1940e84
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -0,0 +1,87 @@
+package api
+
+import (
+ "fmt"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+func (c *Sys) ListAuth() (map[string]*AuthMount, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/auth")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ mounts := map[string]*AuthMount{}
+ for k, v := range result {
+ switch v.(type) {
+ case map[string]interface{}:
+ default:
+ continue
+ }
+ var res AuthMount
+ err = mapstructure.Decode(v, &res)
+ if err != nil {
+ return nil, err
+ }
+ // Not a mount, some other api.Secret data
+ if res.Type == "" {
+ continue
+ }
+ mounts[k] = &res
+ }
+
+ return mounts, nil
+}
+
+func (c *Sys) EnableAuth(path, authType, desc string) error {
+ body := map[string]string{
+ "type": authType,
+ "description": desc,
+ }
+
+ r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (c *Sys) DisableAuth(path string) error {
+ r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path))
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+// Structures for the requests/response are all down here. They aren't
+// individually documented because they map almost directly to the raw HTTP API
+// documentation. Please refer to that documentation for more details.
+
+type AuthMount struct {
+ Type string `json:"type" structs:"type" mapstructure:"type"`
+ Description string `json:"description" structs:"description" mapstructure:"description"`
+ Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
+}
+
+type AuthConfigOutput struct {
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
new file mode 100644
index 0000000..80f6218
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
@@ -0,0 +1,43 @@
+package api
+
+import "fmt"
+
+func (c *Sys) CapabilitiesSelf(path string) ([]string, error) {
+ return c.Capabilities(c.c.Token(), path)
+}
+
+func (c *Sys) Capabilities(token, path string) ([]string, error) {
+ body := map[string]string{
+ "token": token,
+ "path": path,
+ }
+
+ reqPath := "/v1/sys/capabilities"
+ if token == c.c.Token() {
+ reqPath = fmt.Sprintf("%s-self", reqPath)
+ }
+
+ r := c.c.NewRequest("POST", reqPath)
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ var capabilities []string
+ capabilitiesRaw := result["capabilities"].([]interface{})
+ for _, capability := range capabilitiesRaw {
+ capabilities = append(capabilities, capability.(string))
+ }
+ return capabilities, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
new file mode 100644
index 0000000..8dc2095
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
@@ -0,0 +1,77 @@
+package api
+
+func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result GenerateRootStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
+ body := map[string]interface{}{
+ "otp": otp,
+ "pgp_key": pgpKey,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result GenerateRootStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) GenerateRootCancel() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
+ body := map[string]interface{}{
+ "key": shard,
+ "nonce": nonce,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result GenerateRootStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type GenerateRootStatusResponse struct {
+ Nonce string
+ Started bool
+ Progress int
+ Required int
+ Complete bool
+ EncodedRootToken string `json:"encoded_root_token"`
+ PGPFingerprint string `json:"pgp_fingerprint"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go
new file mode 100644
index 0000000..f824ab7
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_init.go
@@ -0,0 +1,54 @@
+package api
+
+func (c *Sys) InitStatus() (bool, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/init")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+
+ var result InitStatusResponse
+ err = resp.DecodeJSON(&result)
+ return result.Initialized, err
+}
+
+func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/init")
+ if err := r.SetJSONBody(opts); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result InitResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type InitRequest struct {
+ SecretShares int `json:"secret_shares"`
+ SecretThreshold int `json:"secret_threshold"`
+ StoredShares int `json:"stored_shares"`
+ PGPKeys []string `json:"pgp_keys"`
+ RecoveryShares int `json:"recovery_shares"`
+ RecoveryThreshold int `json:"recovery_threshold"`
+ RecoveryPGPKeys []string `json:"recovery_pgp_keys"`
+ RootTokenPGPKey string `json:"root_token_pgp_key"`
+}
+
+type InitStatusResponse struct {
+ Initialized bool
+}
+
+type InitResponse struct {
+ Keys []string `json:"keys"`
+ KeysB64 []string `json:"keys_base64"`
+ RecoveryKeys []string `json:"recovery_keys"`
+ RecoveryKeysB64 []string `json:"recovery_keys_base64"`
+ RootToken string `json:"root_token"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go
new file mode 100644
index 0000000..201ac73
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go
@@ -0,0 +1,20 @@
+package api
+
+func (c *Sys) Leader() (*LeaderResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/leader")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result LeaderResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type LeaderResponse struct {
+ HAEnabled bool `json:"ha_enabled"`
+ IsSelf bool `json:"is_self"`
+ LeaderAddress string `json:"leader_address"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_lease.go
new file mode 100644
index 0000000..e5c19c4
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_lease.go
@@ -0,0 +1,48 @@
+package api
+
+func (c *Sys) Renew(id string, increment int) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/renew")
+
+ body := map[string]interface{}{
+ "increment": increment,
+ "lease_id": id,
+ }
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
+func (c *Sys) Revoke(id string) error {
+ r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id)
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) RevokePrefix(id string) error {
+ r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id)
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) RevokeForce(id string) error {
+ r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id)
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
new file mode 100644
index 0000000..ca5e427
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -0,0 +1,142 @@
+package api
+
+import (
+ "fmt"
+
+ "github.com/fatih/structs"
+ "github.com/mitchellh/mapstructure"
+)
+
+func (c *Sys) ListMounts() (map[string]*MountOutput, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/mounts")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ mounts := map[string]*MountOutput{}
+ for k, v := range result {
+ switch v.(type) {
+ case map[string]interface{}:
+ default:
+ continue
+ }
+ var res MountOutput
+ err = mapstructure.Decode(v, &res)
+ if err != nil {
+ return nil, err
+ }
+ // Not a mount, some other api.Secret data
+ if res.Type == "" {
+ continue
+ }
+ mounts[k] = &res
+ }
+
+ return mounts, nil
+}
+
+func (c *Sys) Mount(path string, mountInfo *MountInput) error {
+ body := structs.Map(mountInfo)
+
+ r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path))
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (c *Sys) Unmount(path string) error {
+ r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path))
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) Remount(from, to string) error {
+ body := map[string]interface{}{
+ "from": from,
+ "to": to,
+ }
+
+ r := c.c.NewRequest("POST", "/v1/sys/remount")
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) TuneMount(path string, config MountConfigInput) error {
+ body := structs.Map(config)
+ r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) {
+ r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result MountConfigOutput
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ return &result, err
+}
+
+type MountInput struct {
+ Type string `json:"type" structs:"type"`
+ Description string `json:"description" structs:"description"`
+ Config MountConfigInput `json:"config" structs:"config"`
+}
+
+type MountConfigInput struct {
+ DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+}
+
+type MountOutput struct {
+ Type string `json:"type" structs:"type"`
+ Description string `json:"description" structs:"description"`
+ Config MountConfigOutput `json:"config" structs:"config"`
+}
+
+type MountConfigOutput struct {
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go
new file mode 100644
index 0000000..ba0e17f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go
@@ -0,0 +1,95 @@
+package api
+
+import "fmt"
+
+func (c *Sys) ListPolicies() ([]string, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/policy")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+
+ var ok bool
+ if _, ok = result["policies"]; !ok {
+ return nil, fmt.Errorf("policies not found in response")
+ }
+
+ listRaw := result["policies"].([]interface{})
+ var policies []string
+
+ for _, val := range listRaw {
+ policies = append(policies, val.(string))
+ }
+
+ return policies, err
+}
+
+func (c *Sys) GetPolicy(name string) (string, error) {
+ r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name))
+ resp, err := c.c.RawRequest(r)
+ if resp != nil {
+ defer resp.Body.Close()
+ if resp.StatusCode == 404 {
+ return "", nil
+ }
+ }
+ if err != nil {
+ return "", err
+ }
+
+ var result map[string]interface{}
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return "", err
+ }
+
+ var ok bool
+ if _, ok = result["rules"]; !ok {
+ return "", fmt.Errorf("rules not found in response")
+ }
+
+ return result["rules"].(string), nil
+}
+
+func (c *Sys) PutPolicy(name, rules string) error {
+ body := map[string]string{
+ "rules": rules,
+ }
+
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policy/%s", name))
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ return nil
+}
+
+func (c *Sys) DeletePolicy(name string) error {
+ r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name))
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+type getPoliciesResp struct {
+ Rules string `json:"rules"`
+}
+
+type listPoliciesResp struct {
+ Policies []string `json:"policies"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
new file mode 100644
index 0000000..e6d039e
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
@@ -0,0 +1,202 @@
+package api
+
+func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey/init")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey/init")
+ if err := r.SetJSONBody(config); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init")
+ if err := r.SetJSONBody(config); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyCancel() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) RekeyRecoveryKeyCancel() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
+ body := map[string]interface{}{
+ "key": shard,
+ "nonce": nonce,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey/update")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyUpdateResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
+ body := map[string]interface{}{
+ "key": shard,
+ "nonce": nonce,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyUpdateResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey/backup")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyRetrieveResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyRetrieveResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyDeleteBackup() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+
+ return err
+}
+
+func (c *Sys) RekeyDeleteRecoveryBackup() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+
+ return err
+}
+
+type RekeyInitRequest struct {
+ SecretShares int `json:"secret_shares"`
+ SecretThreshold int `json:"secret_threshold"`
+ PGPKeys []string `json:"pgp_keys"`
+ Backup bool
+}
+
+type RekeyStatusResponse struct {
+ Nonce string
+ Started bool
+ T int
+ N int
+ Progress int
+ Required int
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool
+}
+
+type RekeyUpdateResponse struct {
+ Nonce string
+ Complete bool
+ Keys []string
+ KeysB64 []string `json:"keys_base64"`
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool
+}
+
+type RekeyRetrieveResponse struct {
+ Nonce string
+ Keys map[string][]string
+ KeysB64 map[string][]string `json:"keys_base64"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go
new file mode 100644
index 0000000..8108dce
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go
@@ -0,0 +1,30 @@
+package api
+
+import "time"
+
+func (c *Sys) Rotate() error {
+ r := c.c.NewRequest("POST", "/v1/sys/rotate")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) KeyStatus() (*KeyStatus, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/key-status")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ result := new(KeyStatus)
+ err = resp.DecodeJSON(result)
+ return result, err
+}
+
+type KeyStatus struct {
+ Term int `json:"term"`
+ InstallTime time.Time `json:"install_time"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
new file mode 100644
index 0000000..b80e33a
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -0,0 +1,59 @@
+package api
+
+func (c *Sys) SealStatus() (*SealStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/seal-status")
+ return sealStatusRequest(c, r)
+}
+
+func (c *Sys) Seal() error {
+ r := c.c.NewRequest("PUT", "/v1/sys/seal")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) {
+ body := map[string]interface{}{"reset": true}
+
+ r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ return sealStatusRequest(c, r)
+}
+
+func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) {
+ body := map[string]interface{}{"key": shard}
+
+ r := c.c.NewRequest("PUT", "/v1/sys/unseal")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ return sealStatusRequest(c, r)
+}
+
+func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result SealStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type SealStatusResponse struct {
+ Sealed bool `json:"sealed"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go
new file mode 100644
index 0000000..421e5f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go
@@ -0,0 +1,10 @@
+package api
+
+func (c *Sys) StepDown() error {
+ r := c.c.NewRequest("PUT", "/v1/sys/step-down")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
new file mode 100644
index 0000000..e485f2f
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
@@ -0,0 +1,159 @@
+package compressutil
+
+import (
+ "bytes"
+ "compress/gzip"
+ "compress/lzw"
+ "fmt"
+ "io"
+)
+
+const (
+ // A byte value used as a canary prefix for the compressed information
+ // which is used to distinguish if a JSON input is compressed or not.
+ // The value of this constant should not be a first character of any
+ // valid JSON string.
+
+ // Byte value used as canary when using Gzip format
+ CompressionCanaryGzip byte = 'G'
+
+ // Byte value used as canary when using Lzw format
+ CompressionCanaryLzw byte = 'L'
+
+ CompressionTypeLzw = "lzw"
+
+ CompressionTypeGzip = "gzip"
+)
+
+// CompressionConfig is used to select a compression type to be performed by
+// Compress and Decompress utilities.
+// Supported types are:
+// * CompressionTypeLzw
+// * CompressionTypeGzip
+//
+// When using CompressionTypeGzip, the compression levels can also be chosen:
+// * gzip.DefaultCompression
+// * gzip.BestSpeed
+// * gzip.BestCompression
+type CompressionConfig struct {
+ // Type of the compression algorithm to be used
+ Type string
+
+ // When using Gzip format, the compression level to employ
+ GzipCompressionLevel int
+}
+
+// Compress places the canary byte in a buffer and uses the same buffer to fill
+// in the compressed information of the given input. The configuration supports
+// two type of compression: LZW and Gzip. When using Gzip compression format,
+// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will
+// be assumed.
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
+ var buf bytes.Buffer
+ var writer io.WriteCloser
+ var err error
+
+ if config == nil {
+ return nil, fmt.Errorf("config is nil")
+ }
+
+ // Write the canary into the buffer and create writer to compress the
+ // input data based on the configured type
+ switch config.Type {
+ case CompressionTypeLzw:
+ buf.Write([]byte{CompressionCanaryLzw})
+
+ writer = lzw.NewWriter(&buf, lzw.LSB, 8)
+ case CompressionTypeGzip:
+ buf.Write([]byte{CompressionCanaryGzip})
+
+ switch {
+ case config.GzipCompressionLevel == gzip.BestCompression,
+ config.GzipCompressionLevel == gzip.BestSpeed,
+ config.GzipCompressionLevel == gzip.DefaultCompression:
+ // These are valid compression levels
+ default:
+ // If compression level is set to NoCompression or to
+ // any invalid value, fall back to DefaultCompression
+ config.GzipCompressionLevel = gzip.DefaultCompression
+ }
+ writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+ default:
+ return nil, fmt.Errorf("unsupported compression type")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
+ }
+
+ if writer == nil {
+ return nil, fmt.Errorf("failed to create a compression writer")
+ }
+
+ // Compress the input and place it in the same buffer containing the
+ // canary byte.
+ if _, err = writer.Write(data); err != nil {
+ return nil, fmt.Errorf("failed to compress input data; err: %v", err)
+ }
+
+ // Close the io.WriteCloser
+ if err = writer.Close(); err != nil {
+ return nil, err
+ }
+
+ // Return the compressed bytes with canary byte at the start
+ return buf.Bytes(), nil
+}
+
+// Decompress checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// will be decompressed using the method specified in the given configuration.
+// If the first byte isn't a canary byte, then the utility returns a boolean
+// value indicating that the input was not compressed.
+func Decompress(data []byte) ([]byte, bool, error) {
+ var err error
+ var reader io.ReadCloser
+ if data == nil || len(data) == 0 {
+ return nil, false, fmt.Errorf("'data' being decompressed is empty")
+ }
+
+ switch {
+ case data[0] == CompressionCanaryGzip:
+ // If the first byte matches the canary byte, remove the canary
+ // byte and try to decompress the data that is after the canary.
+ if len(data) < 2 {
+ return nil, false, fmt.Errorf("invalid 'data' after the canary")
+ }
+ data = data[1:]
+ reader, err = gzip.NewReader(bytes.NewReader(data))
+ case data[0] == CompressionCanaryLzw:
+ // If the first byte matches the canary byte, remove the canary
+ // byte and try to decompress the data that is after the canary.
+ if len(data) < 2 {
+ return nil, false, fmt.Errorf("invalid 'data' after the canary")
+ }
+ data = data[1:]
+ reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
+ default:
+ // If the first byte doesn't match the canary byte, it means
+ // that the content was not compressed at all. Indicate the
+ // caller that the input was not compressed.
+ return nil, true, nil
+ }
+ if err != nil {
+ return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err)
+ }
+ if reader == nil {
+ return nil, false, fmt.Errorf("failed to create a compression reader")
+ }
+
+ // Close the io.ReadCloser
+ defer reader.Close()
+
+ // Read all the compressed data into a buffer
+ var buf bytes.Buffer
+ if _, err = io.Copy(&buf, reader); err != nil {
+ return nil, false, err
+ }
+
+ return buf.Bytes(), false, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
new file mode 100644
index 0000000..a96745b
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
@@ -0,0 +1,99 @@
+package jsonutil
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/hashicorp/vault/helper/compressutil"
+)
+
+// Encodes/Marshals the given object into JSON
+func EncodeJSON(in interface{}) ([]byte, error) {
+ if in == nil {
+ return nil, fmt.Errorf("input for encoding is nil")
+ }
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+ if err := enc.Encode(in); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// EncodeJSONAndCompress encodes the given input into JSON and compresses the
+// encoded value (using Gzip format BestCompression level, by default). A
+// canary byte is placed at the beginning of the returned bytes for the logic
+// in decompression method to identify compressed input.
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
+ if in == nil {
+ return nil, fmt.Errorf("input for encoding is nil")
+ }
+
+ // First JSON encode the given input
+ encodedBytes, err := EncodeJSON(in)
+ if err != nil {
+ return nil, err
+ }
+
+ if config == nil {
+ config = &compressutil.CompressionConfig{
+ Type: compressutil.CompressionTypeGzip,
+ GzipCompressionLevel: gzip.BestCompression,
+ }
+ }
+
+ return compressutil.Compress(encodedBytes, config)
+}
+
+// DecodeJSON tries to decompress the given data. The call to decompress, fails
+// if the content was not compressed in the first place, which is identified by
+// a canary byte before the compressed data. If the data is not compressed, it
+// is JSON decoded directly. Otherwise the decompressed data will be JSON
+// decoded.
+func DecodeJSON(data []byte, out interface{}) error {
+ if data == nil || len(data) == 0 {
+ return fmt.Errorf("'data' being decoded is nil")
+ }
+ if out == nil {
+ return fmt.Errorf("output parameter 'out' is nil")
+ }
+
+ // Decompress the data if it was compressed in the first place
+ decompressedBytes, uncompressed, err := compressutil.Decompress(data)
+ if err != nil {
+ return fmt.Errorf("failed to decompress JSON: err: %v", err)
+ }
+ if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
+ return fmt.Errorf("decompressed data being decoded is invalid")
+ }
+
+ // If the input supplied failed to contain the compression canary, it
+ // will be notified by the compression utility. Decode the decompressed
+ // input.
+ if !uncompressed {
+ data = decompressedBytes
+ }
+
+ return DecodeJSONFromReader(bytes.NewReader(data), out)
+}
+
+// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object
+func DecodeJSONFromReader(r io.Reader, out interface{}) error {
+ if r == nil {
+ return fmt.Errorf("'io.Reader' being decoded is nil")
+ }
+ if out == nil {
+ return fmt.Errorf("output parameter 'out' is nil")
+ }
+
+ dec := json.NewDecoder(r)
+
+ // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
+ dec.UseNumber()
+
+ // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
+ return dec.Decode(out)
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 0000000..f9c841a
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 0000000..d70706d
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 0000000..8996b02
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,137 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // If that fails, try getent
+ var stdout bytes.Buffer
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If "getent" is missing, ignore it
+ if err == exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ if home == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/sethgrid/pester/LICENSE.md b/vendor/github.com/sethgrid/pester/LICENSE.md
new file mode 100644
index 0000000..4b49dda
--- /dev/null
+++ b/vendor/github.com/sethgrid/pester/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) SendGrid 2016
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sethgrid/pester/README.md b/vendor/github.com/sethgrid/pester/README.md
new file mode 100644
index 0000000..e41f4d6
--- /dev/null
+++ b/vendor/github.com/sethgrid/pester/README.md
@@ -0,0 +1,126 @@
+# pester
+
+`pester` wraps Go's standard lib http client to provide several options to increase resiliency in your request. If you experience poor network conditions or requests could experience varied delays, you can now pester the endpoint for data.
+- Send out multiple requests and get the first back (only used for GET calls)
+- Retry on errors
+- Backoff
+
+### Simple Example
+Use `pester` where you would use the http client calls. By default, pester will use a concurrency of 1, and retry the endpoint 3 times with the `DefaultBackoff` strategy of waiting 1 second between retries.
+```go
+/* swap in replacement, just switch
+ http.{Get|Post|PostForm|Head|Do} to
+ pester.{Get|Post|PostForm|Head|Do}
+*/
+resp, err := pester.Get("http://sethammons.com")
+```
+
+### Backoff Strategy
+Provide your own backoff strategy, or use one of the provided built in strategies:
+- `DefaultBackoff`: 1 second
+- `LinearBackoff`: n seconds where n is the retry number
+- `LinearJitterBackoff`: n seconds where n is the retry number, +/- 0-33%
+- `ExponentialBackoff`: n seconds where n is 2^(retry number)
+- `ExponentialJitterBackoff`: n seconds where n is 2^(retry number), +/- 0-33%
+
+```go
+client := pester.New()
+client.Backoff = func(retry int) time.Duration {
+ // set up something dynamic or use a look up table
+ return time.Duration(retry) * time.Minute
+}
+```
+
+### Complete example
+For a complete and working example, see the sample directory.
+`pester` allows you to use a constructor to control:
+- backoff strategy
+- retries
+- concurrency
+- keeping a log for debugging
+```go
+package main
+
+import (
+ "log"
+ "net/http"
+ "strings"
+
+ "github.com/sethgrid/pester"
+)
+
+func main() {
+ log.Println("Starting...")
+
+ { // drop in replacement for http.Get and other client methods
+ resp, err := pester.Get("http://example.com")
+ if err != nil {
+ log.Println("error GETing example.com", err)
+ }
+ defer resp.Body.Close()
+ log.Printf("example.com %s", resp.Status)
+ }
+
+ { // control the resiliency
+ client := pester.New()
+ client.Concurrency = 3
+ client.MaxRetries = 5
+ client.Backoff = pester.ExponentialBackoff
+ client.KeepLog = true
+
+ resp, err := client.Get("http://example.com")
+ if err != nil {
+ log.Println("error GETing example.com", client.LogString())
+ }
+ defer resp.Body.Close()
+ log.Printf("example.com %s", resp.Status)
+ }
+
+ { // use the pester version of http.Client.Do
+ req, err := http.NewRequest("POST", "http://example.com", strings.NewReader("data"))
+ if err != nil {
+ log.Fatal("Unable to create a new http request", err)
+ }
+ resp, err := pester.Do(req)
+ if err != nil {
+ log.Println("error POSTing example.com", err)
+ }
+ defer resp.Body.Close()
+ log.Printf("example.com %s", resp.Status)
+ }
+}
+
+```
+
+### Example Log
+`pester` also allows you to control the resiliency and can optionally log the errors.
+```go
+c := pester.New()
+c.KeepLog = true
+
+nonExistentURL := "http://localhost:9000/foo"
+_, _ = c.Get(nonExistentURL)
+
+fmt.Println(c.LogString())
+/*
+Output:
+
+1432402837 Get [GET] http://localhost:9000/foo request-0 retry-0 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
+1432402838 Get [GET] http://localhost:9000/foo request-0 retry-1 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
+1432402839 Get [GET] http://localhost:9000/foo request-0 retry-2 error: Get http://localhost:9000/foo: dial tcp 127.0.0.1:9000: connection refused
+*/
+```
+
+### Tests
+
+You can run tests in the root directory with `$ go test`. There is a benchmark-like test available with `$ cd benchmarks; go test`.
+You can see `pester` in action with `$ cd sample; go run main.go`.
+
+For watching open file descriptors, you can run `watch "lsof -i -P | grep main"` if you started the app with `go run main.go`.
+I did this for watching for FD leaks. My method was to alter `sample/main.go` to only run one case (`pester.Get with set backoff strategy, concurrency and retries increased`)
+and adding a sleep after the result came back. This let me verify if FDs were getting left open when they should have closed. If you know a better way, let me know!
+I was able to see that FDs are now closing when they should :)
+
+![Are we there yet?](http://butchbellah.com/wp-content/uploads/2012/06/Are-We-There-Yet.jpg)
+
+Are we there yet? Are we there yet? Are we there yet? Are we there yet? ...
diff --git a/vendor/github.com/sethgrid/pester/main.go b/vendor/github.com/sethgrid/pester/main.go
new file mode 100644
index 0000000..8eb91fe
--- /dev/null
+++ b/vendor/github.com/sethgrid/pester/main.go
@@ -0,0 +1,423 @@
+package pester
+
+// pester provides additional resiliency over the standard http client methods by
+// allowing you to control concurrency, retries, and a backoff strategy.
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+// Client wraps the http client and exposes all the functionality of the http.Client.
+// Additionally, Client provides pester specific values for handling resiliency.
+type Client struct {
+ // wrap it to provide access to http built ins
+ hc *http.Client
+
+ Transport http.RoundTripper
+ CheckRedirect func(req *http.Request, via []*http.Request) error
+ Jar http.CookieJar
+ Timeout time.Duration
+
+ // pester specific
+ Concurrency int
+ MaxRetries int
+ Backoff BackoffStrategy
+ KeepLog bool
+
+ SuccessReqNum int
+ SuccessRetryNum int
+
+ wg *sync.WaitGroup
+
+ sync.Mutex
+ ErrLog []ErrEntry
+}
+
+// ErrEntry is used to provide the LogString() data and is populated
+// each time an error happens if KeepLog is set.
+// ErrEntry.Retry is deprecated in favor of ErrEntry.Attempt
+type ErrEntry struct {
+ Time time.Time
+ Method string
+ URL string
+ Verb string
+ Request int
+ Retry int
+ Attempt int
+ Err error
+}
+
+// result simplifies the channel communication for concurrent request handling
+type result struct {
+ resp *http.Response
+ err error
+ req int
+ retry int
+}
+
+// params represents all the params needed to run http client calls and pester errors
+type params struct {
+ method string
+ verb string
+ req *http.Request
+ url string
+ bodyType string
+ body io.Reader
+ data url.Values
+}
+
+// New constructs a new DefaultClient with sensible default values
+func New() *Client {
+ return &Client{
+ Concurrency: DefaultClient.Concurrency,
+ MaxRetries: DefaultClient.MaxRetries,
+ Backoff: DefaultClient.Backoff,
+ ErrLog: DefaultClient.ErrLog,
+ wg: &sync.WaitGroup{},
+ }
+}
+
+// NewExtendedClient allows you to pass in an http.Client that is previously set up
+// and extends it to have Pester's features of concurrency and retries.
+func NewExtendedClient(hc *http.Client) *Client {
+ c := New()
+ c.hc = hc
+ return c
+}
+
+// BackoffStrategy is used to determine how long a retry request should wait until attempted
+type BackoffStrategy func(retry int) time.Duration
+
+// DefaultClient provides sensible defaults
+var DefaultClient = &Client{Concurrency: 1, MaxRetries: 3, Backoff: DefaultBackoff, ErrLog: []ErrEntry{}}
+
+// DefaultBackoff always returns 1 second
+func DefaultBackoff(_ int) time.Duration {
+ return 1 * time.Second
+}
+
+// ExponentialBackoff returns ever increasing backoffs by a power of 2
+func ExponentialBackoff(i int) time.Duration {
+ return time.Duration(math.Pow(2, float64(i))) * time.Second
+}
+
+// ExponentialJitterBackoff returns ever increasing backoffs by a power of 2
+// with +/- 0-33% to prevent synchronized requests.
+func ExponentialJitterBackoff(i int) time.Duration {
+ return jitter(int(math.Pow(2, float64(i))))
+}
+
+// LinearBackoff returns increasing durations, each a second longer than the last
+func LinearBackoff(i int) time.Duration {
+ return time.Duration(i) * time.Second
+}
+
+// LinearJitterBackoff returns increasing durations, each a second longer than the last
+// with +/- 0-33% to prevent synchronized requests.
+func LinearJitterBackoff(i int) time.Duration {
+ return jitter(i)
+}
+
+// jitter keeps the +/- 0-33% logic in one place
+func jitter(i int) time.Duration {
+ ms := i * 1000
+
+ maxJitter := ms / 3
+
+ rand.Seed(time.Now().Unix())
+ jitter := rand.Intn(maxJitter + 1)
+
+ if rand.Intn(2) == 1 {
+ ms = ms + jitter
+ } else {
+ ms = ms - jitter
+ }
+
+ // a jitter of 0 messes up the time.Tick chan
+ if ms <= 0 {
+ ms = 1
+ }
+
+ return time.Duration(ms) * time.Millisecond
+}
+
+// Wait blocks until all pester requests have returned
+// Probably not that useful outside of testing.
+func (c *Client) Wait() {
+ c.wg.Wait()
+}
+
+// pester provides all the logic of retries, concurrency, backoff, and logging
+func (c *Client) pester(p params) (*http.Response, error) {
+ resultCh := make(chan result)
+ multiplexCh := make(chan result)
+ finishCh := make(chan struct{})
+
+ // track all requests that go out so we can close the late listener routine that closes late incoming response bodies
+ totalSentRequests := &sync.WaitGroup{}
+ totalSentRequests.Add(1)
+ defer totalSentRequests.Done()
+ allRequestsBackCh := make(chan struct{})
+ go func() {
+ totalSentRequests.Wait()
+ close(allRequestsBackCh)
+ }()
+
+ // GET calls should be idempotent and can make use
+ // of concurrency. Other verbs can mutate and should not
+ // make use of the concurrency feature
+ concurrency := c.Concurrency
+ if p.verb != "GET" {
+ concurrency = 1
+ }
+
+ c.Lock()
+ if c.hc == nil {
+ c.hc = &http.Client{}
+ c.hc.Transport = c.Transport
+ c.hc.CheckRedirect = c.CheckRedirect
+ c.hc.Jar = c.Jar
+ c.hc.Timeout = c.Timeout
+ }
+ c.Unlock()
+
+ // re-create the http client so we can leverage the std lib
+ httpClient := http.Client{
+ Transport: c.hc.Transport,
+ CheckRedirect: c.hc.CheckRedirect,
+ Jar: c.hc.Jar,
+ Timeout: c.hc.Timeout,
+ }
+
+ // if we have a request body, we need to save it for later
+ var originalRequestBody []byte
+ var originalBody []byte
+ var err error
+ if p.req != nil && p.req.Body != nil {
+ originalRequestBody, err = ioutil.ReadAll(p.req.Body)
+ if err != nil {
+ return &http.Response{}, errors.New("error reading request body")
+ }
+ p.req.Body.Close()
+ }
+ if p.body != nil {
+ originalBody, err = ioutil.ReadAll(p.body)
+ if err != nil {
+ return &http.Response{}, errors.New("error reading body")
+ }
+ }
+
+ AttemptLimit := c.MaxRetries
+ if AttemptLimit <= 0 {
+ AttemptLimit = 1
+ }
+
+ for req := 0; req < concurrency; req++ {
+ c.wg.Add(1)
+ totalSentRequests.Add(1)
+ go func(n int, p params) {
+ defer c.wg.Done()
+ defer totalSentRequests.Done()
+
+ var err error
+ for i := 1; i <= AttemptLimit; i++ {
+ c.wg.Add(1)
+ defer c.wg.Done()
+ select {
+ case <-finishCh:
+ return
+ default:
+ }
+ resp := &http.Response{}
+
+ // rehydrate the body (it is drained each read)
+ if len(originalRequestBody) > 0 {
+ p.req.Body = ioutil.NopCloser(bytes.NewBuffer(originalRequestBody))
+ }
+ if len(originalBody) > 0 {
+ p.body = bytes.NewBuffer(originalBody)
+ }
+
+ // route the calls
+ switch p.method {
+ case "Do":
+ resp, err = httpClient.Do(p.req)
+ case "Get":
+ resp, err = httpClient.Get(p.url)
+ case "Head":
+ resp, err = httpClient.Head(p.url)
+ case "Post":
+ resp, err = httpClient.Post(p.url, p.bodyType, p.body)
+ case "PostForm":
+ resp, err = httpClient.PostForm(p.url, p.data)
+ }
+
+ // Early return if we have a valid result
+ // Only retry (ie, continue the loop) on 5xx status codes
+ if err == nil && resp.StatusCode < 500 {
+ multiplexCh <- result{resp: resp, err: err, req: n, retry: i}
+ return
+ }
+
+ c.log(ErrEntry{
+ Time: time.Now(),
+ Method: p.method,
+ Verb: p.verb,
+ URL: p.url,
+ Request: n,
+ Retry: i + 1, // would remove, but would break backward compatibility
+ Attempt: i,
+ Err: err,
+ })
+
+ // if it is the last iteration, grab the result (which is an error at this point)
+ if i == AttemptLimit {
+ multiplexCh <- result{resp: resp, err: err}
+ return
+ }
+
+ // if we are retrying, we should close this response body to free the fd
+ if resp != nil {
+ resp.Body.Close()
+ }
+
+ // prevent a 0 from causing the tick to block, pass additional microsecond
+ <-time.Tick(c.Backoff(i) + 1*time.Microsecond)
+ }
+ }(req, p)
+ }
+
+ // spin off the go routine so it can continually listen in on late results and close the response bodies
+ go func() {
+ gotFirstResult := false
+ for {
+ select {
+ case res := <-multiplexCh:
+ if !gotFirstResult {
+ gotFirstResult = true
+ close(finishCh)
+ resultCh <- res
+ } else if res.resp != nil {
+ // we only return one result to the caller; close all other response bodies that come back
+ // drain the body before close as to not prevent keepalive. see https://gist.github.com/mholt/eba0f2cc96658be0f717
+ io.Copy(ioutil.Discard, res.resp.Body)
+ res.resp.Body.Close()
+ }
+ case <-allRequestsBackCh:
+ // don't leave this goroutine running
+ return
+ }
+ }
+ }()
+
+ select {
+ case res := <-resultCh:
+ c.Lock()
+ defer c.Unlock()
+ c.SuccessReqNum = res.req
+ c.SuccessRetryNum = res.retry
+ return res.resp, res.err
+ }
+}
+
+// LogString provides a string representation of the errors the client has seen
+func (c *Client) LogString() string {
+ c.Lock()
+ defer c.Unlock()
+ var res string
+ for _, e := range c.ErrLog {
+ res += fmt.Sprintf("%d %s [%s] %s request-%d retry-%d error: %s\n",
+ e.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)
+ }
+ return res
+}
+
+// LogErrCount is a helper method used primarily for test validation
+func (c *Client) LogErrCount() int {
+ c.Lock()
+ defer c.Unlock()
+ return len(c.ErrLog)
+}
+
+// EmbedHTTPClient allows you to extend an existing Pester client with an
+// underlying http.Client, such as https://godoc.org/golang.org/x/oauth2/google#DefaultClient
+func (c *Client) EmbedHTTPClient(hc *http.Client) {
+ c.hc = hc
+}
+
+func (c *Client) log(e ErrEntry) {
+ if c.KeepLog {
+ c.Lock()
+ c.ErrLog = append(c.ErrLog, e)
+ c.Unlock()
+ }
+}
+
+// Do provides the same functionality as http.Client.Do
+func (c *Client) Do(req *http.Request) (resp *http.Response, err error) {
+ return c.pester(params{method: "Do", req: req, verb: req.Method, url: req.URL.String()})
+}
+
+// Get provides the same functionality as http.Client.Get
+func (c *Client) Get(url string) (resp *http.Response, err error) {
+ return c.pester(params{method: "Get", url: url, verb: "GET"})
+}
+
+// Head provides the same functionality as http.Client.Head
+func (c *Client) Head(url string) (resp *http.Response, err error) {
+ return c.pester(params{method: "Head", url: url, verb: "HEAD"})
+}
+
+// Post provides the same functionality as http.Client.Post
+func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ return c.pester(params{method: "Post", url: url, bodyType: bodyType, body: body, verb: "POST"})
+}
+
+// PostForm provides the same functionality as http.Client.PostForm
+func (c *Client) PostForm(url string, data url.Values) (resp *http.Response, err error) {
+ return c.pester(params{method: "PostForm", url: url, data: data, verb: "POST"})
+}
+
+////////////////////////////////////////
+// Provide self-constructing variants //
+////////////////////////////////////////
+
+// Do provides the same functionality as http.Client.Do and creates its own constructor
+func Do(req *http.Request) (resp *http.Response, err error) {
+ c := New()
+ return c.Do(req)
+}
+
+// Get provides the same functionality as http.Client.Get and creates its own constructor
+func Get(url string) (resp *http.Response, err error) {
+ c := New()
+ return c.Get(url)
+}
+
+// Head provides the same functionality as http.Client.Head and creates its own constructor
+func Head(url string) (resp *http.Response, err error) {
+ c := New()
+ return c.Head(url)
+}
+
+// Post provides the same functionality as http.Client.Post and creates its own constructor
+func Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error) {
+ c := New()
+ return c.Post(url, bodyType, body)
+}
+
+// PostForm provides the same functionality as http.Client.PostForm and creates its own constructor
+func PostForm(url string, data url.Values) (resp *http.Response, err error) {
+ c := New()
+ return c.PostForm(url, data)
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index d9549b0..d71754f 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -187,6 +187,12 @@
"revisionTime": "2016-08-16T17:40:47Z"
},
{
+ "checksumSHA1": "KCWVxG+J8SxHGlGiUghe0KBGsa8=",
+ "path": "github.com/fatih/structs",
+ "revision": "dc3312cb1a4513a366c4c9e622ad55c32df12ed3",
+ "revisionTime": "2016-08-07T23:55:29Z"
+ },
+ {
"checksumSHA1": "xgjI2W3RGiQwNlxsOW2V9fJ9kaM=",
"path": "github.com/fsnotify/fsnotify",
"revision": "f12c6236fe7b5cf6bcf30e5935d08cb079d78334",
@@ -266,64 +272,94 @@
"revisionTime": "2014-10-28T05:47:10Z"
},
{
+ "checksumSHA1": "Uzyon2091lmwacNsl1hCytjhHtg=",
+ "path": "github.com/hashicorp/go-cleanhttp",
+ "revision": "ad28ea4487f05916463e2423a55166280e8254b5",
+ "revisionTime": "2016-04-07T17:41:26Z"
+ },
+ {
"checksumSHA1": "KS9lmJV8Z7KHdtSIhbafQLU1hC4=",
"path": "github.com/hashicorp/go-multierror",
"revision": "8c5f0ad9360406a3807ce7de6bc73269a91a6e51",
"revisionTime": "2016-08-11T01:57:21Z"
},
{
- "checksumSHA1": "fa9G5tEr4oJJc3vtgn/B0NWZXfA=",
+ "checksumSHA1": "A1PcINvF3UiwHRKn8UcgARgvGRs=",
+ "path": "github.com/hashicorp/go-rootcerts",
+ "revision": "6bb64b370b90e7ef1fa532be9e591a81c3493e00",
+ "revisionTime": "2016-05-03T14:34:40Z"
+ },
+ {
+ "checksumSHA1": "8OPDk+bKyRGJoKcS4QNw9F7dpE8=",
"path": "github.com/hashicorp/hcl",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "67DfevLBglV52Y2eAuhFc/xQni0=",
"path": "github.com/hashicorp/hcl/hcl/ast",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "l2oQxBsZRwn6eZjf+whXr8c9+8c=",
"path": "github.com/hashicorp/hcl/hcl/parser",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "lgR7PSAZ0RtvAc9OCtCnNsF/x8g=",
"path": "github.com/hashicorp/hcl/hcl/scanner",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=",
"path": "github.com/hashicorp/hcl/hcl/strconv",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
"path": "github.com/hashicorp/hcl/hcl/token",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "jQ45CCc1ed/nlV7bbSnx6z72q1M=",
"path": "github.com/hashicorp/hcl/json/parser",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=",
"path": "github.com/hashicorp/hcl/json/scanner",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
},
{
"checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
"path": "github.com/hashicorp/hcl/json/token",
- "revision": "99df0eb941dd8ddbc83d3f3605a34f6a686ac85e",
- "revisionTime": "2016-09-02T16:52:19Z"
+ "revision": "ef8133da8cda503718a74741312bf50821e6de79",
+ "revisionTime": "2016-09-16T13:01:00Z"
+ },
+ {
+ "checksumSHA1": "LaOQaDi4rc0QVgWtzDrQqa7hJgs=",
+ "path": "github.com/hashicorp/vault/api",
+ "revision": "b2d2bb55d25f8b6ab70f4044c4ddb6bf1050eab6",
+ "revisionTime": "2016-10-05T21:17:33Z"
+ },
+ {
+ "checksumSHA1": "ft77GtqeZEeCXioGpF/s6DlGm/U=",
+ "path": "github.com/hashicorp/vault/helper/compressutil",
+ "revision": "b2d2bb55d25f8b6ab70f4044c4ddb6bf1050eab6",
+ "revisionTime": "2016-10-05T21:17:33Z"
+ },
+ {
+ "checksumSHA1": "yUiSTPf0QUuL2r/81sjuytqBoeQ=",
+ "path": "github.com/hashicorp/vault/helper/jsonutil",
+ "revision": "b2d2bb55d25f8b6ab70f4044c4ddb6bf1050eab6",
+ "revisionTime": "2016-10-05T21:17:33Z"
},
{
"checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
@@ -351,6 +387,12 @@
"revisionTime": "2016-08-21T07:55:01Z"
},
{
+ "checksumSHA1": "AXacfEchaUqT5RGmPmMXsOWRhv8=",
+ "path": "github.com/mitchellh/go-homedir",
+ "revision": "756f7b183b7ab78acdbbee5c7f392838ed459dda",
+ "revisionTime": "2016-06-21T17:42:43Z"
+ },
+ {
"checksumSHA1": "LUrnGREfnifW4WDMaavmc9MlLI0=",
"path": "github.com/mitchellh/mapstructure",
"revision": "ca63d7c062ee3c9f34db231e352b60012b4fd0c1",
@@ -393,6 +435,12 @@
"revisionTime": "2016-01-10T10:55:54Z"
},
{
+ "checksumSHA1": "Qm7DuiE3Cn0+CelfV9tggSPNG0k=",
+ "path": "github.com/sethgrid/pester",
+ "revision": "2a102734c18c43c74fd0664e06cd414cf9602b93",
+ "revisionTime": "2016-09-16T18:34:45Z"
+ },
+ {
"checksumSHA1": "wwt9oTMyWLdPZhkTipVJnZcamFU=",
"path": "github.com/sid77/drop",
"revision": "e00b2d7247e9591c9b9bb9784f91b1cfe8d42680",