author    Niall Sheridan <nsheridan@gmail.com>	2018-06-20 22:39:07 +0100
committer Niall Sheridan <nsheridan@gmail.com>	2018-06-20 22:39:07 +0100
commit    de6d2c524430287c699aaa898c1325da6afea539 (patch)
tree      f78eb841208d667668a7bc92a9290d693cc7103b /vendor/github.com/hashicorp
parent    eb99016e1629e690e55633de6fc63a14c53e7ea2 (diff)

Update dependencies

Diffstat (limited to 'vendor/github.com/hashicorp')
-rw-r--r-- vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go | 1
-rw-r--r-- vendor/github.com/hashicorp/go-cleanhttp/handlers.go | 43
-rw-r--r-- vendor/github.com/hashicorp/go-retryablehttp/LICENSE | 363
-rw-r--r-- vendor/github.com/hashicorp/go-retryablehttp/Makefile | 11
-rw-r--r-- vendor/github.com/hashicorp/go-retryablehttp/README.md | 46
-rw-r--r-- vendor/github.com/hashicorp/go-retryablehttp/client.go | 487
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/GNUmakefile | 65
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/LICENSE | 373
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/README.md | 118
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/doc.go | 5
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ifaddr.go | 254
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go | 1281
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ifattr.go | 65
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ipaddr.go | 169
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go | 98
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go | 516
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go | 591
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/rfc.go | 948
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info.go | 19
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go | 36
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info_default.go | 10
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go | 40
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go | 37
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go | 41
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/sockaddr.go | 206
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go | 193
-rw-r--r-- vendor/github.com/hashicorp/go-sockaddr/unixsock.go | 135
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | 6
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go | 789
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/printer/printer.go | 66
-rw-r--r-- vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go | 29
-rw-r--r-- vendor/github.com/hashicorp/vault/api/SPEC.md | 611
-rw-r--r-- vendor/github.com/hashicorp/vault/api/client.go | 423
-rw-r--r-- vendor/github.com/hashicorp/vault/api/logical.go | 87
-rw-r--r-- vendor/github.com/hashicorp/vault/api/renewer.go | 127
-rw-r--r-- vendor/github.com/hashicorp/vault/api/request.go | 102
-rw-r--r-- vendor/github.com/hashicorp/vault/api/response.go | 8
-rw-r--r-- vendor/github.com/hashicorp/vault/api/secret.go | 247
-rw-r--r-- vendor/github.com/hashicorp/vault/api/ssh_agent.go | 46
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_audit.go | 13
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_auth.go | 49
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_capabilities.go | 8
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_generate_root.go | 51
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_health.go | 20
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_mounts.go | 57
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_plugins.go | 117
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_policy.go | 10
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_rekey.go | 147
-rw-r--r-- vendor/github.com/hashicorp/vault/api/sys_seal.go | 18
-rw-r--r-- vendor/github.com/hashicorp/vault/helper/compressutil/compress.go | 9
-rw-r--r-- vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go | 36
-rw-r--r-- vendor/github.com/hashicorp/vault/helper/jsonutil/json.go | 5
-rw-r--r-- vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go | 98
-rw-r--r-- vendor/github.com/hashicorp/vault/helper/strutil/strutil.go | 327
54 files changed, 8716 insertions, 941 deletions
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
index 7d8a57c..8d306bf 100644
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport {
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
+ DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 0000000..7eda377
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,43 @@
+package cleanhttp
+
+import (
+ "net/http"
+ "strings"
+ "unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+ ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+ // Nil-check on input to make it optional
+ if input == nil {
+ input = &HandlerInput{
+ ErrStatus: http.StatusBadRequest,
+ }
+ }
+
+ // Default to http.StatusBadRequest on error
+ if input.ErrStatus == 0 {
+ input.ErrStatus = http.StatusBadRequest
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Check URL path for non-printable characters
+ idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+
+ if idx != -1 {
+ w.WriteHeader(input.ErrStatus)
+ return
+ }
+
+ next.ServeHTTP(w, r)
+ return
+ })
+}
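
The new `PrintablePathCheckHandler` above wraps any `http.Handler`. A minimal sketch of wiring it in front of a plain `net/http` mux follows; the mux, the `/` handler, and the `:8080` address are illustrative assumptions, not part of this patch.

```go
package main

import (
	"log"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Wrap the mux so requests whose URL path contains non-printable
	// runes are rejected; a nil HandlerInput uses the default
	// http.StatusBadRequest.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)

	log.Fatal(http.ListenAndServe(":8080", handler))
}
```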
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
new file mode 100644
index 0000000..da17640
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile
@@ -0,0 +1,11 @@
+default: test
+
+test:
+ go vet ./...
+ go test -race ./...
+
+updatedeps:
+ go get -f -t -u ./...
+ go get -f -u ./...
+
+.PHONY: default test updatedeps
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md
new file mode 100644
index 0000000..ccdc7e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md
@@ -0,0 +1,46 @@
+go-retryablehttp
+================
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-retryablehttp
+[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp
+
+The `retryablehttp` package provides a familiar HTTP client interface with
+automatic retries and exponential backoff. It is a thin wrapper over the
+standard `net/http` client library and exposes nearly the same public API. This
+makes `retryablehttp` very easy to drop into existing programs.
+
+`retryablehttp` performs automatic retries under certain conditions. Mainly, if
+an error is returned by the client (connection errors, etc.), or if a 500-range
+response code is received (except 501), then a retry is invoked after a wait
+period. Otherwise, the response is returned and left to the caller to
+interpret.
+
+The main difference from `net/http` is that requests which take a request body
+(POST/PUT et al.) can have the body provided in a number of ways (some more or
+less efficient) that allow "rewinding" the request body if the initial request
+fails so that the full request can be attempted again. See the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more
+details.
+
+Example Use
+===========
+
+Using this library should look almost identical to what you would do with
+`net/http`. The simplest example of a GET request is shown below:
+
+```go
+resp, err := retryablehttp.Get("/foo")
+if err != nil {
+ panic(err)
+}
+```
+
+The returned response object is an `*http.Response`, the same thing you would
+usually get from `net/http`. If the request fails one or more times, the above
+call will block and retry with exponential backoff.
+
+For more usage and examples see the
+[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp).
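
For reference, a hedged sketch of a non-default client configuration using the fields and helpers defined in the `client.go` added later in this diff (`NewClient`, `RetryMax`, `RetryWaitMin`/`RetryWaitMax`, `Backoff`, `LinearJitterBackoff`, `Get`); the retry limits and the health-check URL are arbitrary example values.

```go
package main

import (
	"log"
	"time"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()
	client.RetryMax = 10                               // retries after the initial attempt
	client.RetryWaitMin = 500 * time.Millisecond       // lower bound for backoff
	client.RetryWaitMax = 10 * time.Second             // upper bound for backoff
	client.Backoff = retryablehttp.LinearJitterBackoff // linear backoff with jitter

	resp, err := client.Get("http://example.com/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.StatusCode)
}
```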
diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go
new file mode 100644
index 0000000..c016939
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go
@@ -0,0 +1,487 @@
+// The retryablehttp package provides a familiar HTTP client interface with
+// automatic retries and exponential backoff. It is a thin wrapper over the
+// standard net/http client library and exposes nearly the same public API.
+// This makes retryablehttp very easy to drop into existing programs.
+//
+// retryablehttp performs automatic retries under certain conditions. Mainly, if
+// an error is returned by the client (connection errors etc), or if a 500-range
+// response is received, then a retry is invoked. Otherwise, the response is
+// returned and left to the caller to interpret.
+//
+// Requests which take a request body should provide a non-nil function
+// parameter. The best choice is to provide either a function satisfying
+// ReaderFunc which provides multiple io.Readers in an efficient manner, a
+// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte
+// slice. As it is a reference type, and we will wrap it as needed by readers,
+// we can efficiently re-use the request body without needing to copy it. If an
+// io.Reader (such as a *bytes.Reader) is provided, the full body will be read
+// prior to the first request, and will be efficiently re-used for any retries.
+// ReadSeeker can be used, but some users have observed occasional data races
+// between the net/http library and the Seek functionality of some
+// implementations of ReadSeeker, so should be avoided if possible.
+package retryablehttp
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "math"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+var (
+ // Default retry configuration
+ defaultRetryWaitMin = 1 * time.Second
+ defaultRetryWaitMax = 30 * time.Second
+ defaultRetryMax = 4
+
+ // defaultClient is used for performing requests without explicitly making
+ // a new client. It is purposely private to avoid modifications.
+ defaultClient = NewClient()
+
+ // We need to consume response bodies to maintain http connections, but
+ // limit the size we consume to respReadLimit.
+ respReadLimit = int64(4096)
+)
+
+// ReaderFunc is the type of function that can be given natively to NewRequest
+type ReaderFunc func() (io.Reader, error)
+
+// LenReader is an interface implemented by many in-memory io.Readers. Used
+// for automatically sending the right Content-Length header when possible.
+type LenReader interface {
+ Len() int
+}
+
+// Request wraps the metadata needed to create HTTP requests.
+type Request struct {
+ // body is a seekable reader over the request body payload. This is
+ // used to rewind the request data in between retries.
+ body ReaderFunc
+
+ // Embed an HTTP request directly. This makes a *Request act exactly
+ // like an *http.Request so that all meta methods are supported.
+ *http.Request
+}
+
+// NewRequest creates a new wrapped request.
+func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
+ var err error
+ var body ReaderFunc
+ var contentLength int64
+
+ if rawBody != nil {
+ switch rawBody.(type) {
+ // If they gave us a function already, great! Use it.
+ case ReaderFunc:
+ body = rawBody.(ReaderFunc)
+ tmp, err := body()
+ if err != nil {
+ return nil, err
+ }
+ if lr, ok := tmp.(LenReader); ok {
+ contentLength = int64(lr.Len())
+ }
+ if c, ok := tmp.(io.Closer); ok {
+ c.Close()
+ }
+
+ case func() (io.Reader, error):
+ body = rawBody.(func() (io.Reader, error))
+ tmp, err := body()
+ if err != nil {
+ return nil, err
+ }
+ if lr, ok := tmp.(LenReader); ok {
+ contentLength = int64(lr.Len())
+ }
+ if c, ok := tmp.(io.Closer); ok {
+ c.Close()
+ }
+
+ // If a regular byte slice, we can read it over and over via new
+ // readers
+ case []byte:
+ buf := rawBody.([]byte)
+ body = func() (io.Reader, error) {
+ return bytes.NewReader(buf), nil
+ }
+ contentLength = int64(len(buf))
+
+ // If a bytes.Buffer we can read the underlying byte slice over and
+ // over
+ case *bytes.Buffer:
+ buf := rawBody.(*bytes.Buffer)
+ body = func() (io.Reader, error) {
+ return bytes.NewReader(buf.Bytes()), nil
+ }
+ contentLength = int64(buf.Len())
+
+ // We prioritize *bytes.Reader here because we don't really want to
+ // deal with it seeking so want it to match here instead of the
+ // io.ReadSeeker case.
+ case *bytes.Reader:
+ buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader))
+ if err != nil {
+ return nil, err
+ }
+ body = func() (io.Reader, error) {
+ return bytes.NewReader(buf), nil
+ }
+ contentLength = int64(len(buf))
+
+ // Compat case
+ case io.ReadSeeker:
+ raw := rawBody.(io.ReadSeeker)
+ body = func() (io.Reader, error) {
+ raw.Seek(0, 0)
+ return ioutil.NopCloser(raw), nil
+ }
+ if lr, ok := raw.(LenReader); ok {
+ contentLength = int64(lr.Len())
+ }
+
+ // Read all in so we can reset
+ case io.Reader:
+ buf, err := ioutil.ReadAll(rawBody.(io.Reader))
+ if err != nil {
+ return nil, err
+ }
+ body = func() (io.Reader, error) {
+ return bytes.NewReader(buf), nil
+ }
+ contentLength = int64(len(buf))
+
+ default:
+ return nil, fmt.Errorf("cannot handle type %T", rawBody)
+ }
+ }
+
+ httpReq, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ httpReq.ContentLength = contentLength
+
+ return &Request{body, httpReq}, nil
+}
+
+// RequestLogHook allows a function to run before each retry. The HTTP
+// request which will be made, and the retry number (0 for the initial
+// request) are available to users. The internal logger is exposed to
+// consumers.
+type RequestLogHook func(*log.Logger, *http.Request, int)
+
+// ResponseLogHook is like RequestLogHook, but allows running a function
+// on each HTTP response. This function will be invoked at the end of
+// every HTTP request executed, regardless of whether a subsequent retry
+// needs to be performed or not. If the response body is read or closed
+// from this method, this will affect the response returned from Do().
+type ResponseLogHook func(*log.Logger, *http.Response)
+
+// CheckRetry specifies a policy for handling retries. It is called
+// following each request with the response and error values returned by
+// the http.Client. If CheckRetry returns false, the Client stops retrying
+// and returns the response to the caller. If CheckRetry returns an error,
+// that error value is returned in lieu of the error from the request. The
+// Client will close any response body when retrying, but if the retry is
+// aborted it is up to the CheckResponse callback to properly close any
+// response body before returning.
+type CheckRetry func(resp *http.Response, err error) (bool, error)
+
+// Backoff specifies a policy for how long to wait between retries.
+// It is called after a failing request to determine the amount of time
+// that should pass before trying again.
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration
+
+// ErrorHandler is called if retries are expired, containing the last status
+// from the http library. If not specified, default behavior for the library is
+// to close the body and return an error indicating how many tries were
+// attempted. If overriding this, be sure to close the body if needed.
+type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error)
+
+// Client is used to make HTTP requests. It adds additional functionality
+// like automatic retries to tolerate minor outages.
+type Client struct {
+ HTTPClient *http.Client // Internal HTTP client.
+ Logger *log.Logger // Custom logger instance.
+
+ RetryWaitMin time.Duration // Minimum time to wait
+ RetryWaitMax time.Duration // Maximum time to wait
+ RetryMax int // Maximum number of retries
+
+ // RequestLogHook allows a user-supplied function to be called
+ // before each retry.
+ RequestLogHook RequestLogHook
+
+ // ResponseLogHook allows a user-supplied function to be called
+ // with the response from each HTTP request executed.
+ ResponseLogHook ResponseLogHook
+
+ // CheckRetry specifies the policy for handling retries, and is called
+ // after each request. The default policy is DefaultRetryPolicy.
+ CheckRetry CheckRetry
+
+ // Backoff specifies the policy for how long to wait between retries
+ Backoff Backoff
+
+ // ErrorHandler specifies the custom error handler to use, if any
+ ErrorHandler ErrorHandler
+}
+
+// NewClient creates a new Client with default settings.
+func NewClient() *Client {
+ return &Client{
+ HTTPClient: cleanhttp.DefaultClient(),
+ Logger: log.New(os.Stderr, "", log.LstdFlags),
+ RetryWaitMin: defaultRetryWaitMin,
+ RetryWaitMax: defaultRetryWaitMax,
+ RetryMax: defaultRetryMax,
+ CheckRetry: DefaultRetryPolicy,
+ Backoff: DefaultBackoff,
+ }
+}
+
+// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which
+// will retry on connection errors and server errors.
+func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) {
+ if err != nil {
+ return true, err
+ }
+ // Check the response code. We retry on 500-range responses to allow
+ // the server time to recover, as 500's are typically not permanent
+ // errors and may relate to outages on the server side. This will catch
+ // invalid response codes as well, like 0 and 999.
+ if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) {
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// DefaultBackoff provides a default callback for Client.Backoff which
+// will perform exponential backoff based on the attempt number and limited
+// by the provided minimum and maximum durations.
+func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+ mult := math.Pow(2, float64(attemptNum)) * float64(min)
+ sleep := time.Duration(mult)
+ if float64(sleep) != mult || sleep > max {
+ sleep = max
+ }
+ return sleep
+}
+
+// LinearJitterBackoff provides a callback for Client.Backoff which will
+// perform linear backoff based on the attempt number and with jitter to
+// prevent a thundering herd.
+//
+// min and max here are *not* absolute values. The number to be multiplied by
+// the attempt number will be chosen at random from between them, thus they are
+// bounding the jitter.
+//
+// For instance:
+// * To get strictly linear backoff of one second increasing each retry, set
+// both to one second (1s, 2s, 3s, 4s, ...)
+// * To get a small amount of jitter centered around one second increasing each
+// retry, set to around one second, such as a min of 800ms and max of 1200ms
+// (892ms, 2102ms, 2945ms, 4312ms, ...)
+// * To get extreme jitter, set to a very wide spread, such as a min of 100ms
+// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...)
+func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+ // attemptNum always starts at zero but we want to start at 1 for multiplication
+ attemptNum++
+
+ if max <= min {
+ // max <= min: the bounds are equal or misconfigured, so fall back to
+ // min * attemptNum
+ return min * time.Duration(attemptNum)
+ }
+
+ // Seed rand; doing this every time is fine
+ rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+
+ // Pick a random number that lies somewhere between the min and max and
+ // multiply by the attemptNum. attemptNum starts at zero so we always
+ // increment here. We first get a random percentage, then apply that to the
+ // difference between min and max, and add to min.
+ jitter := rand.Float64() * float64(max-min)
+ jitterMin := int64(jitter) + int64(min)
+ return time.Duration(jitterMin * int64(attemptNum))
+}
+
+// PassthroughErrorHandler is an ErrorHandler that directly passes through the
+// values from the net/http library for the final request. The body is not
+// closed.
+func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) {
+ return resp, err
+}
+
+// Do wraps calling an HTTP method with retries.
+func (c *Client) Do(req *Request) (*http.Response, error) {
+ if c.Logger != nil {
+ c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL)
+ }
+
+ var resp *http.Response
+ var err error
+
+ for i := 0; ; i++ {
+ var code int // HTTP response code
+
+ // Always rewind the request body when non-nil.
+ if req.body != nil {
+ body, err := req.body()
+ if err != nil {
+ return resp, err
+ }
+ if c, ok := body.(io.ReadCloser); ok {
+ req.Request.Body = c
+ } else {
+ req.Request.Body = ioutil.NopCloser(body)
+ }
+ }
+
+ if c.RequestLogHook != nil {
+ c.RequestLogHook(c.Logger, req.Request, i)
+ }
+
+ // Attempt the request
+ resp, err = c.HTTPClient.Do(req.Request)
+ if resp != nil {
+ code = resp.StatusCode
+ }
+
+ // Check if we should continue with retries.
+ checkOK, checkErr := c.CheckRetry(resp, err)
+
+ if err != nil {
+ if c.Logger != nil {
+ c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err)
+ }
+ } else {
+ // Call this here to maintain the behavior of logging all requests,
+ // even if CheckRetry signals to stop.
+ if c.ResponseLogHook != nil {
+ // Call the response logger function if provided.
+ c.ResponseLogHook(c.Logger, resp)
+ }
+ }
+
+ // Now decide if we should continue.
+ if !checkOK {
+ if checkErr != nil {
+ err = checkErr
+ }
+ return resp, err
+ }
+
+ // We do this before drainBody because there's no need for the I/O if
+ // we're breaking out
+ remain := c.RetryMax - i
+ if remain <= 0 {
+ break
+ }
+
+ // We're going to retry, consume any response to reuse the connection.
+ if err == nil && resp != nil {
+ c.drainBody(resp.Body)
+ }
+
+ wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp)
+ desc := fmt.Sprintf("%s %s", req.Method, req.URL)
+ if code > 0 {
+ desc = fmt.Sprintf("%s (status: %d)", desc, code)
+ }
+ if c.Logger != nil {
+ c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain)
+ }
+ time.Sleep(wait)
+ }
+
+ if c.ErrorHandler != nil {
+ return c.ErrorHandler(resp, err, c.RetryMax+1)
+ }
+
+ // By default, we close the response body and return an error without
+ // returning the response
+ if resp != nil {
+ resp.Body.Close()
+ }
+ return nil, fmt.Errorf("%s %s giving up after %d attempts",
+ req.Method, req.URL, c.RetryMax+1)
+}
+
+// Try to read the response body so we can reuse this connection.
+func (c *Client) drainBody(body io.ReadCloser) {
+ defer body.Close()
+ _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit))
+ if err != nil {
+ if c.Logger != nil {
+ c.Logger.Printf("[ERR] error reading response body: %v", err)
+ }
+ }
+}
+
+// Get is a shortcut for doing a GET request without making a new client.
+func Get(url string) (*http.Response, error) {
+ return defaultClient.Get(url)
+}
+
+// Get is a convenience helper for doing simple GET requests.
+func (c *Client) Get(url string) (*http.Response, error) {
+ req, err := NewRequest("GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+// Head is a shortcut for doing a HEAD request without making a new client.
+func Head(url string) (*http.Response, error) {
+ return defaultClient.Head(url)
+}
+
+// Head is a convenience method for doing simple HEAD requests.
+func (c *Client) Head(url string) (*http.Response, error) {
+ req, err := NewRequest("HEAD", url, nil)
+ if err != nil {
+ return nil, err
+ }
+ return c.Do(req)
+}
+
+// Post is a shortcut for doing a POST request without making a new client.
+func Post(url, bodyType string, body interface{}) (*http.Response, error) {
+ return defaultClient.Post(url, bodyType, body)
+}
+
+// Post is a convenience method for doing simple POST requests.
+func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) {
+ req, err := NewRequest("POST", url, body)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", bodyType)
+ return c.Do(req)
+}
+
+// PostForm is a shortcut to perform a POST with form data without creating
+// a new client.
+func PostForm(url string, data url.Values) (*http.Response, error) {
+ return defaultClient.PostForm(url, data)
+}
+
+// PostForm is a convenience method for doing simple POST operations using
+// pre-filled url.Values form data.
+func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) {
+ return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
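
The package comment at the top of `client.go` recommends a rewindable body for requests that carry one; a raw byte slice is the simplest option, since `NewRequest` wraps it in a fresh `bytes.Reader` for every attempt. A hedged sketch follows; the endpoint and JSON payload are made up for illustration.

```go
package main

import (
	"log"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// A []byte body can be re-read on every retry without copying.
	payload := []byte(`{"name":"example"}`)

	req, err := retryablehttp.NewRequest("POST", "http://example.com/api", payload)
	if err != nil {
		log.Fatal(err)
	}
	// *Request embeds *http.Request, so headers are set as usual.
	req.Header.Set("Content-Type", "application/json")

	resp, err := retryablehttp.NewClient().Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("status:", resp.StatusCode)
}
```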
diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
new file mode 100644
index 0000000..f3dfd24
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile
@@ -0,0 +1,65 @@
+TOOLS= golang.org/x/tools/cover
+GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp
+GOCOVER_FILE?= .cover.out
+GOCOVERHTML?= coverage.html
+FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1`
+
+test:: $(GOCOVER_FILE)
+ @$(MAKE) -C cmd/sockaddr test
+
+cover:: coverage_report
+
+$(GOCOVER_FILE)::
+ @${FIND} . -type d ! -path '*cmd*' ! -path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)"
+
+ @echo 'mode: set' > $(GOCOVER_FILE)
+ @${FIND} . -type f ! -path '*cmd*' ! -path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE)
+
+$(GOCOVERHTML): $(GOCOVER_FILE)
+ go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML)
+
+coverage_report:: $(GOCOVER_FILE)
+ go tool cover -html=$(GOCOVER_FILE)
+
+audit_tools::
+ @go get -u github.com/golang/lint/golint && echo "Installed golint:"
+ @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:"
+ @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:"
+ @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:"
+ @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:"
+
+audit::
+ deadcode
+ go tool vet -all *.go
+ go tool vet -shadow=true *.go
+ golint *.go
+ ineffassign .
+ gocyclo -over 65 *.go
+ misspell *.go
+
+clean::
+ rm -f $(GOCOVER_FILE) $(GOCOVERHTML)
+
+dev::
+ @go build
+ @$(MAKE) -B -C cmd/sockaddr sockaddr
+
+install::
+ @go install
+ @$(MAKE) -C cmd/sockaddr install
+
+doc::
+ @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/
+ godoc -http=:6161 -goroot $GOROOT
+
+world::
+ @set -e; \
+ for os in solaris darwin freebsd linux windows; do \
+ for arch in amd64; do \
+ printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \
+ env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \
+ done; \
+ done
+
+ $(MAKE) -C cmd/sockaddr world
diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE
new file mode 100644
index 0000000..a612ad9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md
new file mode 100644
index 0000000..a2e170a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/README.md
@@ -0,0 +1,118 @@
+# go-sockaddr
+
+## `sockaddr` Library
+
+Socket address convenience functions for Go. `go-sockaddr` is a convenience
+library that makes doing the right thing with IP addresses easy. `go-sockaddr`
+is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family
+of `sockaddr_t` types (see below for an ASCII diagram). Library documentation
+is available at
+[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr).
+The primary intent of the library was to make it possible to define heuristics
+for selecting the correct IP addresses when a configuration is evaluated at
+runtime. See the
+[docs](https://godoc.org/github.com/hashicorp/go-sockaddr),
+[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template),
+tests, and
+[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+for details and hints as to how to use this library.
+
+For example, with this library it is possible to find an IP address that:
+
+* is attached to a default route
+ ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces))
+* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork))
+* is an RFC1918 address
+ ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+* is ordered
+  ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where
+  `args` includes, but is not limited to,
+  [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType) and
+  [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize))
+* excludes all IPv6 addresses
+ ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType))
+* is larger than a `/32`
+ ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize))
+* is not on a `down` interface
+ ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs))
+* prefers an IPv6 address over an IPv4 address
+ ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) +
+ [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and
+* excludes any IP address in an RFC 6890 block
+ ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC))
+
+Or any combination or variation therein.
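+
+As a rough sketch of how those filters compose in Go (a minimal, illustrative
+example rather than a production-ready program; it only uses functions exported
+by this package, and error handling is kept deliberately terse):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+func main() {
+	// Start from every address on every interface.
+	ifAddrs, err := sockaddr.GetAllInterfaces()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Keep only RFC 6890 ("private") addresses.
+	private, _, err := sockaddr.IfByRFC("6890", ifAddrs)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Order by type and network size, then read the "address" attribute of
+	// the first entry (an empty string if nothing matched).
+	sorted, err := sockaddr.SortIfBy("+type,+size", private)
+	if err != nil {
+		log.Fatal(err)
+	}
+	addr, err := sockaddr.IfAttrs("address", sorted)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(addr)
+}
+```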
+
+There are also a few simple helper functions such as `GetPublicIP` and
+`GetPrivateIP` which both return strings and select the first public or private
+IP address on the default interface, respectively. Similarly, there is also a
+helper function called `GetInterfaceIP` which returns the first usable IP
+address on the named interface.
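+
+A minimal sketch of the helpers follows. The interface name regular expression
+passed to `GetInterfaceIP`, here `"eth0"`, is only an example and may not exist
+on a given host; both helpers return an empty string when nothing suitable is
+found:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+func main() {
+	// First RFC 6890 address with a default route, or "" if none was found.
+	privateIP, err := sockaddr.GetPrivateIP()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// First usable address on an interface whose name matches the regexp.
+	ifaceIP, err := sockaddr.GetInterfaceIP("eth0")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Printf("private=%q interface=%q\n", privateIP, ifaceIP)
+}
+```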
+
+## `sockaddr` CLI
+
+Given the possible complexity of the `sockaddr` library, there is a CLI utility
+that accompanies the library, also called
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr).
+The
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+utility exposes nearly all of the functionality of the library and can be used
+either as an administrative or a testing tool. To install the
+[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr)
+CLI, run:
+
+```text
+$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr
+```
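+
+Once installed, the same filtering pipeline can be exercised from the shell.
+The command below is lifted from the `eval` equivalents quoted in the package's
+doc comments and prints the address of the first private (RFC 6890) interface,
+if one exists:
+
+```text
+$ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
+```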
+
+If you're familiar with UNIX's `sockaddr` structs, the following diagram
+mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and
+interfaces will be helpful:
+
+```
++-------------------------------------------------------+
+| |
+| sockaddr |
+| SockAddr |
+| |
+| +--------------+ +----------------------------------+ |
+| | sockaddr_un | | | |
+| | SockAddrUnix | | sockaddr_in{,6} | |
+| +--------------+ | IPAddr | |
+| | | |
+| | +-------------+ +--------------+ | |
+| | | sockaddr_in | | sockaddr_in6 | | |
+| | | IPv4Addr | | IPv6Addr | | |
+| | +-------------+ +--------------+ | |
+| | | |
+| +----------------------------------+ |
+| |
++-------------------------------------------------------+
+```
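+
+In Go terms, the union is expressed through the `SockAddr` interface: concrete
+values such as `IPv4Addr` and `IPv6Addr` satisfy it, and the `To*` helpers
+(such as `ToIPAddr` and `ToIPv4Addr`) narrow a `SockAddr` back to a concrete
+type. A small sketch (the literal below is a documentation address, not
+anything on your system):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+func main() {
+	// MustIPAddr panics on invalid input, so it is best suited to literals.
+	var sa sockaddr.SockAddr = sockaddr.MustIPAddr("192.0.2.10")
+
+	// Narrow the union back to the concrete IPv4 type before using
+	// IPv4-specific behavior.
+	if sa.Type()&sockaddr.TypeIPv4 != 0 {
+		ipv4 := *sockaddr.ToIPv4Addr(sa)
+		fmt.Println(ipv4.NetIP().String())
+	}
+}
+```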
+
+## Inspiration and Design
+
+There were many subtle inspirations that led to this design, but the most direct
+inspiration for the filtering syntax was OpenBSD's
+[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS)
+firewall syntax that lets you select the first IP address on a given named
+interface. The original problem stemmed from:
+
+* needing to create immutable images using [Packer](https://www.packer.io) that
+ ran the [Consul](https://www.consul.io) process (Consul can only use one IP
+ address at a time);
+* images that may or may not have multiple interfaces or IP addresses at
+ runtime; and
+* not wanting to rely on configuration management to render out the correct
+ IP address if the VM image was being used in an auto-scaling group.
+
+Instead, we needed some way to codify a heuristic that would select the right
+IP address even though the input parameters were not known when the image was
+created.
diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go
new file mode 100644
index 0000000..90671de
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go
@@ -0,0 +1,5 @@
+/*
+Package sockaddr is a Go implementation of the UNIX socket family data types and
+related helper functions.
+*/
+package sockaddr
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
new file mode 100644
index 0000000..0811b27
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
@@ -0,0 +1,254 @@
+package sockaddr
+
+import "strings"
+
+// ifAddrAttrMap is a map of the IfAddr type-specific attributes.
+var ifAddrAttrMap map[AttrName]func(IfAddr) string
+var ifAddrAttrs []AttrName
+
+func init() {
+ ifAddrAttrInit()
+}
+
+// GetPrivateIP returns a string with a single IP address that is part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find an RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
+// ```
+func GetPrivateIP() (string, error) {
+ privateIfs, err := GetPrivateInterfaces()
+ if err != nil {
+ return "", err
+ }
+ if len(privateIfs) < 1 {
+ return "", nil
+ }
+
+ ifAddr := privateIfs[0]
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ return ip.NetIP().String(), nil
+}
+
+// GetPrivateIPs returns a string with all IP addresses that are part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPrivateIP). If the system can't find any RFC 6890 IP addresses, an empty
+// string will be returned instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPrivateIPs() (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) < 1 {
+ return "", nil
+ }
+
+ ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+ ifAddrs, _, err = IfByRFC("6890", ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ _, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// GetPublicIP returns a string with a single IP address that is NOT part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find a non RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}'
+// ```
+func GetPublicIP() (string, error) {
+ publicIfs, err := GetPublicInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(publicIfs) < 1 {
+ return "", nil
+ }
+
+ ifAddr := publicIfs[0]
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ return ip.NetIP().String(), nil
+}
+
+// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an
+// empty string will be returned instead. This function is the `eval`
+// equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPublicIPs() (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) < 1 {
+ return "", nil
+ }
+
+ ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+ _, ifAddrs, err = IfByRFC("6890", ifAddrs)
+ if err != nil {
+ return "", err
+ } else if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// GetInterfaceIP returns a string with a single IP address on the named
+// interface, chosen after sorting by the size of the network (i.e. IP addresses
+// with a smaller netmask, larger network size, sort first). This function is
+// the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | include "flag" "forwardable" | attr "address" }}'
+// ```
+func GetInterfaceIP(namedIfRE string) (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByFlag("forwardable", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ if len(ifAddrs) == 0 {
+ return "", err
+ }
+
+ ip := ToIPAddr(ifAddrs[0].SockAddr)
+ if ip == nil {
+ return "", err
+ }
+
+ return IPAddrAttr(*ip, "address"), nil
+}
+
+// GetInterfaceIPs returns a string with all IPs, sorted by the size of the
+// network (i.e. IP addresses with a smaller netmask, larger network size, are
+// sorted first), on a named interface. This function is the `eval` equivalent
+// of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | join "address" " "}}'
+// ```
+func GetInterfaceIPs(namedIfRE string) (string, error) {
+ ifAddrs, err := GetAllInterfaces()
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+ if err != nil {
+ return "", err
+ }
+
+ if len(ifAddrs) == 0 {
+ return "", err
+ }
+
+ ips := make([]string, 0, len(ifAddrs))
+ for _, ifAddr := range ifAddrs {
+ ip := *ToIPAddr(ifAddr.SockAddr)
+ s := ip.NetIP().String()
+ ips = append(ips, s)
+ }
+
+ return strings.Join(ips, " "), nil
+}
+
+// IfAddrAttrs returns a list of attributes supported by the IfAddr type
+func IfAddrAttrs() []AttrName {
+ return ifAddrAttrs
+}
+
+// IfAddrAttr returns a string representation of an attribute for the given
+// IfAddr.
+func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string {
+ fn, found := ifAddrAttrMap[attrName]
+ if !found {
+ return ""
+ }
+
+ return fn(ifAddr)
+}
+
+// ifAddrAttrInit is called once at init()
+func ifAddrAttrInit() {
+ // Sorted for human readability
+ ifAddrAttrs = []AttrName{
+ "flags",
+ "name",
+ }
+
+ ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{
+ "flags": func(ifAddr IfAddr) string {
+ return ifAddr.Interface.Flags.String()
+ },
+ "name": func(ifAddr IfAddr) string {
+ return ifAddr.Interface.Name
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
new file mode 100644
index 0000000..2a706c3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
@@ -0,0 +1,1281 @@
+package sockaddr
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+var (
+ // Centralize all regexps and regexp.Copy() where necessary.
+ signRE *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`)
+ whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`)
+ ifNameRE *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`)
+ ipAddrRE *regexp.Regexp = regexp.MustCompile(`^ IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`)
+)
+
+// IfAddrs is a slice of IfAddr
+type IfAddrs []IfAddr
+
+func (ifs IfAddrs) Len() int { return len(ifs) }
+
+// CmpIfAddrFunc is the function signature that must be met to be used in the
+// OrderedIfAddrBy multiIfAddrSorter
+type CmpIfAddrFunc func(p1, p2 *IfAddr) int
+
+// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
+type multiIfAddrSorter struct {
+ ifAddrs IfAddrs
+ cmp []CmpIfAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedIfAddrBy.
+func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
+ ms.ifAddrs = ifAddrs
+ sort.Sort(ms)
+}
+
+// OrderedIfAddrBy sorts IfAddrs by the list of sort function pointers.
+func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
+ return &multiIfAddrSorter{
+ cmp: cmpFuncs,
+ }
+}
+
+// Len is part of sort.Interface.
+func (ms *multiIfAddrSorter) Len() int {
+ return len(ms.ifAddrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the Cmp()
+// functions until it finds a comparison that is either less than or greater
+// than. A return value of 0 defers sorting to the next function in the
+// multisorter (which means a run of ties may leave the results in a
+// non-deterministic order).
+func (ms *multiIfAddrSorter) Less(i, j int) bool {
+ p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
+ // Try all but the last comparison.
+ var k int
+ for k = 0; k < len(ms.cmp)-1; k++ {
+ cmp := ms.cmp[k]
+ x := cmp(p, q)
+ switch x {
+ case -1:
+ // p < q, so we have a decision.
+ return true
+ case 1:
+ // p > q, so we have a decision.
+ return false
+ }
+ // p == q; try the next comparison.
+ }
+ // All comparisons to here said "equal", so just return whatever the
+ // final comparison reports.
+ switch ms.cmp[k](p, q) {
+ case -1:
+ return true
+ case 1:
+ return false
+ default:
+		// Still a tie: the sort order for the remaining items is
+		// undefined, so preserve the existing order.
+		return false
+ }
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiIfAddrSorter) Swap(i, j int) {
+ ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
+}
+
+// AscIfAddress is a sorting function to sort IfAddrs by their respective
+// addresses. Non-equal address types are deferred in the sort.
+func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+ return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
+// have a default route. Ties are deferred in the sort.
+//
+// FIXME: This is a particularly expensive sorting operation because of the
+// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
+// would be gathered once at the start of the sort and passed along as a context
+// or by wrapping the IfAddr type with this information (this would also solve
+// the inability to return errors and the possibility of failing silently).
+// Fortunately,
+// N*log(N) where N = 3 is only ~6.2 invocations. Not ideal, but not worth
+// optimizing today. The common case is this gets called once or twice.
+// Patches welcome.
+func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+ ri, err := NewRouteInfo()
+ if err != nil {
+ return sortDeferDecision
+ }
+
+ defaultIfName, err := ri.GetDefaultInterfaceName()
+ if err != nil {
+ return sortDeferDecision
+ }
+
+ switch {
+ case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName:
+ return sortDeferDecision
+ case p1Ptr.Interface.Name == defaultIfName:
+ return sortReceiverBeforeArg
+ case p2Ptr.Interface.Name == defaultIfName:
+ return sortArgBeforeReceiver
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscIfName is a sorting function to sort IfAddrs by their interface names.
+func AscIfName(p1Ptr, p2Ptr *IfAddr) int {
+ return strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective
+// network mask size.
+func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+ return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPort is a sorting function to sort IfAddrs by their respective
+// ports. Non-equal address types are deferred in the sort.
+func AscIfPort(p1Ptr, p2Ptr *IfAddr) int {
+ return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPrivate is a sorting function to sort IfAddrs by "private" values before
+// "public" values. Both IPv4 and IPv6 are compared against RFC6890 (RFC6890
+// includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and IPv6
+// includes RFC4193).
+func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+ return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfType is a sorting function to sort IfAddrs by their respective address
+// type. Non-equal types are deferred in the sort.
+func AscIfType(p1Ptr, p2Ptr *IfAddr) int {
+ return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfAddress is identical to AscIfAddress but reverse ordered.
+func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfDefault is identical to AscIfDefault but reverse ordered.
+func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscIfDefault(p1Ptr, p2Ptr)
+}
+
+// DescIfName is identical to AscIfName but reverse ordered.
+func DescIfName(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered.
+func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPort is identical to AscIfPort but reverse ordered.
+func DescIfPort(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPrivate is identical to AscIfPrivate but reverse ordered.
+func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfType is identical to AscIfType but reverse ordered.
+func DescIfType(p1Ptr, p2Ptr *IfAddr) int {
+ return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// FilterIfByType filters IfAddrs and returns a list of the matching type
+func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) {
+ excludedIfs = make(IfAddrs, 0, len(ifAddrs))
+ matchedIfs = make(IfAddrs, 0, len(ifAddrs))
+
+ for _, ifAddr := range ifAddrs {
+ if ifAddr.SockAddr.Type()&type_ != 0 {
+ matchedIfs = append(matchedIfs, ifAddr)
+ } else {
+ excludedIfs = append(excludedIfs, ifAddr)
+ }
+ }
+ return matchedIfs, excludedIfs
+}
+
+// IfAttr forwards the selector to IfAddr.Attr() for resolution on a single
+// IfAddr.
+func IfAttr(selectorName string, ifAddr IfAddr) (string, error) {
+ attrName := AttrName(strings.ToLower(selectorName))
+ attrVal, err := ifAddr.Attr(attrName)
+ return attrVal, err
+}
+
+// IfAttrs forwards the selector to the first IfAddr's Attr() method for
+// resolution. If there is more than one IfAddr, only the first IfAddr is used.
+func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) {
+ if len(ifAddrs) == 0 {
+ return "", nil
+ }
+
+ attrName := AttrName(strings.ToLower(selectorName))
+ attrVal, err := ifAddrs[0].Attr(attrName)
+ return attrVal, err
+}
+
+// GetAllInterfaces iterates over all available network interfaces, finds all
+// available IP addresses on each interface, converts them to sockaddr.IPAddrs,
+// and returns the result as an array of IfAddr.
+func GetAllInterfaces() (IfAddrs, error) {
+ ifs, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ ifAddrs := make(IfAddrs, 0, len(ifs))
+ for _, intf := range ifs {
+ addrs, err := intf.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ for _, addr := range addrs {
+ var ipAddr IPAddr
+ ipAddr, err = NewIPAddr(addr.String())
+ if err != nil {
+ return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String())
+ }
+
+ ifAddr := IfAddr{
+ SockAddr: ipAddr,
+ Interface: intf,
+ }
+ ifAddrs = append(ifAddrs, ifAddr)
+ }
+ }
+
+ return ifAddrs, nil
+}
+
+// GetDefaultInterfaces returns IfAddrs of the addresses attached to the default
+// route.
+func GetDefaultInterfaces() (IfAddrs, error) {
+ ri, err := NewRouteInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ defaultIfName, err := ri.GetDefaultInterfaceName()
+ if err != nil {
+ return nil, err
+ }
+
+	var defaultIfs, ifAddrs IfAddrs
+	ifAddrs, err = GetAllInterfaces()
+	if err != nil {
+		return nil, err
+	}
+
+ for _, ifAddr := range ifAddrs {
+ if ifAddr.Name == defaultIfName {
+ defaultIfs = append(defaultIfs, ifAddr)
+ }
+ }
+
+ return defaultIfs, nil
+}
+
+// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a
+// default route. If the system can't determine its IP address or find an RFC
+// 6890 IP address, an empty IfAddrs will be returned instead. This function is
+// the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}'
+// ```
+func GetPrivateInterfaces() (IfAddrs, error) {
+ privateIfs, err := GetAllInterfaces()
+ if err != nil {
+ return IfAddrs{}, err
+ }
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ privateIfs, _ = FilterIfByType(privateIfs, TypeIP)
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ privateIfs, _, err = IfByFlag("forwardable", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ privateIfs, _, err = IfByFlag("up", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs)
+
+ privateIfs, _, err = IfByRFC("6890", privateIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ } else if len(privateIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ return privateIfs, nil
+}
+
+// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and have a
+// default route. If the system can't determine its IP address or find a non
+// RFC 6890 IP address, an empty IfAddrs will be returned instead. This
+// function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}'
+// ```
+func GetPublicInterfaces() (IfAddrs, error) {
+ publicIfs, err := GetAllInterfaces()
+ if err != nil {
+ return IfAddrs{}, err
+ }
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ publicIfs, _ = FilterIfByType(publicIfs, TypeIP)
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ publicIfs, _, err = IfByFlag("forwardable", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ publicIfs, _, err = IfByFlag("up", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs)
+
+ _, publicIfs, err = IfByRFC("6890", publicIfs)
+ if err != nil {
+ return IfAddrs{}, err
+ } else if len(publicIfs) == 0 {
+ return IfAddrs{}, nil
+ }
+
+ return publicIfs, nil
+}
+
+// IfByAddress returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err)
+ }
+
+ matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ for _, addr := range ifAddrs {
+ if re.MatchString(addr.SockAddr.String()) {
+ matchedAddrs = append(matchedAddrs, addr)
+ } else {
+ excludedAddrs = append(excludedAddrs, addr)
+ }
+ }
+
+ return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByName returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err)
+ }
+
+ matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ for _, addr := range ifAddrs {
+ if re.MatchString(addr.Name) {
+ matchedAddrs = append(matchedAddrs, addr)
+ } else {
+ excludedAddrs = append(excludedAddrs, addr)
+ }
+ }
+
+ return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByPort returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
+func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) {
+ re, err := regexp.Compile(inputRe)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err)
+ }
+
+ ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP)
+ matchedIfs = make(IfAddrs, 0, len(ipIfs))
+ excludedIfs = append(IfAddrs(nil), nonIfs...)
+ for _, addr := range ipIfs {
+ ipAddr := ToIPAddr(addr.SockAddr)
+ if ipAddr == nil {
+ continue
+ }
+
+ port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10)
+ if re.MatchString(port) {
+ matchedIfs = append(matchedIfs, addr)
+ } else {
+ excludedIfs = append(excludedIfs, addr)
+ }
+ }
+
+ return matchedIfs, excludedIfs, nil
+}
+
+// IfByRFC returns a list of matched and non-matched IfAddrs that contain the
+// relevant RFC-specified traits.
+func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ inputRFC, err := strconv.ParseUint(selectorParam, 10, 64)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err)
+ }
+
+ matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+ remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+ rfcNetMap := KnownRFCs()
+ rfcNets, ok := rfcNetMap[uint(inputRFC)]
+ if !ok {
+ return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC)
+ }
+
+ for _, ifAddr := range ifAddrs {
+ var contained bool
+ for _, rfcNet := range rfcNets {
+ if rfcNet.Contains(ifAddr.SockAddr) {
+ matchedIfAddrs = append(matchedIfAddrs, ifAddr)
+ contained = true
+ break
+ }
+ }
+ if !contained {
+ remainingIfAddrs = append(remainingIfAddrs, ifAddr)
+ }
+ }
+
+ return matchedIfAddrs, remainingIfAddrs, nil
+}
+
+// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the
+// relevant RFC-specified traits. Multiple RFCs can be specified and separated
+// by the `|` symbol. No protection is taken to ensure an IfAddr does not end
+// up in both the included and excluded list.
+func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ var includedIfs, excludedIfs IfAddrs
+ for _, rfcStr := range strings.Split(selectorParam, "|") {
+ includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err)
+ }
+ includedIfs = append(includedIfs, includedRFCIfs...)
+ excludedIfs = append(excludedIfs, excludedRFCIfs...)
+ }
+
+ return includedIfs, excludedIfs, nil
+}
+
+// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the
+// matching mask size.
+func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) {
+ maskSize, err := strconv.ParseUint(selectorParam, 10, 64)
+ if err != nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err)
+ }
+
+ ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP)
+ matchedIfs = make(IfAddrs, 0, len(ipIfs))
+ excludedIfs = append(IfAddrs(nil), nonIfs...)
+ for _, addr := range ipIfs {
+ ipAddr := ToIPAddr(addr.SockAddr)
+ if ipAddr == nil {
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String())
+ }
+
+ switch {
+ case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32:
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize)
+ case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128:
+ return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize)
+ }
+
+ if (*ipAddr).Maskbits() == int(maskSize) {
+ matchedIfs = append(matchedIfs, addr)
+ } else {
+ excludedIfs = append(excludedIfs, addr)
+ }
+ }
+
+ return matchedIfs, excludedIfs, nil
+}
+
+// IfByType returns a list of matching and non-matching IfAddr that match the
+// specified type. For instance:
+//
+// include "type" "IPv4|IPv6"
+//
+// will include any IfAddr that is either an IPv4 or IPv6 address. Any
+// addresses on those interfaces that don't match will be included in the
+// remainder results.
+func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+ remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+ ifTypes := strings.Split(strings.ToLower(inputTypes), "|")
+ for _, ifType := range ifTypes {
+ switch ifType {
+ case "ip", "ipv4", "ipv6", "unix":
+ // Valid types
+ default:
+ return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes)
+ }
+ }
+
+ for _, ifAddr := range ifAddrs {
+ for _, ifType := range ifTypes {
+ var matched bool
+ switch {
+ case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0:
+ matched = true
+ case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0:
+ matched = true
+ case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0:
+ matched = true
+ case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0:
+ matched = true
+ }
+
+ if matched {
+ matchingIfAddrs = append(matchingIfAddrs, ifAddr)
+ } else {
+ remainingIfAddrs = append(remainingIfAddrs, ifAddr)
+ }
+ }
+ }
+
+ return matchingIfAddrs, remainingIfAddrs, nil
+}
+
+// IfByFlag returns a list of matching and non-matching IfAddrs that match the
+// specified flags. For instance:
+//
+// include "flag" "up|broadcast"
+//
+// will include any IfAddrs that have both the "up" and "broadcast" flags set.
+// Any addresses on those interfaces that don't match will be omitted from the
+// results.
+func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+ matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+ excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+ var wantForwardable,
+ wantGlobalUnicast,
+ wantInterfaceLocalMulticast,
+ wantLinkLocalMulticast,
+ wantLinkLocalUnicast,
+ wantLoopback,
+ wantMulticast,
+ wantUnspecified bool
+ var ifFlags net.Flags
+ var checkFlags, checkAttrs bool
+ for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") {
+ switch flagName {
+ case "broadcast":
+ checkFlags = true
+ ifFlags = ifFlags | net.FlagBroadcast
+ case "down":
+ checkFlags = true
+ ifFlags = (ifFlags &^ net.FlagUp)
+ case "forwardable":
+ checkAttrs = true
+ wantForwardable = true
+ case "global unicast":
+ checkAttrs = true
+ wantGlobalUnicast = true
+ case "interface-local multicast":
+ checkAttrs = true
+ wantInterfaceLocalMulticast = true
+ case "link-local multicast":
+ checkAttrs = true
+ wantLinkLocalMulticast = true
+ case "link-local unicast":
+ checkAttrs = true
+ wantLinkLocalUnicast = true
+ case "loopback":
+ checkAttrs = true
+ checkFlags = true
+ ifFlags = ifFlags | net.FlagLoopback
+ wantLoopback = true
+ case "multicast":
+ checkAttrs = true
+ checkFlags = true
+ ifFlags = ifFlags | net.FlagMulticast
+ wantMulticast = true
+ case "point-to-point":
+ checkFlags = true
+ ifFlags = ifFlags | net.FlagPointToPoint
+ case "unspecified":
+ checkAttrs = true
+ wantUnspecified = true
+ case "up":
+ checkFlags = true
+ ifFlags = ifFlags | net.FlagUp
+ default:
+ return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName)
+ }
+ }
+
+ for _, ifAddr := range ifAddrs {
+ var matched bool
+ if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags {
+ matched = true
+ }
+ if checkAttrs {
+ if ip := ToIPAddr(ifAddr.SockAddr); ip != nil {
+ netIP := (*ip).NetIP()
+ switch {
+ case wantGlobalUnicast && netIP.IsGlobalUnicast():
+ matched = true
+ case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast():
+ matched = true
+ case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast():
+ matched = true
+ case wantLinkLocalUnicast && netIP.IsLinkLocalUnicast():
+ matched = true
+ case wantLoopback && netIP.IsLoopback():
+ matched = true
+ case wantMulticast && netIP.IsMulticast():
+ matched = true
+ case wantUnspecified && netIP.IsUnspecified():
+ matched = true
+ case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr):
+ matched = true
+ }
+ }
+ }
+ if matched {
+ matchedAddrs = append(matchedAddrs, ifAddr)
+ } else {
+ excludedAddrs = append(excludedAddrs, ifAddr)
+ }
+ }
+ return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByNetwork returns an IfAddrs that are equal to or included within the
+// network passed in by selector.
+func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) {
+ var includedIfs, excludedIfs IfAddrs
+ for _, netStr := range strings.Split(selectorParam, "|") {
+ netAddr, err := NewIPAddr(netStr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err)
+ }
+
+ for _, ifAddr := range inputIfAddrs {
+ if netAddr.Contains(ifAddr.SockAddr) {
+ includedIfs = append(includedIfs, ifAddr)
+ } else {
+ excludedIfs = append(excludedIfs, ifAddr)
+ }
+ }
+ }
+
+ return includedIfs, excludedIfs, nil
+}
+
+// IfAddrMath will return a new IfAddr struct with a mutated value.
+func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) {
+ // Regexp used to enforce the sign being a required part of the grammar for
+ // some values.
+ signRe := signRE.Copy()
+
+ switch strings.ToLower(operation) {
+ case "address":
+ // "address" operates on the IP address and is allowed to overflow or
+		// underflow networks; however, it will wrap around the boundaries of the
+		// address's underlying type.
+
+ if !signRe.MatchString(value) {
+ return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+ }
+
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ // 33 == Accept any uint32 value
+ // TODO(seanc@): Add the ability to parse hex
+ i, err := strconv.ParseInt(value, 10, 33)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+ ipv4Uint32 := uint32(ipv4.Address)
+ ipv4Uint32 += uint32(i)
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: ipv4.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+			// 64 == Accept any int64 value
+ // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+ i, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+ ipv6BigIntA := new(big.Int)
+ ipv6BigIntA.Set(ipv6.Address)
+ ipv6BigIntB := big.NewInt(i)
+
+ ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB)
+ ipv6Addr.And(ipv6Addr, ipv6HostMask)
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(ipv6Addr),
+ Mask: ipv6.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ case "network":
+ // "network" operates on the network address. Positive values start at the
+ // network address and negative values wrap at the network address, which
+ // means a "-1" value on a network will be the broadcast address after
+ // wrapping is applied.
+
+ if !signRe.MatchString(value) {
+ return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+ }
+
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ // 33 == Accept any uint32 value
+ // TODO(seanc@): Add the ability to parse hex
+ i, err := strconv.ParseInt(value, 10, 33)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+ ipv4Uint32 := uint32(ipv4.NetworkAddress())
+
+ // Wrap along network mask boundaries. EZ-mode wrapping made possible by
+ // use of int64 vs a uint.
+ var wrappedMask int64
+ if i >= 0 {
+ wrappedMask = i
+ } else {
+ wrappedMask = 1 + i + int64(^uint32(ipv4.Mask))
+ }
+
+ ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask))
+
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: ipv4.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+			// 64 == Accept any int64 value
+ // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+ i, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+ ipv6BigInt := new(big.Int)
+ ipv6BigInt.Set(ipv6.NetworkAddress())
+
+ mask := new(big.Int)
+ mask.Set(ipv6.Mask)
+ if i > 0 {
+ wrappedMask := new(big.Int)
+ wrappedMask.SetInt64(i)
+
+ wrappedMask.AndNot(wrappedMask, mask)
+ ipv6BigInt.Add(ipv6BigInt, wrappedMask)
+ } else {
+ // Mask off any bits that exceed the network size. Subtract the
+ // wrappedMask from the last usable - 1
+ wrappedMask := new(big.Int)
+ wrappedMask.SetInt64(-1 * i)
+ wrappedMask.Sub(wrappedMask, big.NewInt(1))
+
+ wrappedMask.AndNot(wrappedMask, mask)
+
+ lastUsable := new(big.Int)
+ lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address)
+
+ ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask)
+ }
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(ipv6BigInt),
+ Mask: ipv6.Mask,
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ case "mask":
+ // "mask" operates on the IP address and returns the IP address on
+ // which the given integer mask has been applied. If the applied mask
+ // corresponds to a larger network than the mask of the IP address,
+ // the latter will be replaced by the former.
+ switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+ case TypeIPv4:
+ i, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ if i > 32 {
+ return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation)
+ }
+
+ ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+
+ ipv4Mask := net.CIDRMask(int(i), 32)
+ ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask)
+
+ maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask)
+ maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4)
+
+ maskedIpv4MaskUint32 := uint32(ipv4.Mask)
+
+ if ipv4MaskUint32 < maskedIpv4MaskUint32 {
+ maskedIpv4MaskUint32 = ipv4MaskUint32
+ }
+
+ return IfAddr{
+ SockAddr: IPv4Addr{
+ Address: IPv4Address(maskedIpv4Uint32),
+ Mask: IPv4Mask(maskedIpv4MaskUint32),
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ case TypeIPv6:
+ i, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+ }
+
+ if i > 128 {
+				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 128", operation)
+ }
+
+ ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+
+ ipv6Mask := net.CIDRMask(int(i), 128)
+ ipv6MaskBigInt := new(big.Int)
+ ipv6MaskBigInt.SetBytes(ipv6Mask)
+
+ maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask)
+ maskedIpv6BigInt := new(big.Int)
+ maskedIpv6BigInt.SetBytes(maskedIpv6)
+
+ maskedIpv6MaskBigInt := new(big.Int)
+ maskedIpv6MaskBigInt.Set(ipv6.Mask)
+
+ if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 {
+ maskedIpv6MaskBigInt = ipv6MaskBigInt
+ }
+
+ return IfAddr{
+ SockAddr: IPv6Addr{
+ Address: IPv6Address(maskedIpv6BigInt),
+ Mask: IPv6Mask(maskedIpv6MaskBigInt),
+ },
+ Interface: inputIfAddr.Interface,
+ }, nil
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+ }
+ default:
+ return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation)
+ }
+}
+
+// IfAddrsMath will apply an IfAddrMath operation to each IfAddr struct. Any
+// failure will result in zero results.
+func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ outputAddrs := make(IfAddrs, 0, len(inputIfAddrs))
+ for _, ifAddr := range inputIfAddrs {
+ result, err := IfAddrMath(operation, value, ifAddr)
+ if err != nil {
+ return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err)
+ }
+ outputAddrs = append(outputAddrs, result)
+ }
+ return outputAddrs, nil
+}
+
+// IncludeIfs returns an IfAddrs based on the passed in selector.
+func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ var includedIfs IfAddrs
+ var err error
+
+ switch strings.ToLower(selectorName) {
+ case "address":
+ includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs)
+ case "flag", "flags":
+ includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs)
+ case "name":
+ includedIfs, _, err = IfByName(selectorParam, inputIfAddrs)
+ case "network":
+ includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs)
+ case "port":
+ includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs)
+ case "rfc", "rfcs":
+ includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs)
+ case "size":
+ includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs)
+ case "type":
+ includedIfs, _, err = IfByType(selectorParam, inputIfAddrs)
+ default:
+ return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName)
+ }
+
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ return includedIfs, nil
+}
+
+// ExcludeIfs returns an IfAddrs based on the passed in selector.
+func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ var excludedIfs IfAddrs
+ var err error
+
+ switch strings.ToLower(selectorName) {
+ case "address":
+ _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs)
+ case "flag", "flags":
+ _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs)
+ case "name":
+ _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs)
+ case "network":
+ _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs)
+ case "port":
+ _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs)
+ case "rfc", "rfcs":
+ _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs)
+ case "size":
+ _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs)
+ case "type":
+ _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs)
+ default:
+ return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName)
+ }
+
+ if err != nil {
+ return IfAddrs{}, err
+ }
+
+ return excludedIfs, nil
+}
+
+// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple
+// sort clauses can be passed in as a comma delimited list without whitespace.
+func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ sortedIfs := append(IfAddrs(nil), inputIfAddrs...)
+
+ clauses := strings.Split(selectorParam, ",")
+ sortFuncs := make([]CmpIfAddrFunc, len(clauses))
+
+ for i, clause := range clauses {
+ switch strings.TrimSpace(strings.ToLower(clause)) {
+ case "+address", "address":
+ // The "address" selector returns an array of IfAddrs
+ // ordered by the network address. IfAddrs that are not
+ // comparable will be at the end of the list and in a
+ // non-deterministic order.
+ sortFuncs[i] = AscIfAddress
+ case "-address":
+ sortFuncs[i] = DescIfAddress
+ case "+default", "default":
+ sortFuncs[i] = AscIfDefault
+ case "-default":
+ sortFuncs[i] = DescIfDefault
+ case "+name", "name":
+ // The "name" selector returns an array of IfAddrs
+ // ordered by the interface name.
+ sortFuncs[i] = AscIfName
+ case "-name":
+ sortFuncs[i] = DescIfName
+ case "+port", "port":
+ // The "port" selector returns an array of IfAddrs
+ // ordered by the port, if included in the IfAddr.
+ // IfAddrs that are not comparable will be at the end of
+ // the list and in a non-deterministic order.
+ sortFuncs[i] = AscIfPort
+ case "-port":
+ sortFuncs[i] = DescIfPort
+ case "+private", "private":
+ // The "private" selector returns an array of IfAddrs
+ // ordered by private addresses first. IfAddrs that are
+ // not comparable will be at the end of the list and in
+ // a non-deterministic order.
+ sortFuncs[i] = AscIfPrivate
+ case "-private":
+ sortFuncs[i] = DescIfPrivate
+ case "+size", "size":
+ // The "size" selector returns an array of IfAddrs
+ // ordered by the size of the network mask, smaller mask
+ // (larger number of hosts per network) to largest
+ // (e.g. a /24 sorts before a /32).
+ sortFuncs[i] = AscIfNetworkSize
+ case "-size":
+ sortFuncs[i] = DescIfNetworkSize
+ case "+type", "type":
+ // The "type" selector returns an array of IfAddrs
+ // ordered by the type of the IfAddr. The sort order is
+ // Unix, IPv4, then IPv6.
+ sortFuncs[i] = AscIfType
+ case "-type":
+ sortFuncs[i] = DescIfType
+ default:
+ // Return an empty list for invalid sort types.
+ return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause)
+ }
+ }
+
+ OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs)
+
+ return sortedIfs, nil
+}
+
+// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching
+// selector. UniqueIfAddrsBy assumes the input has already been sorted.
+func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+ attrName := strings.ToLower(selectorName)
+
+ ifs := make(IfAddrs, 0, len(inputIfAddrs))
+ var lastMatch string
+ for _, ifAddr := range inputIfAddrs {
+ var out string
+ switch attrName {
+ case "address":
+ out = ifAddr.SockAddr.String()
+ case "name":
+ out = ifAddr.Name
+ default:
+ return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName)
+ }
+
+ switch {
+ case lastMatch == "", lastMatch != out:
+ lastMatch = out
+ ifs = append(ifs, ifAddr)
+ case lastMatch == out:
+ continue
+ }
+ }
+
+ return ifs, nil
+}
+
+// JoinIfAddrs joins an IfAddrs and returns a string
+func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) {
+ outputs := make([]string, 0, len(inputIfAddrs))
+ attrName := AttrName(strings.ToLower(selectorName))
+
+ for _, ifAddr := range inputIfAddrs {
+ var attrVal string
+ var err error
+ attrVal, err = ifAddr.Attr(attrName)
+ if err != nil {
+ return "", err
+ }
+ outputs = append(outputs, attrVal)
+ }
+ return strings.Join(outputs, joinStr), nil
+}
+
+// LimitIfAddrs returns a slice of IfAddrs based on the specified limit.
+func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) {
+ // Clamp the limit to the length of the array
+ if int(lim) > len(in) {
+ lim = uint(len(in))
+ }
+
+ return in[0:lim], nil
+}
+
+// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset.
+func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) {
+ var end bool
+ if off < 0 {
+ end = true
+ off = off * -1
+ }
+
+ if off > len(in) {
+ return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in))
+ }
+
+ if end {
+ return in[len(in)-off:], nil
+ }
+ return in[off:], nil
+}
+
+func (ifAddr IfAddr) String() string {
+ return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface)
+}
+
+// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs
+// and Solaris.
+func parseDefaultIfNameFromRoute(routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ for _, line := range lines {
+ kvs := strings.SplitN(line, ":", 2)
+ if len(kvs) != 2 {
+ continue
+ }
+
+ if strings.TrimSpace(kvs[0]) == "interface" {
+ ifName := strings.TrimSpace(kvs[1])
+ return ifName, nil
+ }
+ }
+
+ return "", errors.New("No default interface found")
+}
+
+// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for
+// Linux.
+func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ re := whitespaceRE.Copy()
+ for _, line := range lines {
+ kvs := re.Split(line, -1)
+ if len(kvs) < 5 {
+ continue
+ }
+
+ if kvs[0] == "default" &&
+ kvs[1] == "via" &&
+ kvs[3] == "dev" {
+ ifName := strings.TrimSpace(kvs[4])
+ return ifName, nil
+ }
+ }
+
+ return "", errors.New("No default interface found")
+}
+
+// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and
+// `ipconfig` on Windows.
+func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) {
+ defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut)
+ if err != nil {
+ return "", err
+ }
+
+ ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut)
+ if err != nil {
+ return "", err
+ }
+
+ return ifName, nil
+}
+
+// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface
+// `netstat -rn`.
+//
+// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an
+// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with
+// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6
+// support added.
+func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ re := whitespaceRE.Copy()
+ for _, line := range lines {
+ kvs := re.Split(strings.TrimSpace(line), -1)
+ if len(kvs) < 3 {
+ continue
+ }
+
+ if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" {
+ defaultIPAddr := strings.TrimSpace(kvs[3])
+ return defaultIPAddr, nil
+ }
+ }
+
+ return "", errors.New("No IP on default interface found")
+}
+
+// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the
+// interface name forwarding traffic to the default gateway.
+func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) {
+ lines := strings.Split(routeOut, "\n")
+ ifNameRe := ifNameRE.Copy()
+ ipAddrRe := ipAddrRE.Copy()
+ var ifName string
+ for _, line := range lines {
+ switch ifNameMatches := ifNameRe.FindStringSubmatch(line); {
+ case len(ifNameMatches) > 1:
+ ifName = ifNameMatches[1]
+ continue
+ }
+
+ switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); {
+ case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr:
+ return ifName, nil
+ }
+ }
+
+ return "", errors.New("No default interface found with matching IP")
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go
new file mode 100644
index 0000000..6984cb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go
@@ -0,0 +1,65 @@
+package sockaddr
+
+import (
+ "fmt"
+ "net"
+)
+
+// IfAddr is a union of a SockAddr and a net.Interface.
+type IfAddr struct {
+ SockAddr
+ net.Interface
+}
+
+// Attr returns the named attribute as a string
+func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) {
+ val := IfAddrAttr(ifAddr, attrName)
+ if val != "" {
+ return val, nil
+ }
+
+ return Attr(ifAddr.SockAddr, attrName)
+}
+
+// Attr returns the named attribute as a string
+func Attr(sa SockAddr, attrName AttrName) (string, error) {
+ switch sockType := sa.Type(); {
+ case sockType&TypeIP != 0:
+ ip := *ToIPAddr(sa)
+ attrVal := IPAddrAttr(ip, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+
+ if sockType == TypeIPv4 {
+ ipv4 := *ToIPv4Addr(sa)
+ attrVal := IPv4AddrAttr(ipv4, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ } else if sockType == TypeIPv6 {
+ ipv6 := *ToIPv6Addr(sa)
+ attrVal := IPv6AddrAttr(ipv6, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ }
+
+ case sockType == TypeUnix:
+ us := *ToUnixSock(sa)
+ attrVal := UnixSockAttr(us, attrName)
+ if attrVal != "" {
+ return attrVal, nil
+ }
+ }
+
+ // Non type-specific attributes
+ switch attrName {
+ case "string":
+ return sa.String(), nil
+ case "type":
+ return sa.Type().String(), nil
+ }
+
+ return "", fmt.Errorf("unsupported attribute name %q", attrName)
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go
new file mode 100644
index 0000000..b47d15c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go
@@ -0,0 +1,169 @@
+package sockaddr
+
+import (
+ "fmt"
+ "math/big"
+ "net"
+ "strings"
+)
+
+// Constants for the sizes of IPv3, IPv4, and IPv6 address types.
+const (
+ IPv3len = 6
+ IPv4len = 4
+ IPv6len = 16
+)
+
+// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses,
+// networks, and socket endpoints.
+type IPAddr interface {
+ SockAddr
+ AddressBinString() string
+ AddressHexString() string
+ Cmp(SockAddr) int
+ CmpAddress(SockAddr) int
+ CmpPort(SockAddr) int
+ FirstUsable() IPAddr
+ Host() IPAddr
+ IPPort() IPPort
+ LastUsable() IPAddr
+ Maskbits() int
+ NetIP() *net.IP
+ NetIPMask() *net.IPMask
+ NetIPNet() *net.IPNet
+ Network() IPAddr
+ Octets() []int
+}
+
+// IPPort is the type for an IP port number for the TCP and UDP IP transports.
+type IPPort uint16
+
+// IPPrefixLen is a typed integer representing the prefix length for a given
+// IPAddr.
+type IPPrefixLen byte
+
+// ipAddrAttrMap is a map of the IPAddr type-specific attributes.
+var ipAddrAttrMap map[AttrName]func(IPAddr) string
+var ipAddrAttrs []AttrName
+
+func init() {
+ ipAddrInit()
+}
+
+// NewIPAddr creates a new IPAddr from a string. Returns nil and an error if
+// the string is not an IPv4 or an IPv6 address.
+func NewIPAddr(addr string) (IPAddr, error) {
+ ipv4Addr, err := NewIPv4Addr(addr)
+ if err == nil {
+ return ipv4Addr, nil
+ }
+
+ ipv6Addr, err := NewIPv6Addr(addr)
+ if err == nil {
+ return ipv6Addr, nil
+ }
+
+ return nil, fmt.Errorf("invalid IPAddr %v", addr)
+}
+
+// IPAddrAttr returns a string representation of an attribute for the given
+// IPAddr.
+func IPAddrAttr(ip IPAddr, selector AttrName) string {
+ fn, found := ipAddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ip)
+}
+
+// IPAttrs returns a list of attributes supported by the IPAddr type
+func IPAttrs() []AttrName {
+ return ipAddrAttrs
+}
+
+// MustIPAddr is a helper method that returns an IPAddr, panicking on invalid
+// input.
+func MustIPAddr(addr string) IPAddr {
+ ip, err := NewIPAddr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err))
+ }
+ return ip
+}
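+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): NewIPAddr tries IPv4 first, then IPv6, so the concrete type of
+// the result depends on the input string.
+//
+//   v4, _ := sockaddr.NewIPAddr("10.1.2.3/8")   // concrete type IPv4Addr
+//   v6, _ := sockaddr.NewIPAddr("2001:db8::1")  // concrete type IPv6Addr
+//   _ = sockaddr.IPAddrAttr(v4, "mask_bits")    // "8"
+//   _ = sockaddr.IPAddrAttr(v6, "first_usable") // "2001:db8::1"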
+
+// ipAddrInit is called once at init()
+func ipAddrInit() {
+ // Sorted for human readability
+ ipAddrAttrs = []AttrName{
+ "host",
+ "address",
+ "port",
+ "netmask",
+ "network",
+ "mask_bits",
+ "binary",
+ "hex",
+ "first_usable",
+ "last_usable",
+ "octets",
+ }
+
+ ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{
+ "address": func(ip IPAddr) string {
+ return ip.NetIP().String()
+ },
+ "binary": func(ip IPAddr) string {
+ return ip.AddressBinString()
+ },
+ "first_usable": func(ip IPAddr) string {
+ return ip.FirstUsable().String()
+ },
+ "hex": func(ip IPAddr) string {
+ return ip.AddressHexString()
+ },
+ "host": func(ip IPAddr) string {
+ return ip.Host().String()
+ },
+ "last_usable": func(ip IPAddr) string {
+ return ip.LastUsable().String()
+ },
+ "mask_bits": func(ip IPAddr) string {
+ return fmt.Sprintf("%d", ip.Maskbits())
+ },
+ "netmask": func(ip IPAddr) string {
+ switch v := ip.(type) {
+ case IPv4Addr:
+ ipv4Mask := IPv4Addr{
+ Address: IPv4Address(v.Mask),
+ Mask: IPv4HostMask,
+ }
+ return ipv4Mask.String()
+ case IPv6Addr:
+ ipv6Mask := new(big.Int)
+ ipv6Mask.Set(v.Mask)
+ ipv6MaskAddr := IPv6Addr{
+ Address: IPv6Address(ipv6Mask),
+ Mask: ipv6HostMask,
+ }
+ return ipv6MaskAddr.String()
+ default:
+ return fmt.Sprintf("<unsupported type: %T>", ip)
+ }
+ },
+ "network": func(ip IPAddr) string {
+ return ip.Network().NetIP().String()
+ },
+ "octets": func(ip IPAddr) string {
+ octets := ip.Octets()
+ octetStrs := make([]string, 0, len(octets))
+ for _, octet := range octets {
+ octetStrs = append(octetStrs, fmt.Sprintf("%d", octet))
+ }
+ return strings.Join(octetStrs, " ")
+ },
+ "port": func(ip IPAddr) string {
+ return fmt.Sprintf("%d", ip.IPPort())
+ },
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
new file mode 100644
index 0000000..6eeb7dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go
@@ -0,0 +1,98 @@
+package sockaddr
+
+import "bytes"
+
+type IPAddrs []IPAddr
+
+func (s IPAddrs) Len() int { return len(s) }
+func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used
+// // by the routines in this package. The SortIPAddrsByCmp type is used to
+// // sort IPAddrs by Cmp()
+// type SortIPAddrsByCmp struct{ IPAddrs }
+
+// // Less reports whether the element with index i should sort before the
+// // element with index j.
+// func (s SortIPAddrsByCmp) Less(i, j int) bool {
+// // Sort by Type, then address, then port number.
+// return Less(s.IPAddrs[i], s.IPAddrs[j])
+// }
+
+// SortIPAddrsByNetworkSize is a type that satisfies sort.Interface and
+// can be used by the routines in this package. The
+// SortIPAddrsByNetworkSize type is used to sort IPAddrs by smallest
+// network (most specific to largest network).
+type SortIPAddrsByNetworkSize struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsByNetworkSize) Less(i, j int) bool {
+ // Sort masks with a larger binary value (i.e. fewer hosts per network
+ // prefix) after masks with a smaller value (larger number of hosts per
+ // prefix).
+ switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) {
+ case 0:
+ // Fall through to the second test if the net.IPMasks are the
+ // same.
+ break
+ case 1:
+ return true
+ case -1:
+ return false
+ default:
+ panic("bad, m'kay?")
+ }
+
+ // Sort IPs based on the length (i.e. prefer IPv4 over IPv6).
+ iLen := len(*s.IPAddrs[i].NetIP())
+ jLen := len(*s.IPAddrs[j].NetIP())
+ if iLen != jLen {
+ return iLen > jLen
+ }
+
+ // Sort IPs based on their network address from lowest to highest.
+ switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) {
+ case 0:
+ break
+ case 1:
+ return false
+ case -1:
+ return true
+ default:
+ panic("lol wut?")
+ }
+
+ // If a host does not have a port set, it always sorts after hosts
+ // that have a port (e.g. a host with a /32 and port number is more
+ // specific and should sort first over a host with a /32 but no port
+ // set).
+ if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 {
+ return false
+ }
+ return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort()
+}
+
+// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and
+// can be used by the routines in this package. The
+// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest
+// network (most specific to largest network).
+type SortIPAddrsBySpecificMaskLen struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool {
+ return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits()
+}
+
+// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can
+// be used by the routines in this package. The SortIPAddrsByBroadMaskLen
+// type is used to sort IPAddrs by largest network (i.e. largest subnets
+// first).
+type SortIPAddrsByBroadMaskLen struct{ IPAddrs }
+
+// Less reports whether the element with index i should sort before the
+// element with index j.
+func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool {
+ return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits()
+}
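+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr and that "sort" is imported by the caller): the wrapper types only
+// supply Less(), so they are handed to sort.Sort with an embedded IPAddrs.
+//
+//   addrs := sockaddr.IPAddrs{
+//       sockaddr.MustIPv4Addr("10.0.0.0/8"),
+//       sockaddr.MustIPv4Addr("10.1.2.0/24"),
+//       sockaddr.MustIPv4Addr("10.1.0.0/16"),
+//   }
+//   sort.Sort(sockaddr.SortIPAddrsBySpecificMaskLen{IPAddrs: addrs})
+//   // addrs is now ordered /24, /16, /8 (most specific network first).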
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go
new file mode 100644
index 0000000..4d395dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go
@@ -0,0 +1,516 @@
+package sockaddr
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type (
+ // IPv4Address is a named type representing an IPv4 address.
+ IPv4Address uint32
+
+ // IPv4Network is a named type representing an IPv4 network.
+ IPv4Network uint32
+
+ // IPv4Mask is a named type representing an IPv4 network mask.
+ IPv4Mask uint32
+)
+
+// IPv4HostMask is a constant that represents a /32 IPv4 netmask
+// (i.e. 255.255.255.255).
+const IPv4HostMask = IPv4Mask(0xffffffff)
+
+// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes.
+var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string
+var ipv4AddrAttrs []AttrName
+var trailingHexNetmaskRE *regexp.Regexp
+
+// IPv4Addr implements a convenience wrapper around the union of Go's
+// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements
+// `sockaddr` when the address family is set to AF_INET
+// (i.e. `sockaddr_in`).
+type IPv4Addr struct {
+ IPAddr
+ Address IPv4Address
+ Mask IPv4Mask
+ Port IPPort
+}
+
+func init() {
+ ipv4AddrInit()
+ trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`)
+}
+
+// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form
+// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is
+// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32`
+// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port
+// initialized to zero). ipv4Str can not be a hostname.
+//
+// NOTE: Many net.*() routines will initialize and return an IPv6 address.
+// To create uint32 values from net.IP, always test to make sure the address
+// returned can be converted to a 4 byte array using To4().
+func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) {
+ // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In
+ // particular, clients with the Barracuda VPN client will see something like:
+ // `192.168.3.51/00ffffff` as their IP address.
+ trailingHexNetmaskRe := trailingHexNetmaskRE.Copy()
+ if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil {
+ ipv4Str = ipv4Str[:match[0]]
+ }
+
+ // Parse as an IPv4 CIDR
+ ipAddr, network, err := net.ParseCIDR(ipv4Str)
+ if err == nil {
+ ipv4 := ipAddr.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str)
+ }
+
+ // If we see an IPv6 netmask, convert it to an IPv4 mask.
+ netmaskSepPos := strings.LastIndexByte(ipv4Str, '/')
+ if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) {
+ netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8)
+ if err != nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err)
+ } else if netMask > 128 {
+ return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str)
+ }
+
+ if netMask >= 96 {
+ // Convert the IPv6 netmask to an IPv4 netmask
+ network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8)
+ }
+ }
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(binary.BigEndian.Uint32(ipv4)),
+ Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)),
+ }
+ return ipv4Addr, nil
+ }
+
+ // Attempt to parse ipv4Str as a /32 host with a port number.
+ tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str)
+ if err == nil {
+ ipv4 := tcpAddr.IP.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str)
+ }
+
+ ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: IPv4HostMask,
+ Port: IPPort(tcpAddr.Port),
+ }
+
+ return ipv4Addr, nil
+ }
+
+ // Parse as a naked IPv4 address
+ ip := net.ParseIP(ipv4Str)
+ if ip != nil {
+ ipv4 := ip.To4()
+ if ipv4 == nil {
+ return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str)
+ }
+
+ ipv4Uint32 := binary.BigEndian.Uint32(ipv4)
+ ipv4Addr := IPv4Addr{
+ Address: IPv4Address(ipv4Uint32),
+ Mask: IPv4HostMask,
+ }
+ return ipv4Addr, nil
+ }
+
+ return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err)
+}
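+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): the three accepted input forms and the mask/port each implies.
+//
+//   a, _ := sockaddr.NewIPv4Addr("1.2.3.4")    // /32 mask, port 0
+//   b, _ := sockaddr.NewIPv4Addr("1.2.3.4:80") // /32 mask, port 80
+//   c, _ := sockaddr.NewIPv4Addr("1.2.3.4/24") // /24 mask, port 0
+//   _, _, _ = a, b, c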
+
+// AddressBinString returns a string with the IPv4Addr's Address represented
+// as a sequence of '0' and '1' characters. This method is useful for
+// debugging or by operators who want to inspect an address.
+func (ipv4 IPv4Addr) AddressBinString() string {
+ return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2))
+}
+
+// AddressHexString returns a string with the IPv4Addr address represented as
+// a sequence of hex characters. This method is useful for debugging or by
+// operators who want to inspect an address.
+func (ipv4 IPv4Addr) AddressHexString() string {
+ return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16))
+}
+
+// Broadcast is an IPv4Addr-only method that returns the broadcast address of
+// the network.
+//
+// NOTE: IPv6 only supports multicast, so this method only exists for
+// IPv4Addr.
+func (ipv4 IPv4Addr) Broadcast() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv4Addr{
+ Address: IPv4Address(ipv4.BroadcastAddress()),
+ Mask: IPv4HostMask,
+ }
+}
+
+// BroadcastAddress returns an IPv4Network of the IPv4Addr's broadcast
+// address.
+func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network {
+ return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask))
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its address is lower than arg
+// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is
+// of a different type.
+// - 1 If the argument should sort first.
+func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv4.Address == ipv4b.Address:
+ return sortDeferDecision
+ case ipv4.Address < ipv4b.Address:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpPort follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its port is lower than arg
+// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr,
+// regardless of type.
+// - 1 If the argument should sort first.
+func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int {
+ var saPort IPPort
+ switch v := sa.(type) {
+ case IPv4Addr:
+ saPort = v.Port
+ case IPv6Addr:
+ saPort = v.Port
+ default:
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv4.Port == saPort:
+ return sortDeferDecision
+ case ipv4.Port < saPort:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpRFC follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because it belongs to the RFC and its
+// arg does not
+// - 0 if the receiver and arg both belong to the same RFC or neither do.
+// - 1 If the arg belongs to the RFC but receiver does not.
+func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
+ recvInRFC := IsRFC(rfcNum, ipv4)
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ // If the receiver is part of the desired RFC and the SockAddr
+ // argument is not, return -1 so that the receiver sorts before
+ // the non-IPv4 SockAddr. Conversely, if the receiver is not
+ // part of the RFC, punt on sorting and leave it for the next
+ // sorter.
+ if recvInRFC {
+ return sortReceiverBeforeArg
+ } else {
+ return sortDeferDecision
+ }
+ }
+
+ argInRFC := IsRFC(rfcNum, ipv4b)
+ switch {
+ case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
+ // If a and b both belong to the RFC, or neither belong to
+ // rfcNum, defer sorting to the next sorter.
+ return sortDeferDecision
+ case recvInRFC && !argInRFC:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// Contains returns true if the SockAddr is contained within the receiver.
+func (ipv4 IPv4Addr) Contains(sa SockAddr) bool {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return false
+ }
+
+ return ipv4.ContainsNetwork(ipv4b)
+}
+
+// ContainsAddress returns true if the IPv4Address is contained within the
+// receiver.
+func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool {
+ return IPv4Address(ipv4.NetworkAddress()) <= x &&
+ IPv4Address(ipv4.BroadcastAddress()) >= x
+}
+
+// ContainsNetwork returns true if the network from IPv4Addr is contained
+// within the receiver.
+func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool {
+ return ipv4.NetworkAddress() <= x.NetworkAddress() &&
+ ipv4.BroadcastAddress() >= x.BroadcastAddress()
+}
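+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): containment is computed from the network and broadcast
+// addresses, so a /24 contains any /32 host inside it but not vice versa.
+//
+//   network := sockaddr.MustIPv4Addr("192.168.1.0/24")
+//   host := sockaddr.MustIPv4Addr("192.168.1.10")
+//   _ = network.Contains(host) // true
+//   _ = host.Contains(network) // false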
+
+// DialPacketArgs returns the arguments required to be passed to
+// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0,
+// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its
+// mask set to /32.
+func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) {
+ if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
+ return "udp4", ""
+ }
+ return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// DialStreamArgs returns the arguments required to be passed to
+// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0,
+// DialStreamArgs() will fail. See Host() to create an IPv4Addr with its
+// mask set to /32.
+func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) {
+ if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 {
+ return "tcp4", ""
+ }
+ return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// Equal returns true if a SockAddr is equal to the receiving IPv4Addr.
+func (ipv4 IPv4Addr) Equal(sa SockAddr) bool {
+ ipv4b, ok := sa.(IPv4Addr)
+ if !ok {
+ return false
+ }
+
+ if ipv4.Port != ipv4b.Port {
+ return false
+ }
+
+ if ipv4.Address != ipv4b.Address {
+ return false
+ }
+
+ if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() {
+ return false
+ }
+
+ return true
+}
+
+// FirstUsable returns an IPv4Addr set to the first address following the
+// network prefix. The first usable address in a network is normally the
+// gateway and should not be used except by devices forwarding packets
+// between two administratively distinct networks (i.e. a router). This
+// function does not discriminate against first usable vs "first address that
+// should be used." For example, FirstUsable() on "192.168.1.10/24" would
+// return the address "192.168.1.1".
+func (ipv4 IPv4Addr) FirstUsable() IPAddr {
+ addr := ipv4.NetworkAddress()
+
+ // If /32, return the address itself. If /31 assume a point-to-point
+ // link and return the lower address.
+ if ipv4.Maskbits() < 31 {
+ addr++
+ }
+
+ return IPv4Addr{
+ Address: IPv4Address(addr),
+ Mask: IPv4HostMask,
+ }
+}
+
+// Host returns a copy of ipv4 with its mask set to /32 so that it can be
+// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
+// ListenStreamArgs().
+func (ipv4 IPv4Addr) Host() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv4Addr{
+ Address: ipv4.Address,
+ Mask: IPv4HostMask,
+ Port: ipv4.Port,
+ }
+}
+
+// IPPort returns the Port number attached to the IPv4Addr
+func (ipv4 IPv4Addr) IPPort() IPPort {
+ return ipv4.Port
+}
+
+// LastUsable returns the last address before the broadcast address in a
+// given network.
+func (ipv4 IPv4Addr) LastUsable() IPAddr {
+ addr := ipv4.BroadcastAddress()
+
+ // If /32, return the address itself. If /31 assume a point-to-point
+ // link and return the upper address.
+ if ipv4.Maskbits() < 31 {
+ addr--
+ }
+
+ return IPv4Addr{
+ Address: IPv4Address(addr),
+ Mask: IPv4HostMask,
+ }
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs()
+// will fail. See Host() to create an IPv4Addr with its mask set to /32.
+func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) {
+ if ipv4.Mask != IPv4HostMask {
+ return "udp4", ""
+ }
+ return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs()
+// will fail. See Host() to create an IPv4Addr with its mask set to /32.
+func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) {
+ if ipv4.Mask != IPv4HostMask {
+ return "tcp4", ""
+ }
+ return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+}
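+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr and that "net" is imported by the caller): a CIDR-masked address
+// is first narrowed with Host() so the /32 requirement above is met.
+//
+//   addr := sockaddr.MustIPv4Addr("192.168.1.10/24")
+//   host := addr.Host().(sockaddr.IPv4Addr)        // mask forced to /32
+//   netType, listenArgs := host.ListenStreamArgs() // "tcp4", "192.168.1.10:0"
+//   ln, err := net.Listen(netType, listenArgs)
+//   _, _ = ln, err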
+
+// Maskbits returns the number of network mask bits in a given IPv4Addr. For
+// example, the Maskbits() of "192.168.1.1/24" would return 24.
+func (ipv4 IPv4Addr) Maskbits() int {
+ mask := make(net.IPMask, IPv4len)
+ binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask))
+ maskOnes, _ := mask.Size()
+ return maskOnes
+}
+
+// MustIPv4Addr is a helper method that returns an IPv4Addr, panicking on
+// invalid input.
+func MustIPv4Addr(addr string) IPv4Addr {
+ ipv4, err := NewIPv4Addr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err))
+ }
+ return ipv4
+}
+
+// NetIP returns the address as a net.IP (address is always presized to
+// IPv4).
+func (ipv4 IPv4Addr) NetIP() *net.IP {
+ x := make(net.IP, IPv4len)
+ binary.BigEndian.PutUint32(x, uint32(ipv4.Address))
+ return &x
+}
+
+// NetIPMask creates a new net.IPMask from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPMask() *net.IPMask {
+ ipv4Mask := make(net.IPMask, IPv4len)
+ binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask))
+ return &ipv4Mask
+}
+
+// NetIPNet creates a new net.IPNet from the IPv4Addr.
+func (ipv4 IPv4Addr) NetIPNet() *net.IPNet {
+ ipv4net := &net.IPNet{}
+ ipv4net.IP = make(net.IP, IPv4len)
+ binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress()))
+ ipv4net.Mask = *ipv4.NetIPMask()
+ return ipv4net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv4 IPv4Addr) Network() IPAddr {
+ return IPv4Addr{
+ Address: IPv4Address(ipv4.NetworkAddress()),
+ Mask: ipv4.Mask,
+ }
+}
+
+// NetworkAddress returns an IPv4Network of the IPv4Addr's network address.
+func (ipv4 IPv4Addr) NetworkAddress() IPv4Network {
+ return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask))
+}
+
+// Octets returns a slice of the four octets in an IPv4Addr's Address. The
+// order of the bytes is big endian.
+func (ipv4 IPv4Addr) Octets() []int {
+ return []int{
+ int(ipv4.Address >> 24),
+ int((ipv4.Address >> 16) & 0xff),
+ int((ipv4.Address >> 8) & 0xff),
+ int(ipv4.Address & 0xff),
+ }
+}
+
+// String returns a string representation of the IPv4Addr
+func (ipv4 IPv4Addr) String() string {
+ if ipv4.Port != 0 {
+ return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port)
+ }
+
+ if ipv4.Maskbits() == 32 {
+ return ipv4.NetIP().String()
+ }
+
+ return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv4
+func (IPv4Addr) Type() SockAddrType {
+ return TypeIPv4
+}
+
+// IPv4AddrAttr returns a string representation of an attribute for the given
+// IPv4Addr.
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string {
+ fn, found := ipv4AddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ipv4)
+}
+
+// IPv4Attrs returns a list of attributes supported by the IPv4Addr type
+func IPv4Attrs() []AttrName {
+ return ipv4AddrAttrs
+}
+
+// ipv4AddrInit is called once at init()
+func ipv4AddrInit() {
+ // Sorted for human readability
+ ipv4AddrAttrs = []AttrName{
+ "size", // Same position as in IPv6 for output consistency
+ "broadcast",
+ "uint32",
+ }
+
+ ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{
+ "broadcast": func(ipv4 IPv4Addr) string {
+ return ipv4.Broadcast().String()
+ },
+ "size": func(ipv4 IPv4Addr) string {
+ return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits()))
+ },
+ "uint32": func(ipv4 IPv4Addr) string {
+ return fmt.Sprintf("%d", uint32(ipv4.Address))
+ },
+ }
+}
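+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): the IPv4-specific attributes complement the generic IPAddr
+// attributes.
+//
+//   addr := sockaddr.MustIPv4Addr("192.168.1.10/24")
+//   _ = sockaddr.IPv4AddrAttr(addr, "size")      // "256"
+//   _ = sockaddr.IPv4AddrAttr(addr, "broadcast") // "192.168.1.255"
+//   _ = sockaddr.IPv4AddrAttr(addr, "uint32")    // "3232235786"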
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
new file mode 100644
index 0000000..d7f4121
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go
@@ -0,0 +1,591 @@
+package sockaddr
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "net"
+)
+
+type (
+ // IPv6Address is a named type representing an IPv6 address.
+ IPv6Address *big.Int
+
+ // IPv6Network is a named type representing an IPv6 network.
+ IPv6Network *big.Int
+
+ // IPv6Mask is a named type representing an IPv6 network mask.
+ IPv6Mask *big.Int
+)
+
+// IPv6HostPrefix is a constant that represents a /128 IPv6 Prefix.
+const IPv6HostPrefix = IPPrefixLen(128)
+
+// ipv6HostMask is an unexported big.Int representing a /128 IPv6 address.
+// This value must be a constant and always set to all ones.
+var ipv6HostMask IPv6Mask
+
+// ipv6AddrAttrMap is a map of the IPv6Addr type-specific attributes.
+var ipv6AddrAttrMap map[AttrName]func(IPv6Addr) string
+var ipv6AddrAttrs []AttrName
+
+func init() {
+ biMask := new(big.Int)
+ biMask.SetBytes([]byte{
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ )
+ ipv6HostMask = IPv6Mask(biMask)
+
+ ipv6AddrInit()
+}
+
+// IPv6Addr implements a convenience wrapper around the union of Go's
+// built-in net.IP and net.IPNet types. In UNIX-speak, IPv6Addr implements
+// `sockaddr` when the address family is set to AF_INET6
+// (i.e. `sockaddr_in6`).
+type IPv6Addr struct {
+ IPAddr
+ Address IPv6Address
+ Mask IPv6Mask
+ Port IPPort
+}
+
+// NewIPv6Addr creates an IPv6Addr from a string. String can be in the form of
+// an IPv6:port (e.g. `[2001:4860:0:2001::68]:80`, in which case the mask is
+// assumed to be a /128), an IPv6 address (e.g. `2001:4860:0:2001::68`, also
+// with a `/128` mask), or an IPv6 CIDR (e.g. `2001:4860:0:2001::68/64`, which
+// has its IP port initialized to zero). ipv6Str cannot be a hostname.
+//
+// NOTE: Many net.*() routines will initialize and return an IPv4 address.
+// Always test to make sure the address returned cannot be converted to a 4 byte
+// array using To4().
+func NewIPv6Addr(ipv6Str string) (IPv6Addr, error) {
+ v6Addr := false
+LOOP:
+ for i := 0; i < len(ipv6Str); i++ {
+ switch ipv6Str[i] {
+ case '.':
+ break LOOP
+ case ':':
+ v6Addr = true
+ break LOOP
+ }
+ }
+
+ if !v6Addr {
+ return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv6 address, appears to be an IPv4 address", ipv6Str)
+ }
+
+ // Attempt to parse ipv6Str as a /128 host with a port number.
+ tcpAddr, err := net.ResolveTCPAddr("tcp6", ipv6Str)
+ if err == nil {
+ ipv6 := tcpAddr.IP.To16()
+ if ipv6 == nil {
+ return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as a 16byte IPv6 address", ipv6Str)
+ }
+
+ ipv6BigIntAddr := new(big.Int)
+ ipv6BigIntAddr.SetBytes(ipv6)
+
+ ipv6BigIntMask := new(big.Int)
+ ipv6BigIntMask.Set(ipv6HostMask)
+
+ ipv6Addr := IPv6Addr{
+ Address: IPv6Address(ipv6BigIntAddr),
+ Mask: IPv6Mask(ipv6BigIntMask),
+ Port: IPPort(tcpAddr.Port),
+ }
+
+ return ipv6Addr, nil
+ }
+
+ // Parse as a naked IPv6 address. Trim square brackets if present.
+ if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' {
+ ipv6Str = ipv6Str[1 : len(ipv6Str)-1]
+ }
+ ip := net.ParseIP(ipv6Str)
+ if ip != nil {
+ ipv6 := ip.To16()
+ if ipv6 == nil {
+ return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str)
+ }
+
+ ipv6BigIntAddr := new(big.Int)
+ ipv6BigIntAddr.SetBytes(ipv6)
+
+ ipv6BigIntMask := new(big.Int)
+ ipv6BigIntMask.Set(ipv6HostMask)
+
+ return IPv6Addr{
+ Address: IPv6Address(ipv6BigIntAddr),
+ Mask: IPv6Mask(ipv6BigIntMask),
+ }, nil
+ }
+
+ // Parse as an IPv6 CIDR
+ ipAddr, network, err := net.ParseCIDR(ipv6Str)
+ if err == nil {
+ ipv6 := ipAddr.To16()
+ if ipv6 == nil {
+ return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str)
+ }
+
+ ipv6BigIntAddr := new(big.Int)
+ ipv6BigIntAddr.SetBytes(ipv6)
+
+ ipv6BigIntMask := new(big.Int)
+ ipv6BigIntMask.SetBytes(network.Mask)
+
+ ipv6Addr := IPv6Addr{
+ Address: IPv6Address(ipv6BigIntAddr),
+ Mask: IPv6Mask(ipv6BigIntMask),
+ }
+ return ipv6Addr, nil
+ }
+
+ return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err)
+}
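+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): the accepted input forms and the mask/port each implies.
+//
+//   a, _ := sockaddr.NewIPv6Addr("2001:db8::1")       // /128 mask, port 0
+//   b, _ := sockaddr.NewIPv6Addr("[2001:db8::1]:443") // /128 mask, port 443
+//   c, _ := sockaddr.NewIPv6Addr("2001:db8::/32")     // /32 mask, port 0
+//   _, _, _ = a, b, c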
+
+// AddressBinString returns a string with the IPv6Addr's Address represented
+// as a sequence of '0' and '1' characters. This method is useful for
+// debugging or by operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressBinString() string {
+ bi := big.Int(*ipv6.Address)
+ return fmt.Sprintf("%0128s", bi.Text(2))
+}
+
+// AddressHexString returns a string with the IPv6Addr address represented as
+// a sequence of hex characters. This method is useful for debugging or by
+// operators who want to inspect an address.
+func (ipv6 IPv6Addr) AddressHexString() string {
+ bi := big.Int(*ipv6.Address)
+ return fmt.Sprintf("%032s", bi.Text(16))
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its address is lower than arg
+// - 0 if the SockAddr arg equal to the receiving IPv6Addr or the argument is of a
+// different type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ ipv6aBigInt := new(big.Int)
+ ipv6aBigInt.Set(ipv6.Address)
+ ipv6bBigInt := new(big.Int)
+ ipv6bBigInt.Set(ipv6b.Address)
+
+ return ipv6aBigInt.Cmp(ipv6bBigInt)
+}
+
+// CmpPort follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its port is lower than arg
+// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr,
+// regardless of type.
+// - 1 If the argument should sort first.
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int {
+ var saPort IPPort
+ switch v := sa.(type) {
+ case IPv4Addr:
+ saPort = v.Port
+ case IPv6Addr:
+ saPort = v.Port
+ default:
+ return sortDeferDecision
+ }
+
+ switch {
+ case ipv6.Port == saPort:
+ return sortDeferDecision
+ case ipv6.Port < saPort:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// CmpRFC follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because it belongs to the RFC and its
+// arg does not
+// - 0 if the receiver and arg both belong to the same RFC or neither do.
+// - 1 If the arg belongs to the RFC but receiver does not.
+func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int {
+ recvInRFC := IsRFC(rfcNum, ipv6)
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ // If the receiver is part of the desired RFC and the SockAddr
+ // argument is not, sort receiver before the non-IPv6 SockAddr.
+ // Conversely, if the receiver is not part of the RFC, punt on
+ // sorting and leave it for the next sorter.
+ if recvInRFC {
+ return sortReceiverBeforeArg
+ } else {
+ return sortDeferDecision
+ }
+ }
+
+ argInRFC := IsRFC(rfcNum, ipv6b)
+ switch {
+ case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC):
+ // If a and b both belong to the RFC, or neither belong to
+ // rfcNum, defer sorting to the next sorter.
+ return sortDeferDecision
+ case recvInRFC && !argInRFC:
+ return sortReceiverBeforeArg
+ default:
+ return sortArgBeforeReceiver
+ }
+}
+
+// Contains returns true if the SockAddr is contained within the receiver.
+func (ipv6 IPv6Addr) Contains(sa SockAddr) bool {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return false
+ }
+
+ return ipv6.ContainsNetwork(ipv6b)
+}
+
+// ContainsAddress returns true if the IPv6Address is contained within the
+// receiver.
+func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool {
+ xAddr := IPv6Addr{
+ Address: x,
+ Mask: ipv6HostMask,
+ }
+
+ {
+ xIPv6 := xAddr.FirstUsable().(IPv6Addr)
+ yIPv6 := ipv6.FirstUsable().(IPv6Addr)
+ if xIPv6.CmpAddress(yIPv6) >= 1 {
+ return false
+ }
+ }
+
+ {
+ xIPv6 := xAddr.LastUsable().(IPv6Addr)
+ yIPv6 := ipv6.LastUsable().(IPv6Addr)
+ if xIPv6.CmpAddress(yIPv6) <= -1 {
+ return false
+ }
+ }
+ return true
+}
+
+// ContainsNetwork returns true if the network from IPv6Addr is contained within
+// the receiver.
+func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool {
+ {
+ xIPv6 := x.FirstUsable().(IPv6Addr)
+ yIPv6 := y.FirstUsable().(IPv6Addr)
+ if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 {
+ return false
+ }
+ }
+
+ {
+ xIPv6 := x.LastUsable().(IPv6Addr)
+ yIPv6 := y.LastUsable().(IPv6Addr)
+ if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 {
+ return false
+ }
+ }
+ return true
+}
+
+// DialPacketArgs returns the arguments required to be passed to
+// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0,
+// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its
+// mask set to /128.
+func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
+ return "udp6", ""
+ }
+ return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// DialStreamArgs returns the arguments required to be passed to
+// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0,
+// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its
+// mask set to /128.
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 {
+ return "tcp6", ""
+ }
+ return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Equal returns true if a SockAddr is equal to the receiving IPv6Addr.
+func (ipv6a IPv6Addr) Equal(sa SockAddr) bool {
+ ipv6b, ok := sa.(IPv6Addr)
+ if !ok {
+ return false
+ }
+
+ if ipv6a.NetIP().String() != ipv6b.NetIP().String() {
+ return false
+ }
+
+ if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() {
+ return false
+ }
+
+ if ipv6a.Port != ipv6b.Port {
+ return false
+ }
+
+ return true
+}
+
+// FirstUsable returns an IPv6Addr set to the first address following the
+// network prefix. The first usable address in a network is normally the
+// gateway and should not be used except by devices forwarding packets
+// between two administratively distinct networks (i.e. a router). This
+// function does not discriminate against first usable vs "first address that
+// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would
+// return "2001:0db8::00011".
+func (ipv6 IPv6Addr) FirstUsable() IPAddr {
+ return IPv6Addr{
+ Address: IPv6Address(ipv6.NetworkAddress()),
+ Mask: ipv6HostMask,
+ }
+}
+
+// Host returns a copy of ipv6 with its mask set to /128 so that it can be
+// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or
+// ListenStreamArgs().
+func (ipv6 IPv6Addr) Host() IPAddr {
+ // Nothing should listen on a broadcast address.
+ return IPv6Addr{
+ Address: ipv6.Address,
+ Mask: ipv6HostMask,
+ Port: ipv6.Port,
+ }
+}
+
+// IPPort returns the Port number attached to the IPv6Addr
+func (ipv6 IPv6Addr) IPPort() IPPort {
+ return ipv6.Port
+}
+
+// LastUsable returns the last address in a given network.
+func (ipv6 IPv6Addr) LastUsable() IPAddr {
+ addr := new(big.Int)
+ addr.Set(ipv6.Address)
+
+ mask := new(big.Int)
+ mask.Set(ipv6.Mask)
+
+ negMask := new(big.Int)
+ negMask.Xor(ipv6HostMask, mask)
+
+ lastAddr := new(big.Int)
+ lastAddr.And(addr, mask)
+ lastAddr.Or(lastAddr, negMask)
+
+ return IPv6Addr{
+ Address: IPv6Address(lastAddr),
+ Mask: ipv6HostMask,
+ }
+}
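+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): deriving the bounds of a /64.
+//
+//   network := sockaddr.MustIPv6Addr("2001:db8::/64")
+//   _ = network.FirstUsable().String() // "2001:db8::"
+//   _ = network.LastUsable().String()  // "2001:db8::ffff:ffff:ffff:ffff"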
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+ return "udp6", ""
+ }
+ return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs()
+// will fail. See Host() to create an IPv6Addr with its mask set to /128.
+func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) {
+ ipv6Mask := big.Int(*ipv6.Mask)
+ if ipv6Mask.Cmp(ipv6HostMask) != 0 {
+ return "tcp6", ""
+ }
+ return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+}
+
+// Maskbits returns the number of network mask bits in a given IPv6Addr. For
+// example, the Maskbits() of "2001:0db8::0003/64" would return 64.
+func (ipv6 IPv6Addr) Maskbits() int {
+ maskOnes, _ := ipv6.NetIPNet().Mask.Size()
+
+ return maskOnes
+}
+
+// MustIPv6Addr is a helper method that returns an IPv6Addr, panicking on
+// invalid input.
+func MustIPv6Addr(addr string) IPv6Addr {
+ ipv6, err := NewIPv6Addr(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err))
+ }
+ return ipv6
+}
+
+// NetIP returns the address as a net.IP.
+func (ipv6 IPv6Addr) NetIP() *net.IP {
+ return bigIntToNetIPv6(ipv6.Address)
+}
+
+// NetIPMask creates a new net.IPMask from the IPv6Addr.
+func (ipv6 IPv6Addr) NetIPMask() *net.IPMask {
+ ipv6Mask := make(net.IPMask, IPv6len)
+ m := big.Int(*ipv6.Mask)
+ copy(ipv6Mask, m.Bytes())
+ return &ipv6Mask
+}
+
+// NetIPNet creates a new net.IPNet from the IPv6Addr.
+func (ipv6 IPv6Addr) NetIPNet() *net.IPNet {
+ ipv6net := &net.IPNet{}
+ ipv6net.IP = make(net.IP, IPv6len)
+ copy(ipv6net.IP, *ipv6.NetIP())
+ ipv6net.Mask = *ipv6.NetIPMask()
+ return ipv6net
+}
+
+// Network returns the network prefix or network address for a given network.
+func (ipv6 IPv6Addr) Network() IPAddr {
+ return IPv6Addr{
+ Address: IPv6Address(ipv6.NetworkAddress()),
+ Mask: ipv6.Mask,
+ }
+}
+
+// NetworkAddress returns an IPv6Network of the IPv6Addr's network address.
+func (ipv6 IPv6Addr) NetworkAddress() IPv6Network {
+ addr := new(big.Int)
+ addr.SetBytes((*ipv6.Address).Bytes())
+
+ mask := new(big.Int)
+ mask.SetBytes(*ipv6.NetIPMask())
+
+ netAddr := new(big.Int)
+ netAddr.And(addr, mask)
+
+ return IPv6Network(netAddr)
+}
+
+// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The
+// order of the bytes is big endian.
+func (ipv6 IPv6Addr) Octets() []int {
+ x := make([]int, IPv6len)
+ for i, b := range *bigIntToNetIPv6(ipv6.Address) {
+ x[i] = int(b)
+ }
+
+ return x
+}
+
+// String returns a string representation of the IPv6Addr
+func (ipv6 IPv6Addr) String() string {
+ if ipv6.Port != 0 {
+ return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port)
+ }
+
+ if ipv6.Maskbits() == 128 {
+ return ipv6.NetIP().String()
+ }
+
+ return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits())
+}
+
+// Type is used as a type switch and returns TypeIPv6
+func (IPv6Addr) Type() SockAddrType {
+ return TypeIPv6
+}
+
+// IPv6Attrs returns a list of attributes supported by the IPv6Addr type
+func IPv6Attrs() []AttrName {
+ return ipv6AddrAttrs
+}
+
+// IPv6AddrAttr returns a string representation of an attribute for the given
+// IPv6Addr.
+func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string {
+ fn, found := ipv6AddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(ipv6)
+}
+
+// ipv6AddrInit is called once at init()
+func ipv6AddrInit() {
+ // Sorted for human readability
+ ipv6AddrAttrs = []AttrName{
+ "size", // Same position as in IPv6 for output consistency
+ "uint128",
+ }
+
+ ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{
+ "size": func(ipv6 IPv6Addr) string {
+ netSize := big.NewInt(1)
+ netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits()))
+ return netSize.Text(10)
+ },
+ "uint128": func(ipv6 IPv6Addr) string {
+ b := big.Int(*ipv6.Address)
+ return b.Text(10)
+ },
+ }
+}
+
+// bigIntToNetIPv6 is a helper function that returns a net.IP with correctly
+// padded values.
+func bigIntToNetIPv6(bi *big.Int) *net.IP {
+ x := make(net.IP, IPv6len)
+ ipv6Bytes := bi.Bytes()
+
+ // It's possible for ipv6Bytes to be less than IPv6len bytes in size. If
+ // they are different sizes we need to pad the response to IPv6len bytes.
+ if len(ipv6Bytes) < IPv6len {
+ buf := new(bytes.Buffer)
+ buf.Grow(IPv6len)
+
+ for i := len(ipv6Bytes); i < IPv6len; i++ {
+ if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil {
+ panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err))
+ }
+ }
+
+ for _, b := range ipv6Bytes {
+ if err := binary.Write(buf, binary.BigEndian, b); err != nil {
+ panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err))
+ }
+ }
+
+ ipv6Bytes = buf.Bytes()
+ }
+ i := copy(x, ipv6Bytes)
+ if i != IPv6len {
+ panic("IPv6 wrong size")
+ }
+ return &x
+}
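+
+// Usage sketch (illustrative only, same package): big.Int drops leading zero
+// bytes, so an address such as ::1 comes back as a 1-byte slice and is padded
+// to IPv6len here.
+//
+//   addr := MustIPv6Addr("::1")
+//   _ = bigIntToNetIPv6(addr.Address).String() // "::1"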
diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
new file mode 100644
index 0000000..02e188f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go
@@ -0,0 +1,948 @@
+package sockaddr
+
+// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP
+// blocks.
+const ForwardingBlacklist = 4294967295
+const ForwardingBlacklistRFC = "4294967295"
+
+// IsRFC tests to see if a SockAddr matches the specified RFC.
+func IsRFC(rfcNum uint, sa SockAddr) bool {
+ rfcNetMap := KnownRFCs()
+ rfcNets, ok := rfcNetMap[rfcNum]
+ if !ok {
+ return false
+ }
+
+ var contained bool
+ for _, rfcNet := range rfcNets {
+ if rfcNet.Contains(sa) {
+ contained = true
+ break
+ }
+ }
+ return contained
+}
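+
+// Usage sketch (illustrative only; assumes the package is imported as
+// sockaddr): checking membership in a few well-known ranges.
+//
+//   _ = sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("10.1.2.3")) // true
+//   _ = sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("8.8.8.8"))  // false
+//   _ = sockaddr.IsRFC(4193, sockaddr.MustIPv6Addr("fd00::1"))  // true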
+
+// KnownRFCs returns an initial set of known RFCs.
+//
+// NOTE (sean@): As this list evolves over time, please submit patches to keep
+// this list current. If something isn't right, inquire, as it may just be a
+// bug on my part. Some of the inclusions were based on my judgement as to what
+// would be a useful value (e.g. RFC3330).
+//
+// Useful resources:
+//
+// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml
+// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml
+func KnownRFCs() map[uint]SockAddrs {
+ // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a
+ // RADIX tree, but `ENOTIME`. Patches welcome.
+ return map[uint]SockAddrs{
+ 919: {
+ // [RFC919] Broadcasting Internet Datagrams
+ MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards
+ },
+ 1122: {
+ // [RFC1122] Requirements for Internet Hosts -- Communication Layers
+ MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3
+ MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3
+ },
+ 1112: {
+ // [RFC1112] Host Extensions for IP Multicasting
+ MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses
+ },
+ 1918: {
+ // [RFC1918] Address Allocation for Private Internets
+ MustIPv4Addr("10.0.0.0/8"),
+ MustIPv4Addr("172.16.0.0/12"),
+ MustIPv4Addr("192.168.0.0/16"),
+ },
+ 2544: {
+ // [RFC2544] Benchmarking Methodology for Network
+ // Interconnect Devices
+ MustIPv4Addr("198.18.0.0/15"),
+ },
+ 2765: {
+ // [RFC2765] Stateless IP/ICMP Translation Algorithm
+ // (SIIT) (obsoleted by RFC 6145, which itself was
+ // later obsoleted by 7915).
+
+ // [RFC2765], §2.1 Addresses
+ MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"),
+ },
+ 2928: {
+ // [RFC2928] Initial IPv6 Sub-TLA ID Assignments
+ MustIPv6Addr("2001::/16"), // Superblock
+ //MustIPv6Addr("2001:0000::/23"), // IANA
+ //MustIPv6Addr("2001:0200::/23"), // APNIC
+ //MustIPv6Addr("2001:0400::/23"), // ARIN
+ //MustIPv6Addr("2001:0600::/23"), // RIPE NCC
+ //MustIPv6Addr("2001:0800::/23"), // (future assignment)
+ // ...
+ //MustIPv6Addr("2001:FE00::/23"), // (future assignment)
+ },
+ 3056: { // 6to4 address
+ // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds
+
+ // [RFC3056], §2 IPv6 Prefix Allocation
+ MustIPv6Addr("2002::/16"),
+ },
+ 3068: {
+ // [RFC3068] An Anycast Prefix for 6to4 Relay Routers
+ // (obsoleted by RFC7526)
+
+ // [RFC3068], § 6to4 Relay anycast address
+ MustIPv4Addr("192.88.99.0/24"),
+
+ // [RFC3068], §2.5 6to4 IPv6 relay anycast address
+ //
+ // NOTE: /120 == 128-(32-24)
+ MustIPv6Addr("2002:c058:6301::/120"),
+ },
+ 3171: {
+ // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments
+ MustIPv4Addr("224.0.0.0/4"),
+ },
+ 3330: {
+ // [RFC3330] Special-Use IPv4 Addresses
+
+ // Addresses in this block refer to source hosts on
+ // "this" network. Address 0.0.0.0/32 may be used as a
+ // source address for this host on this network; other
+ // addresses within 0.0.0.0/8 may be used to refer to
+ // specified hosts on this network [RFC1700, page 4].
+ MustIPv4Addr("0.0.0.0/8"),
+
+ // 10.0.0.0/8 - This block is set aside for use in
+ // private networks. Its intended use is documented in
+ // [RFC1918]. Addresses within this block should not
+ // appear on the public Internet.
+ MustIPv4Addr("10.0.0.0/8"),
+
+ // 14.0.0.0/8 - This block is set aside for assignments
+ // to the international system of Public Data Networks
+ // [RFC1700, page 181]. The registry of assignments
+ // within this block can be accessed from the "Public
+ // Data Network Numbers" link on the web page at
+ // http://www.iana.org/numbers.html. Addresses within
+ // this block are assigned to users and should be
+ // treated as such.
+
+ // 24.0.0.0/8 - This block was allocated in early 1996
+ // for use in provisioning IP service over cable
+ // television systems. Although the IANA initially was
+ // involved in making assignments to cable operators,
+ // this responsibility was transferred to American
+ // Registry for Internet Numbers (ARIN) in May 2001.
+ // Addresses within this block are assigned in the
+ // normal manner and should be treated as such.
+
+ // 39.0.0.0/8 - This block was used in the "Class A
+ // Subnet Experiment" that commenced in May 1995, as
+ // documented in [RFC1797]. The experiment has been
+ // completed and this block has been returned to the
+ // pool of addresses reserved for future allocation or
+ // assignment. This block therefore no longer has a
+ // special use and is subject to allocation to a
+ // Regional Internet Registry for assignment in the
+ // normal manner.
+
+ // 127.0.0.0/8 - This block is assigned for use as the Internet host
+ // loopback address. A datagram sent by a higher level protocol to an
+ // address anywhere within this block should loop back inside the host.
+ // This is ordinarily implemented using only 127.0.0.1/32 for loopback,
+ // but no addresses within this block should ever appear on any network
+ // anywhere [RFC1700, page 5].
+ MustIPv4Addr("127.0.0.0/8"),
+
+ // 128.0.0.0/16 - This block, corresponding to the
+ // numerically lowest of the former Class B addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 169.254.0.0/16 - This is the "link local" block. It
+ // is allocated for communication between hosts on a
+ // single link. Hosts obtain these addresses by
+ // auto-configuration, such as when a DHCP server may
+ // not be found.
+ MustIPv4Addr("169.254.0.0/16"),
+
+ // 172.16.0.0/12 - This block is set aside for use in
+ // private networks. Its intended use is documented in
+ // [RFC1918]. Addresses within this block should not
+ // appear on the public Internet.
+ MustIPv4Addr("172.16.0.0/12"),
+
+ // 191.255.0.0/16 - This block, corresponding to the numerically highest
+ // of the former Class B addresses, was initially and is still reserved
+ // by the IANA. Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer applies and addresses
+ // in this block are subject to future allocation to a Regional Internet
+ // Registry for assignment in the normal manner.
+
+ // 192.0.0.0/24 - This block, corresponding to the
+ // numerically lowest of the former Class C addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in
+ // documentation and example code. It is often used in conjunction with
+ // domain names example.com or example.net in vendor and protocol
+ // documentation. Addresses within this block should not appear on the
+ // public Internet.
+ MustIPv4Addr("192.0.2.0/24"),
+
+ // 192.88.99.0/24 - This block is allocated for use as 6to4 relay
+ // anycast addresses, according to [RFC3068].
+ MustIPv4Addr("192.88.99.0/24"),
+
+ // 192.168.0.0/16 - This block is set aside for use in private networks.
+ // Its intended use is documented in [RFC1918]. Addresses within this
+ // block should not appear on the public Internet.
+ MustIPv4Addr("192.168.0.0/16"),
+
+ // 198.18.0.0/15 - This block has been allocated for use
+ // in benchmark tests of network interconnect devices.
+ // Its use is documented in [RFC2544].
+ MustIPv4Addr("198.18.0.0/15"),
+
+ // 223.255.255.0/24 - This block, corresponding to the
+ // numerically highest of the former Class C addresses,
+ // was initially and is still reserved by the IANA.
+ // Given the present classless nature of the IP address
+ // space, the basis for the reservation no longer
+ // applies and addresses in this block are subject to
+ // future allocation to a Regional Internet Registry for
+ // assignment in the normal manner.
+
+ // 224.0.0.0/4 - This block, formerly known as the Class
+ // D address space, is allocated for use in IPv4
+ // multicast address assignments. The IANA guidelines
+ // for assignments from this space are described in
+ // [RFC3171].
+ MustIPv4Addr("224.0.0.0/4"),
+
+ // 240.0.0.0/4 - This block, formerly known as the Class E address
+ // space, is reserved. The "limited broadcast" destination address
+ // 255.255.255.255 should never be forwarded outside the (sub-)net of
+ // the source. The remainder of this space is reserved
+ // for future use. [RFC1700, page 4]
+ MustIPv4Addr("240.0.0.0/4"),
+ },
+ 3849: {
+ // [RFC3849] IPv6 Address Prefix Reserved for Documentation
+ MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations
+ },
+ 3927: {
+ // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses
+ MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection
+ },
+ 4038: {
+ // [RFC4038] Application Aspects of IPv6 Transition
+
+ // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node
+ MustIPv6Addr("0:0:0:0:0:ffff::/96"),
+ },
+ 4193: {
+ // [RFC4193] Unique Local IPv6 Unicast Addresses
+ MustIPv6Addr("fc00::/7"),
+ },
+ 4291: {
+ // [RFC4291] IP Version 6 Addressing Architecture
+
+ // [RFC4291], §2.5.2 The Unspecified Address
+ MustIPv6Addr("::/128"),
+
+ // [RFC4291], §2.5.3 The Loopback Address
+ MustIPv6Addr("::1/128"),
+
+ // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address
+ MustIPv6Addr("::/96"),
+
+ // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address
+ MustIPv6Addr("::ffff:0:0/96"),
+
+ // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses
+ MustIPv6Addr("fe80::/10"),
+
+ // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses
+ // (deprecated)
+ MustIPv6Addr("fec0::/10"),
+
+ // [RFC4291], §2.7 Multicast Addresses
+ MustIPv6Addr("ff00::/8"),
+
+ // IPv6 Multicast Information.
+ //
+ // In the following "table" below, `ff0x` is replaced
+ // with the following values depending on the scope of
+ // the query:
+ //
+ // IPv6 Multicast Scopes:
+ // * ff00/9 // reserved
+ // * ff01/9 // interface-local
+ // * ff02/9 // link-local
+ // * ff03/9 // realm-local
+ // * ff04/9 // admin-local
+ // * ff05/9 // site-local
+ // * ff08/9 // organization-local
+ // * ff0e/9 // global
+ // * ff0f/9 // reserved
+ //
+ // IPv6 Multicast Addresses:
+ // * ff0x::2 // All routers
+ // * ff02::5 // OSPFIGP
+ // * ff02::6 // OSPFIGP Designated Routers
+ // * ff02::9 // RIP Routers
+ // * ff02::a // EIGRP Routers
+ // * ff02::d // All PIM Routers
+ // * ff02::1a // All RPL Routers
+ // * ff0x::fb // mDNSv6
+ // * ff0x::101 // All Network Time Protocol (NTP) servers
+ // * ff02::1:1 // Link Name
+ // * ff02::1:2 // All-dhcp-agents
+ // * ff02::1:3 // Link-local Multicast Name Resolution
+ // * ff05::1:3 // All-dhcp-servers
+ // * ff02::1:ff00:0/104 // Solicited-node multicast address.
+ // * ff02::2:ff00:0/104 // Node Information Queries
+ },
+ 4380: {
+ // [RFC4380] Teredo: Tunneling IPv6 over UDP through
+ // Network Address Translations (NATs)
+
+ // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix
+ MustIPv6Addr("2001:0000::/32"),
+ },
+ 4773: {
+ // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block
+ MustIPv6Addr("2001:0000::/23"), // IANA
+ },
+ 4843: {
+ // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID)
+ MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations
+ },
+ 5180: {
+ // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices
+ MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations
+ },
+ 5735: {
+ // [RFC5735] Special Use IPv4 Addresses
+ MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
+ MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+ MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
+ MustIPv4Addr("198.18.0.0/15"), // Benchmarks
+ },
+ 5737: {
+ // [RFC5737] IPv4 Address Blocks Reserved for Documentation
+ MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1
+ MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+ MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3
+ },
+ 6052: {
+ // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators
+ MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix
+ },
+ 6333: {
+ // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion
+ MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address
+ },
+ 6598: {
+ // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space
+ MustIPv4Addr("100.64.0.0/10"),
+ },
+ 6666: {
+ // [RFC6666] A Discard Prefix for IPv6
+ MustIPv6Addr("0100::/64"),
+ },
+ 6890: {
+ // [RFC6890] Special-Purpose IP Address Registries
+
+ // From "RFC6890 §2.2.1 Information Requirements":
+ /*
+ The IPv4 and IPv6 Special-Purpose Address Registries maintain the
+ following information regarding each entry:
+
+ o Address Block - A block of IPv4 or IPv6 addresses that has been
+ registered for a special purpose.
+
+ o Name - A descriptive name for the special-purpose address block.
+
+ o RFC - The RFC through which the special-purpose address block was
+ requested.
+
+ o Allocation Date - The date upon which the special-purpose address
+ block was allocated.
+
+ o Termination Date - The date upon which the allocation is to be
+ terminated. This field is applicable for limited-use allocations
+ only.
+
+ o Source - A boolean value indicating whether an address from the
+ allocated special-purpose address block is valid when used as the
+ source address of an IP datagram that transits two devices.
+
+ o Destination - A boolean value indicating whether an address from
+ the allocated special-purpose address block is valid when used as
+ the destination address of an IP datagram that transits two
+ devices.
+
+ o Forwardable - A boolean value indicating whether a router may
+ forward an IP datagram whose destination address is drawn from the
+ allocated special-purpose address block between external
+ interfaces.
+
+ o Global - A boolean value indicating whether an IP datagram whose
+ destination address is drawn from the allocated special-purpose
+ address block is forwardable beyond a specified administrative
+ domain.
+
+ o Reserved-by-Protocol - A boolean value indicating whether the
+ special-purpose address block is reserved by IP, itself. This
+ value is "TRUE" if the RFC that created the special-purpose
+ address block requires all compliant IP implementations to behave
+ in a special way when processing packets either to or from
+ addresses contained by the address block.
+
+ If the value of "Destination" is FALSE, the values of "Forwardable"
+ and "Global" must also be false.
+ */
+
+ /*+----------------------+----------------------------+
+ * | Attribute | Value |
+ * +----------------------+----------------------------+
+ * | Address Block | 0.0.0.0/8 |
+ * | Name | "This host on this network"|
+ * | RFC | [RFC1122], Section 3.2.1.3 |
+ * | Allocation Date | September 1981 |
+ * | Termination Date | N/A |
+ * | Source | True |
+ * | Destination | False |
+ * | Forwardable | False |
+ * | Global | False |
+ * | Reserved-by-Protocol | True |
+ * +----------------------+----------------------------+*/
+ MustIPv4Addr("0.0.0.0/8"),
+
+ /*+----------------------+---------------+
+ * | Attribute | Value |
+ * +----------------------+---------------+
+ * | Address Block | 10.0.0.0/8 |
+ * | Name | Private-Use |
+ * | RFC | [RFC1918] |
+ * | Allocation Date | February 1996 |
+ * | Termination Date | N/A |
+ * | Source | True |
+ * | Destination | True |
+ * | Forwardable | True |
+ * | Global | False |
+ * | Reserved-by-Protocol | False |
+ * +----------------------+---------------+ */
+ MustIPv4Addr("10.0.0.0/8"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 100.64.0.0/10 |
+ | Name | Shared Address Space |
+ | RFC | [RFC6598] |
+ | Allocation Date | April 2012 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("100.64.0.0/10"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 127.0.0.0/8 |
+ | Name | Loopback |
+ | RFC | [RFC1122], Section 3.2.1.3 |
+ | Allocation Date | September 1981 |
+ | Termination Date | N/A |
+ | Source | False [1] |
+ | Destination | False [1] |
+ | Forwardable | False [1] |
+ | Global | False [1] |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------------------+*/
+ // [1] Several protocols have been granted exceptions to
+ // this rule. For examples, see [RFC4379] and
+ // [RFC5884].
+ MustIPv4Addr("127.0.0.0/8"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 169.254.0.0/16 |
+ | Name | Link Local |
+ | RFC | [RFC3927] |
+ | Allocation Date | May 2005 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------+*/
+ MustIPv4Addr("169.254.0.0/16"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 172.16.0.0/12 |
+ | Name | Private-Use |
+ | RFC | [RFC1918] |
+ | Allocation Date | February 1996 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ MustIPv4Addr("172.16.0.0/12"),
+
+ /*+----------------------+---------------------------------+
+ | Attribute | Value |
+ +----------------------+---------------------------------+
+ | Address Block | 192.0.0.0/24 [2] |
+ | Name | IETF Protocol Assignments |
+ | RFC | Section 2.1 of this document |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------------------+*/
+ // [2] Not usable unless by virtue of a more specific
+ // reservation.
+ MustIPv4Addr("192.0.0.0/24"),
+
+ /*+----------------------+--------------------------------+
+ | Attribute | Value |
+ +----------------------+--------------------------------+
+ | Address Block | 192.0.0.0/29 |
+ | Name | IPv4 Service Continuity Prefix |
+ | RFC | [RFC6333], [RFC7335] |
+ | Allocation Date | June 2011 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------------------------+*/
+ MustIPv4Addr("192.0.0.0/29"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 192.0.2.0/24 |
+ | Name | Documentation (TEST-NET-1) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("192.0.2.0/24"),
+
+ /*+----------------------+--------------------+
+ | Attribute | Value |
+ +----------------------+--------------------+
+ | Address Block | 192.88.99.0/24 |
+ | Name | 6to4 Relay Anycast |
+ | RFC | [RFC3068] |
+ | Allocation Date | June 2001 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | True |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------------+*/
+ MustIPv4Addr("192.88.99.0/24"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 192.168.0.0/16 |
+ | Name | Private-Use |
+ | RFC | [RFC1918] |
+ | Allocation Date | February 1996 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ MustIPv4Addr("192.168.0.0/16"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 198.18.0.0/15 |
+ | Name | Benchmarking |
+ | RFC | [RFC2544] |
+ | Allocation Date | March 1999 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ MustIPv4Addr("198.18.0.0/15"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 198.51.100.0/24 |
+ | Name | Documentation (TEST-NET-2) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("198.51.100.0/24"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 203.0.113.0/24 |
+ | Name | Documentation (TEST-NET-3) |
+ | RFC | [RFC5737] |
+ | Allocation Date | January 2010 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv4Addr("203.0.113.0/24"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 240.0.0.0/4 |
+ | Name | Reserved |
+ | RFC | [RFC1112], Section 4 |
+ | Allocation Date | August 1989 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("240.0.0.0/4"),
+
+ /*+----------------------+----------------------+
+ | Attribute | Value |
+ +----------------------+----------------------+
+ | Address Block | 255.255.255.255/32 |
+ | Name | Limited Broadcast |
+ | RFC | [RFC0919], Section 7 |
+ | Allocation Date | October 1984 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------+*/
+ MustIPv4Addr("255.255.255.255/32"),
+
+ /*+----------------------+------------------+
+ | Attribute | Value |
+ +----------------------+------------------+
+ | Address Block | ::1/128 |
+ | Name | Loopback Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+------------------+*/
+ MustIPv6Addr("::1/128"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | ::/128 |
+ | Name | Unspecified Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("::/128"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | 64:ff9b::/96 |
+ | Name | IPv4-IPv6 Translat. |
+ | RFC | [RFC6052] |
+ | Allocation Date | October 2010 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | True |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("64:ff9b::/96"),
+
+ /*+----------------------+---------------------+
+ | Attribute | Value |
+ +----------------------+---------------------+
+ | Address Block | ::ffff:0:0/96 |
+ | Name | IPv4-mapped Address |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+---------------------+*/
+ MustIPv6Addr("::ffff:0:0/96"),
+
+ /*+----------------------+----------------------------+
+ | Attribute | Value |
+ +----------------------+----------------------------+
+ | Address Block | 100::/64 |
+ | Name | Discard-Only Address Block |
+ | RFC | [RFC6666] |
+ | Allocation Date | June 2012 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------------------+*/
+ MustIPv6Addr("100::/64"),
+
+ /*+----------------------+---------------------------+
+ | Attribute | Value |
+ +----------------------+---------------------------+
+ | Address Block | 2001::/23 |
+ | Name | IETF Protocol Assignments |
+ | RFC | [RFC2928] |
+ | Allocation Date | September 2000 |
+ | Termination Date | N/A |
+ | Source | False[1] |
+ | Destination | False[1] |
+ | Forwardable | False[1] |
+ | Global | False[1] |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------------------+*/
+ // [1] Unless allowed by a more specific allocation.
+ MustIPv6Addr("2001::/16"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 2001::/32 |
+ | Name | TEREDO |
+ | RFC | [RFC4380] |
+ | Allocation Date | January 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001::/16"),
+
+ /*+----------------------+----------------+
+ | Attribute | Value |
+ +----------------------+----------------+
+ | Address Block | 2001:2::/48 |
+ | Name | Benchmarking |
+ | RFC | [RFC5180] |
+ | Allocation Date | April 2008 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+----------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:2::/48"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 2001:db8::/32 |
+ | Name | Documentation |
+ | RFC | [RFC3849] |
+ | Allocation Date | July 2004 |
+ | Termination Date | N/A |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:db8::/32"),
+
+ /*+----------------------+--------------+
+ | Attribute | Value |
+ +----------------------+--------------+
+ | Address Block | 2001:10::/28 |
+ | Name | ORCHID |
+ | RFC | [RFC4843] |
+ | Allocation Date | March 2007 |
+ | Termination Date | March 2014 |
+ | Source | False |
+ | Destination | False |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------+*/
+ // Covered by previous entry, included for completeness.
+ //
+ // MustIPv6Addr("2001:10::/28"),
+
+ /*+----------------------+---------------+
+ | Attribute | Value |
+ +----------------------+---------------+
+ | Address Block | 2002::/16 [2] |
+ | Name | 6to4 |
+ | RFC | [RFC3056] |
+ | Allocation Date | February 2001 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | N/A [2] |
+ | Reserved-by-Protocol | False |
+ +----------------------+---------------+*/
+ // [2] See [RFC3056] for details.
+ MustIPv6Addr("2002::/16"),
+
+ /*+----------------------+--------------+
+ | Attribute | Value |
+ +----------------------+--------------+
+ | Address Block | fc00::/7 |
+ | Name | Unique-Local |
+ | RFC | [RFC4193] |
+ | Allocation Date | October 2005 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | True |
+ | Global | False |
+ | Reserved-by-Protocol | False |
+ +----------------------+--------------+*/
+ MustIPv6Addr("fc00::/7"),
+
+ /*+----------------------+-----------------------+
+ | Attribute | Value |
+ +----------------------+-----------------------+
+ | Address Block | fe80::/10 |
+ | Name | Linked-Scoped Unicast |
+ | RFC | [RFC4291] |
+ | Allocation Date | February 2006 |
+ | Termination Date | N/A |
+ | Source | True |
+ | Destination | True |
+ | Forwardable | False |
+ | Global | False |
+ | Reserved-by-Protocol | True |
+ +----------------------+-----------------------+*/
+ MustIPv6Addr("fe80::/10"),
+ },
+ 7335: {
+ // [RFC7335] IPv4 Service Continuity Prefix
+ MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations
+ },
+ ForwardingBlacklist: { // Pseudo-RFC
+ // Blacklist of non-forwardable IP blocks taken from RFC6890
+ //
+ // TODO: the attributes for forwardable should be
+			// searchable and embedded in the main list of RFCs
+ // above.
+ MustIPv4Addr("0.0.0.0/8"),
+ MustIPv4Addr("127.0.0.0/8"),
+ MustIPv4Addr("169.254.0.0/16"),
+ MustIPv4Addr("192.0.0.0/24"),
+ MustIPv4Addr("192.0.2.0/24"),
+ MustIPv4Addr("198.51.100.0/24"),
+ MustIPv4Addr("203.0.113.0/24"),
+ MustIPv4Addr("240.0.0.0/4"),
+ MustIPv4Addr("255.255.255.255/32"),
+ MustIPv6Addr("::1/128"),
+ MustIPv6Addr("::/128"),
+ MustIPv6Addr("::ffff:0:0/96"),
+
+			// There is currently no way of expressing a whitelist
+			// per RFC2928 without creating a negative mask, which I
+			// don't want to do at the moment.
+ //MustIPv6Addr("2001::/23"),
+
+ MustIPv6Addr("2001:db8::/32"),
+ MustIPv6Addr("2001:10::/28"),
+ MustIPv6Addr("fe80::/10"),
+ },
+ }
+}
+
+// VisitAllRFCs iterates over all known RFCs and calls the visitor fn for
+// each, skipping the pseudo-RFC entries.
+func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) {
+ rfcNetMap := KnownRFCs()
+
+ // Blacklist of faux-RFCs. Don't show the world that we're abusing the
+ // RFC system in this library.
+ rfcBlacklist := map[uint]struct{}{
+ ForwardingBlacklist: {},
+ }
+
+ for rfcNum, sas := range rfcNetMap {
+ if _, found := rfcBlacklist[rfcNum]; !found {
+ fn(rfcNum, sas)
+ }
+ }
+}
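
As a usage note, a minimal sketch of how a consumer of this package might walk the table above via VisitAllRFCs (import path as vendored here; output is illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        // Print each RFC number and how many networks are registered for it.
        // Pseudo-RFC entries such as ForwardingBlacklist are skipped by the
        // visitor itself.
        sockaddr.VisitAllRFCs(func(rfcNum uint, sas sockaddr.SockAddrs) {
            fmt.Printf("RFC%d: %d networks\n", rfcNum, len(sas))
        })
    }
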
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go
new file mode 100644
index 0000000..2a3ee1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go
@@ -0,0 +1,19 @@
+package sockaddr
+
+// RouteInterface specifies an interface for obtaining memoized route table and
+// network information from a given OS.
+type RouteInterface interface {
+	// GetDefaultInterfaceName returns the name of the interface that has a
+	// default route, or an empty string and an error if a problem was
+	// encountered.
+ GetDefaultInterfaceName() (string, error)
+}
+
+// VisitCommands visits each command used by the platform-specific RouteInfo
+// implementation.
+func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) {
+ for k, v := range ri.cmds {
+ cmds := append([]string(nil), v...)
+ fn(k, cmds)
+ }
+}
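
A minimal sketch of how the RouteInterface contract is typically consumed, assuming one of the supported platforms below so that NewRouteInfo is defined:

    package main

    import (
        "fmt"
        "log"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        ri, err := sockaddr.NewRouteInfo()
        if err != nil {
            log.Fatal(err)
        }

        // Show which external commands this platform's implementation
        // shells out to (e.g. route, ip, or netstat).
        ri.VisitCommands(func(name string, cmd []string) {
            fmt.Println(name, cmd)
        })

        // Resolve the interface that carries the default route.
        ifName, err := ri.GetDefaultInterfaceName()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("default interface:", ifName)
    }
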
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go
new file mode 100644
index 0000000..705757a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go
@@ -0,0 +1,36 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+ "route": {"/sbin/route", "-n", "get", "default"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a BSD-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+ return "", err
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go
new file mode 100644
index 0000000..d1b009f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go
@@ -0,0 +1,10 @@
+// +build android nacl plan9
+
+package sockaddr
+
+import "errors"
+
+// getDefaultIfName is the default interface function for unsupported platforms.
+func getDefaultIfName() (string, error) {
+ return "", errors.New("No default interface found (unsupported platform)")
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
new file mode 100644
index 0000000..c2ec91e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
@@ -0,0 +1,40 @@
+package sockaddr
+
+import (
+ "errors"
+ "os/exec"
+)
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Linux-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on
+ // $PATH and fallback to /sbin/ip on error.
+ path, _ := exec.LookPath("ip")
+ if path == "" {
+ path = "/sbin/ip"
+ }
+
+ return routeInfo{
+ cmds: map[string][]string{"ip": {path, "route"}},
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil {
+ return "", errors.New("No default interface found")
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
new file mode 100644
index 0000000..ee8e798
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
@@ -0,0 +1,37 @@
+package sockaddr
+
+import (
+ "errors"
+ "os/exec"
+)
+
+var cmds map[string][]string = map[string][]string{
+ "route": {"/usr/sbin/route", "-n", "get", "default"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Solaris-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ var ifName string
+ if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+ return "", errors.New("No default interface found")
+ }
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
new file mode 100644
index 0000000..3da9728
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
@@ -0,0 +1,41 @@
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+ "netstat": {"netstat", "-rn"},
+ "ipconfig": {"ipconfig"},
+}
+
+type routeInfo struct {
+ cmds map[string][]string
+}
+
+// NewRouteInfo returns a Windows-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+ return routeInfo{
+ cmds: cmds,
+ }, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+ ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output()
+ if err != nil {
+ return "", err
+ }
+
+ ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut))
+ if err != nil {
+ return "", err
+ }
+
+ return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
new file mode 100644
index 0000000..826c91c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
@@ -0,0 +1,206 @@
+package sockaddr
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type SockAddrType int
+type AttrName string
+
+const (
+ TypeUnknown SockAddrType = 0x0
+ TypeUnix = 0x1
+ TypeIPv4 = 0x2
+ TypeIPv6 = 0x4
+
+ // TypeIP is the union of TypeIPv4 and TypeIPv6
+ TypeIP = 0x6
+)
+
+type SockAddr interface {
+ // CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC
+ // networks, -1 if the receiver is contained within the RFC network, or
+ // 1 if the address is not contained within the RFC.
+ CmpRFC(rfcNum uint, sa SockAddr) int
+
+ // Contains returns true if the SockAddr arg is contained within the
+ // receiver
+ Contains(SockAddr) bool
+
+ // Equal allows for the comparison of two SockAddrs
+ Equal(SockAddr) bool
+
+ DialPacketArgs() (string, string)
+ DialStreamArgs() (string, string)
+ ListenPacketArgs() (string, string)
+ ListenStreamArgs() (string, string)
+
+ // String returns the string representation of SockAddr
+ String() string
+
+ // Type returns the SockAddrType
+ Type() SockAddrType
+}
+
+// sockAddrAttrMap is a map of the SockAddr type-specific attributes.
+var sockAddrAttrMap map[AttrName]func(SockAddr) string
+var sockAddrAttrs []AttrName
+
+func init() {
+ sockAddrInit()
+}
+
+// NewSockAddr creates a new SockAddr from the string. The order in which
+// NewSockAddr() attempts to construct a SockAddr is: IPv4Addr, IPv6Addr,
+// UnixSock.
+//
+// NOTE: NewSockAddr() relies on the heuristic that the string must begin with
+// either a '.' or '/' character (or contain a '/') before it will attempt to
+// create a new UnixSock. For UNIX sockets that
+// are absolute paths or are nested within a sub-directory, this works as
+// expected, however if the UNIX socket is contained in the current working
+// directory, this will fail unless the path begins with "./"
+// (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer
+// this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul
+// of this heuristic and be assumed to be a valid UNIX socket path (which they
+// are, but it is probably not what you want and you won't realize it until you
+// stat(2) the file system to discover it doesn't exist).
+func NewSockAddr(s string) (SockAddr, error) {
+ ipv4Addr, err := NewIPv4Addr(s)
+ if err == nil {
+ return ipv4Addr, nil
+ }
+
+ ipv6Addr, err := NewIPv6Addr(s)
+ if err == nil {
+ return ipv6Addr, nil
+ }
+
+ // Check to make sure the string begins with either a '.' or '/', or
+ // contains a '/'.
+ if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) {
+ unixSock, err := NewUnixSock(s)
+ if err == nil {
+ return unixSock, nil
+ }
+ }
+
+ return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s)
+}
+
+// ToIPAddr returns an IPAddr type or nil if the type conversion fails.
+func ToIPAddr(sa SockAddr) *IPAddr {
+ ipa, ok := sa.(IPAddr)
+ if !ok {
+ return nil
+ }
+ return &ipa
+}
+
+// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails.
+func ToIPv4Addr(sa SockAddr) *IPv4Addr {
+ switch v := sa.(type) {
+ case IPv4Addr:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails.
+func ToIPv6Addr(sa SockAddr) *IPv6Addr {
+ switch v := sa.(type) {
+ case IPv6Addr:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// ToUnixSock returns a UnixSock type or nil if the type conversion fails.
+func ToUnixSock(sa SockAddr) *UnixSock {
+ switch v := sa.(type) {
+ case UnixSock:
+ return &v
+ default:
+ return nil
+ }
+}
+
+// SockAddrAttr returns a string representation of an attribute for the given
+// SockAddr.
+func SockAddrAttr(sa SockAddr, selector AttrName) string {
+ fn, found := sockAddrAttrMap[selector]
+ if !found {
+ return ""
+ }
+
+ return fn(sa)
+}
+
+// String() for SockAddrType returns a string representation of the
+// SockAddrType (e.g. "IPv4", "IPv6", or "UNIX"); it panics on unsupported
+// types.
+func (sat SockAddrType) String() string {
+ switch sat {
+ case TypeIPv4:
+ return "IPv4"
+ case TypeIPv6:
+ return "IPv6"
+ // There is no concrete "IP" type. Leaving here as a reminder.
+ // case TypeIP:
+ // return "IP"
+ case TypeUnix:
+ return "UNIX"
+ default:
+ panic("unsupported type")
+ }
+}
+
+// sockAddrInit is called once at init()
+func sockAddrInit() {
+ sockAddrAttrs = []AttrName{
+ "type", // type should be first
+ "string",
+ }
+
+ sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{
+ "string": func(sa SockAddr) string {
+ return sa.String()
+ },
+ "type": func(sa SockAddr) string {
+ return sa.Type().String()
+ },
+ }
+}
+
+// SockAddrAttrs returns a list of attributes supported by the SockAddr type.
+func SockAddrAttrs() []AttrName {
+ return sockAddrAttrs
+}
+
+// SockAddrMarshaler wraps a SockAddr so it can be marshaled to and
+// unmarshaled from JSON. Although this is pretty trivial to do in a program,
+// having the logic here is useful all around. Note that this marshals into a
+// *string* -- the underlying string representation of the sockaddr. If you
+// then unmarshal into this type in Go, all will work as expected, but
+// externally you can take what comes out and use the string value directly.
+type SockAddrMarshaler struct {
+ SockAddr
+}
+
+func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.SockAddr.String())
+}
+
+func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error {
+ var str string
+ err := json.Unmarshal(in, &str)
+ if err != nil {
+ return err
+ }
+ sa, err := NewSockAddr(str)
+ if err != nil {
+ return err
+ }
+ s.SockAddr = sa
+ return nil
+}
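
A minimal sketch of parsing and of the JSON round-trip described above (the address is illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        // NewSockAddr tries IPv4 first, then IPv6, then a UNIX socket path.
        sa, err := sockaddr.NewSockAddr("192.0.2.10/24")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(sa.Type()) // IPv4

        // SockAddrMarshaler serializes to the underlying string form.
        buf, err := json.Marshal(&sockaddr.SockAddrMarshaler{SockAddr: sa})
        if err != nil {
            log.Fatal(err)
        }

        var out sockaddr.SockAddrMarshaler
        if err := json.Unmarshal(buf, &out); err != nil {
            log.Fatal(err)
        }
        // True whenever the string form round-trips losslessly.
        fmt.Println(out.SockAddr.Equal(sa))
    }
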
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
new file mode 100644
index 0000000..75fbffb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
@@ -0,0 +1,193 @@
+package sockaddr
+
+import (
+ "bytes"
+ "sort"
+)
+
+// SockAddrs is a slice of SockAddr values
+type SockAddrs []SockAddr
+
+func (s SockAddrs) Len() int { return len(s) }
+func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// CmpAddrFunc is the function signature that must be met to be used in the
+// OrderedAddrBy multiAddrSorter
+type CmpAddrFunc func(p1, p2 *SockAddr) int
+
+// multiAddrSorter implements the Sort interface, sorting the SockAddrs within.
+type multiAddrSorter struct {
+ addrs SockAddrs
+ cmp []CmpAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedAddrBy.
+func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) {
+ ms.addrs = sockAddrs
+ sort.Sort(ms)
+}
+
+// OrderedAddrBy sorts SockAddr by the list of sort function pointers.
+func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter {
+ return &multiAddrSorter{
+ cmp: cmpFuncs,
+ }
+}
+
+// Len is part of sort.Interface.
+func (ms *multiAddrSorter) Len() int {
+ return len(ms.addrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the
+// Cmp() functions until it finds a comparison that is either less than,
+// equal to, or greater than.
+func (ms *multiAddrSorter) Less(i, j int) bool {
+ p, q := &ms.addrs[i], &ms.addrs[j]
+ // Try all but the last comparison.
+ var k int
+ for k = 0; k < len(ms.cmp)-1; k++ {
+ cmp := ms.cmp[k]
+ x := cmp(p, q)
+ switch x {
+ case -1:
+ // p < q, so we have a decision.
+ return true
+ case 1:
+ // p > q, so we have a decision.
+ return false
+ }
+ // p == q; try the next comparison.
+ }
+ // All comparisons to here said "equal", so just return whatever the
+ // final comparison reports.
+ switch ms.cmp[k](p, q) {
+ case -1:
+ return true
+ case 1:
+ return false
+ default:
+ // Still a tie! Now what?
+ return false
+ }
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiAddrSorter) Swap(i, j int) {
+ ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i]
+}
+
+const (
+ // NOTE (sean@): These constants are here for code readability only and
+ // are sprucing up the code for readability purposes. Some of the
+ // Cmp*() variants have confusing logic (especially when dealing with
+ // mixed-type comparisons) and this, I think, has made it easier to grok
+ // the code faster.
+ sortReceiverBeforeArg = -1
+ sortDeferDecision = 0
+ sortArgBeforeReceiver = 1
+)
+
+// AscAddress is a sorting function to sort SockAddrs by their respective
+// address. Comparisons between non-equal types are deferred in the sort.
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr:
+ return v.CmpAddress(p2)
+ case IPv6Addr:
+ return v.CmpAddress(p2)
+ case UnixSock:
+ return v.CmpAddress(p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscPort is a sorting function to sort SockAddrs by their respective port.
+// Comparisons between non-equal types are deferred in the sort.
+func AscPort(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr:
+ return v.CmpPort(p2)
+ case IPv6Addr:
+ return v.CmpPort(p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscPrivate is a sorting function to sort "more secure" private values before
+// "more public" values. Both IPv4 and IPv6 are compared against RFC6890
+// (which includes, but is not limited to, RFC1918 and RFC6598 for IPv4, and
+// RFC4193 for IPv6).
+func AscPrivate(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+
+ switch v := p1.(type) {
+ case IPv4Addr, IPv6Addr:
+ return v.CmpRFC(6890, p2)
+ default:
+ return sortDeferDecision
+ }
+}
+
+// AscNetworkSize is a sorting function to sort SockAddrs based on their network
+// size. Non-equal types are deferred in the sort.
+func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+ p1Type := p1.Type()
+ p2Type := p2.Type()
+
+ // Network size operations on non-IP types make no sense
+ if p1Type != p2Type && p1Type != TypeIP {
+ return sortDeferDecision
+ }
+
+ ipA := p1.(IPAddr)
+ ipB := p2.(IPAddr)
+
+ return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask()))
+}
+
+// AscType is a sorting function to sort "more secure" types before
+// "less-secure" types.
+func AscType(p1Ptr, p2Ptr *SockAddr) int {
+ p1 := *p1Ptr
+ p2 := *p2Ptr
+ p1Type := p1.Type()
+ p2Type := p2.Type()
+ switch {
+ case p1Type < p2Type:
+ return sortReceiverBeforeArg
+ case p1Type == p2Type:
+ return sortDeferDecision
+ case p1Type > p2Type:
+ return sortArgBeforeReceiver
+ default:
+ return sortDeferDecision
+ }
+}
+
+// FilterByType returns two lists: a list of matched and unmatched SockAddrs
+func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) {
+ matched = make(SockAddrs, 0, len(sas))
+ excluded = make(SockAddrs, 0, len(sas))
+
+ for _, sa := range sas {
+ if sa.Type()&type_ != 0 {
+ matched = append(matched, sa)
+ } else {
+ excluded = append(excluded, sa)
+ }
+ }
+ return matched, excluded
+}
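
A minimal sketch of the sort and filter helpers above (addresses are illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        addrs := sockaddr.SockAddrs{
            sockaddr.MustIPv4Addr("8.8.8.8/32"),
            sockaddr.MustIPv4Addr("10.1.2.3/32"),
            sockaddr.MustIPv6Addr("::1/128"),
        }

        // Sort by type, then RFC6890 "private before public", then address.
        sockaddr.OrderedAddrBy(sockaddr.AscType, sockaddr.AscPrivate, sockaddr.AscAddress).Sort(addrs)

        // Split the result into IPv4 addresses and everything else.
        v4, rest := addrs.FilterByType(sockaddr.TypeIPv4)
        fmt.Println(v4)
        fmt.Println(rest)
    }
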
diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
new file mode 100644
index 0000000..f3be3f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
@@ -0,0 +1,135 @@
+package sockaddr
+
+import (
+ "fmt"
+ "strings"
+)
+
+type UnixSock struct {
+ SockAddr
+ path string
+}
+type UnixSocks []*UnixSock
+
+// unixAttrMap is a map of the UnixSock type-specific attributes.
+var unixAttrMap map[AttrName]func(UnixSock) string
+var unixAttrs []AttrName
+
+func init() {
+ unixAttrInit()
+}
+
+// NewUnixSock creates a UnixSock from a string path. The string can be
+// either a URI-based string (e.g. `file:///etc/passwd`), an absolute path
+// (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
+func NewUnixSock(s string) (ret UnixSock, err error) {
+ ret.path = s
+ return ret, nil
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 if the receiver should sort first because its path lexically sorts before the argument's path
+// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path
+// - 1 if the argument should sort first
+func (us UnixSock) CmpAddress(sa SockAddr) int {
+ usb, ok := sa.(UnixSock)
+ if !ok {
+ return sortDeferDecision
+ }
+
+ return strings.Compare(us.Path(), usb.Path())
+}
+
+// DialPacketArgs returns the arguments required to be passed to net.DialUnix()
+// with the `unixgram` network type.
+func (us UnixSock) DialPacketArgs() (network, dialArgs string) {
+ return "unixgram", us.path
+}
+
+// DialStreamArgs returns the arguments required to be passed to net.DialUnix()
+// with the `unix` network type.
+func (us UnixSock) DialStreamArgs() (network, dialArgs string) {
+ return "unix", us.path
+}
+
+// Equal returns true if a SockAddr is equal to the receiving UnixSock.
+func (us UnixSock) Equal(sa SockAddr) bool {
+ usb, ok := sa.(UnixSock)
+ if !ok {
+ return false
+ }
+
+ if us.Path() != usb.Path() {
+ return false
+ }
+
+ return true
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUnixgram() with the `unixgram` network type.
+func (us UnixSock) ListenPacketArgs() (network, dialArgs string) {
+ return "unixgram", us.path
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenUnix() with the `unix` network type.
+func (us UnixSock) ListenStreamArgs() (network, dialArgs string) {
+ return "unix", us.path
+}
+
+// MustUnixSock is a helper method that must return a UnixSock or panic on
+// invalid input.
+func MustUnixSock(addr string) UnixSock {
+ us, err := NewUnixSock(addr)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err))
+ }
+ return us
+}
+
+// Path returns the given path of the UnixSock
+func (us UnixSock) Path() string {
+ return us.path
+}
+
+// String returns the path of the UnixSock
+func (us UnixSock) String() string {
+ return fmt.Sprintf("%+q", us.path)
+}
+
+// Type is used as a type switch and returns TypeUnix
+func (UnixSock) Type() SockAddrType {
+ return TypeUnix
+}
+
+// UnixSockAttrs returns a list of attributes supported by the UnixSock type
+func UnixSockAttrs() []AttrName {
+ return unixAttrs
+}
+
+// UnixSockAttr returns a string representation of an attribute for the given
+// UnixSock.
+func UnixSockAttr(us UnixSock, attrName AttrName) string {
+ fn, found := unixAttrMap[attrName]
+ if !found {
+ return ""
+ }
+
+ return fn(us)
+}
+
+// unixAttrInit is called once at init()
+func unixAttrInit() {
+ // Sorted for human readability
+ unixAttrs = []AttrName{
+ "path",
+ }
+
+ unixAttrMap = map[AttrName]func(us UnixSock) string{
+ "path": func(us UnixSock) string {
+ return us.Path()
+ },
+ }
+}
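
A minimal sketch showing how the Dial*/Listen* argument helpers map onto the standard net package (the socket path is hypothetical):

    package main

    import (
        "fmt"
        "net"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        // Hypothetical socket path, for illustration only.
        us := sockaddr.MustUnixSock("/var/run/example.sock")

        // DialStreamArgs returns ("unix", "/var/run/example.sock"), which can
        // be handed straight to net.Dial.
        network, addr := us.DialStreamArgs()
        conn, err := net.Dial(network, addr)
        if err != nil {
            fmt.Println("dial failed:", err)
            return
        }
        defer conn.Close()
        fmt.Println("connected to", us)
    }
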
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 098e1bc..64c83bc 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -205,6 +205,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
}
}
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 0000000..7c038d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+ blank = byte(' ')
+ newline = byte('\n')
+ tab = byte('\t')
+ infinity = 1 << 30 // offset or line
+)
+
+var (
+ unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+ cfg Config
+ prev token.Pos
+
+ comments []*ast.CommentGroup // may be nil, contains all comments
+ standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+ enableTrace bool
+ indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int { return len(b) }
+func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, that is comments which
+// are not attached to any node as a lead or line comment.
+func (p *printer) collectComments(node ast.Node) {
+ // first collect all comments. This is already stored in
+ // ast.File.(comments)
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.File:
+ p.comments = t.Comments
+ return nn, false
+ }
+ return nn, true
+ })
+
+ standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+ for _, c := range p.comments {
+ standaloneComments[c.Pos()] = c
+ }
+
+ // next remove all lead and line comments from the overall comment map.
+ // This will give us comments which are standalone, comments which are not
+ // assigned to any kind of node.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ case *ast.ObjectItem:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ }
+
+ return nn, true
+ })
+
+ for _, c := range standaloneComments {
+ p.standaloneComments = append(p.standaloneComments, c)
+ }
+
+ sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates printable HCL output from the given node and returns it.
+func (p *printer) output(n interface{}) []byte {
+ var buf bytes.Buffer
+
+ switch t := n.(type) {
+ case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
+ return p.output(t.Node)
+ case *ast.ObjectList:
+ defer un(trace(p, "ObjectList"))
+
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+ // Go through the standalone comments in the file and print out
+			// the comments that should be printed for this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // if we hit the end add newlines so we can print the comment
+ // we don't do this if prev is invalid which means the
+ // beginning of the file since the first comment should
+ // be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+ buf.Write([]byte{newline, newline})
+ newlinePrinted = true
+ }
+
+ // Write the actual comment.
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+
+ // Set printed to true to note that we printed something
+ printed = true
+ }
+ }
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
+ }
+
+ if index == len(t.Items) {
+ break
+ }
+
+ buf.Write(p.output(t.Items[index]))
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+ // 1. The next object is more than one line away anyways,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
+ }
+ index++
+ }
+ case *ast.ObjectKey:
+ buf.WriteString(t.Token.Text)
+ case *ast.ObjectItem:
+ p.prev = t.Pos()
+ buf.Write(p.objectItem(t))
+ case *ast.LiteralType:
+ buf.Write(p.literalType(t))
+ case *ast.ListType:
+ buf.Write(p.list(t))
+ case *ast.ObjectType:
+ buf.Write(p.objectType(t))
+ default:
+ fmt.Printf(" unknown type: %T\n", n)
+ }
+
+ return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+ result := []byte(lit.Token.Text)
+ switch lit.Token.Type {
+ case token.HEREDOC:
+ // Clear the trailing newline from heredocs
+ if result[len(result)-1] == '\n' {
+ result = result[:len(result)-1]
+ }
+
+ // Poison lines 2+ so that we don't indent them
+ result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
+ }
+
+ return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object item
+// starts with one/multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+ defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+ var buf bytes.Buffer
+
+ if o.LeadComment != nil {
+ for _, comment := range o.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ // If key and val are on different lines, treat line comments like lead comments.
+ if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range o.Keys {
+ buf.WriteString(k.Token.Text)
+ buf.WriteByte(blank)
+
+ // reach end of key
+ if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ buf.Write(p.output(o.Val))
+
+ if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+ buf.WriteByte(blank)
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+ defer un(trace(p, "ObjectType"))
+ var buf bytes.Buffer
+ buf.WriteString("{")
+
+ var index int
+ var nextItem token.Pos
+ var commented, newlinePrinted bool
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
+
+ // Go through the standalone comments in the file and print out
+		// the comments that should be printed for this object item.
+ for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If there are standalone comments and the initial newline has not
+ // been printed yet, do it now.
+ if !newlinePrinted {
+ newlinePrinted = true
+ buf.WriteByte(newline)
+ }
+
+ // add newline if it's between other printed nodes
+ if index > 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+ buf.WriteByte(newline)
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+		// Check if we have adjacent one-liner items. If so, we're going to
+		// align their comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+			// One line means a one-liner without any lead comment,
+			// two lines means a one-liner with a lead comment, and
+			// anything else might be something else.
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check it too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ if p.isSingleLineList(l) {
+ return p.singleLineList(l)
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ buf.WriteByte(newline)
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ haveEmptyLine := false
+ for i, item := range l.List {
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // Ensure an empty line before every element with a
+ // lead comment (except the first item in a list).
+ if !haveEmptyLine && i != 0 {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+
+ // if this item is a heredoc, then we output the comma on
+ // the next line. This is the only case this happens.
+ comma := []byte{','}
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+		// lead comment (except the last item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * it consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent prefixes each non-empty line of the given buffer with the
+// configured indentation.
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks the second and all further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+// * has no lead comments (hence multi-line)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+ // If it isn't an object type, then its not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 0000000..6617ab8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+ return buf.Bytes(), nil
+}
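For reference, a small usage sketch of the entry points added in this file; the HCL snippet and output handling are arbitrary:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/hashicorp/hcl/hcl/parser"
        "github.com/hashicorp/hcl/hcl/printer"
    )

    func main() {
        src := []byte("foo = \"bar\"\nbaz { qux = 1 }\n")

        // One-shot: parse and pretty-print with the default two-space indent.
        out, err := printer.Format(src)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s", out)

        // Or parse once and print with a custom configuration.
        node, err := parser.Parse(src)
        if err != nil {
            log.Fatal(err)
        }
        cfg := &printer.Config{SpacesWidth: 4}
        if err := cfg.Fprint(os.Stdout, node); err != nil {
            log.Fatal(err)
        }
    }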
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
index 6601ef7..624a18f 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -74,14 +74,6 @@ func (s *Scanner) next() rune {
return eof
}
- if ch == utf8.RuneError && size == 1 {
- s.srcPos.Column++
- s.srcPos.Offset += size
- s.lastCharLen = size
- s.err("illegal UTF-8 encoding")
- return ch
- }
-
// remember last position
s.prevPos = s.srcPos
@@ -89,18 +81,27 @@ func (s *Scanner) next() rune {
s.lastCharLen = size
s.srcPos.Offset += size
+ if ch == utf8.RuneError && size == 1 {
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
if ch == '\n' {
s.srcPos.Line++
s.lastLineLen = s.srcPos.Column
s.srcPos.Column = 0
}
- // If we see a null character with data left, then that is an error
- if ch == '\x00' && s.buf.Len() > 0 {
+ if ch == '\x00' {
s.err("unexpected null character (0x00)")
return eof
}
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
@@ -432,16 +433,16 @@ func (s *Scanner) scanHeredoc() {
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
- if len(identBytes) == 0 {
+ if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
s.err("zero-length heredoc anchor")
return
}
var identRegexp *regexp.Regexp
if identBytes[0] == '-' {
- identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
} else {
- identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
}
// Read the actual string value
@@ -551,7 +552,7 @@ func (s *Scanner) scanDigits(ch rune, base, n int) rune {
s.err("illegal char escape")
}
- if n != start {
+ if n != start && ch != eof {
// we scanned all digits, put the last non digit char back,
// only if we read anything at all
s.unread()
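The reworked heredoc anchor above is now anchored at both ends and tolerates trailing carriage returns, so heredocs in files with CRLF line endings terminate correctly. A quick standard-library illustration of the pattern for an indented `<<-EOF` heredoc:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile(`^[[:space:]]*EOF\r*\z`)
        fmt.Println(re.MatchString("    EOF\r"))    // true: CRLF endings are accepted
        fmt.Println(re.MatchString("EOF trailing")) // false: the anchor must be alone on its line
    }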
diff --git a/vendor/github.com/hashicorp/vault/api/SPEC.md b/vendor/github.com/hashicorp/vault/api/SPEC.md
deleted file mode 100644
index 15345f3..0000000
--- a/vendor/github.com/hashicorp/vault/api/SPEC.md
+++ /dev/null
@@ -1,611 +0,0 @@
-FORMAT: 1A
-
-# vault
-
-The Vault API gives you full access to the Vault project.
-
-If you're browsing this API specification in GitHub or in raw
-format, please excuse some of the odd formatting. This document
-is in api-blueprint format that is read by viewers such as
-Apiary.
-
-## Sealed vs. Unsealed
-
-Whenever an individual Vault server is started, it is started
-in the _sealed_ state. In this state, it knows where its data
-is located, but the data is encrypted and Vault doesn't have the
-encryption keys to access it. Before Vault can operate, it must
-be _unsealed_.
-
-**Note:** Sealing/unsealing has no relationship to _authentication_
-which is separate and still required once the Vault is unsealed.
-
-Instead of being sealed with a single key, we utilize
-[Shamir's Secret Sharing](http://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing)
-to shard a key into _n_ parts such that _t_ parts are required
-to reconstruct the original key, where `t <= n`. This means that
-Vault itself doesn't know the original key, and no single person
-has the original key (unless `n = 1`, or `t` parts are given to
-a single person).
-
-Unsealing is done via an unauthenticated
-[unseal API](#reference/seal/unseal/unseal). This API takes a single
-master shard and progresses the unsealing process. Once all shards
-are given, the Vault is either unsealed or resets the unsealing
-process if the key was invalid.
-
-The entire seal/unseal state is server-wide. This allows multiple
-distinct operators to use the unseal API (or more likely the
-`vault unseal` command) from separate computers/networks and never
-have to transmit their key in order to unseal the vault in a
-distributed fashion.
-
-## Transport
-
-The API is expected to be accessed over a TLS connection at
-all times, with a valid certificate that is verified by a well
-behaved client.
-
-## Authentication
-
-Once the Vault is unsealed, every other operation requires
-authentication. There are multiple methods for authentication
-that can be enabled (see
-[authentication](#reference/authentication)).
-
-Authentication is done with the login endpoint. The login endpoint
-returns an access token that is set as the `X-Vault-Token` header.
-
-## Help
-
-To retrieve the help for any API within Vault, including mounted
-backends, credential providers, etc., append `?help=1` to any
-URL. If you have valid permission to access the path, the help text
-will be returned with the following structure:
-
- {
- "help": "help text"
- }
-
-## Error Response
-
-A common JSON structure is always returned to return errors:
-
- {
- "errors": [
- "message",
- "another message"
- ]
- }
-
-This structure will be sent down for any non-20x HTTP status.
-
-## HTTP Status Codes
-
-The following HTTP status codes are used throughout the API.
-
-- `200` - Success with data.
-- `204` - Success, no data returned.
-- `400` - Invalid request, missing or invalid data.
-- `403` - Forbidden, your authentication details are either
- incorrect or you don't have access to this feature.
-- `404` - Invalid path. This can both mean that the path truly
- doesn't exist or that you don't have permission to view a
- specific path. We use 404 in some cases to avoid state leakage.
-- `429` - Rate limit exceeded. Try again after waiting some period
- of time.
-- `500` - Internal server error. An internal error has occurred,
- try again later. If the error persists, report a bug.
-- `503` - Vault is down for maintenance or is currently sealed.
- Try again later.
-
-# Group Initialization
-
-## Initialization [/sys/init]
-### Initialization Status [GET]
-Returns the status of whether the vault is initialized or not. The
-vault doesn't have to be unsealed for this operation.
-
-+ Response 200 (application/json)
-
- {
- "initialized": true
- }
-
-### Initialize [POST]
-Initialize the vault. This is an unauthenticated request to initially
-setup a new vault. Although this is unauthenticated, it is still safe:
-data cannot be in vault prior to initialization, and any future
-authentication will fail if you didn't initialize it yourself.
-Additionally, once initialized, a vault cannot be reinitialized.
-
-This API is the only time Vault will ever be aware of your keys, and
-the only time the keys will ever be returned in one unit. Care should
-be taken to ensure that the output of this request is never logged,
-and that the keys are properly distributed.
-
-The response also contains the initial root token that can be used
-as authentication in order to initially configure Vault once it is
-unsealed. Just as with the unseal keys, this is the only time Vault is
-ever aware of this token.
-
-+ Request (application/json)
-
- {
- "secret_shares": 5,
- "secret_threshold": 3,
- }
-
-+ Response 200 (application/json)
-
- {
- "keys": ["one", "two", "three"],
- "root_token": "foo"
- }
-
-# Group Seal/Unseal
-
-## Seal Status [/sys/seal-status]
-### Seal Status [GET]
-Returns the status of whether the vault is currently
-sealed or not, as well as the progress of unsealing.
-
-The response has the following attributes:
-
-- sealed (boolean) - If true, the vault is sealed. Otherwise,
- it is unsealed.
-- t (int) - The "t" value for the master key, or the number
- of shards needed total to unseal the vault.
-- n (int) - The "n" value for the master key, or the total
- number of shards of the key distributed.
-- progress (int) - The number of master key shards that have
- been entered so far towards unsealing the vault.
-
-+ Response 200 (application/json)
-
- {
- "sealed": true,
- "t": 3,
- "n": 5,
- "progress": 1
- }
-
-## Seal [/sys/seal]
-### Seal [PUT]
-Seal the vault.
-
-Sealing the vault locks Vault from any future operations on any
-secrets or system configuration until the vault is once again
-unsealed. Internally, sealing throws away the keys to access the
-encrypted vault data, so Vault is unable to access the data without
-unsealing to get the encryption keys.
-
-+ Response 204
-
-## Unseal [/sys/unseal]
-### Unseal [PUT]
-Unseal the vault.
-
-Unseal the vault by entering a portion of the master key. The
-response object will tell you if the unseal is complete or
-only partial.
-
-If the vault is already unsealed, this does nothing. It is
-not an error; the return value just says the vault is unsealed.
-Due to the architecture of Vault, we cannot validate whether
-any portion of the unseal key given is valid until all keys
-have been entered; therefore, unsealing an already unsealed vault
-is still a success even if the input key is invalid.
-
-+ Request (application/json)
-
- {
- "key": "value"
- }
-
-+ Response 200 (application/json)
-
- {
- "sealed": true,
- "t": 3,
- "n": 5,
- "progress": 1
- }
-
-# Group Authentication
-
-## List Auth Methods [/sys/auth]
-### List all auth methods [GET]
-Lists all available authentication methods.
-
-This returns the name of the authentication method as well as
-a human-friendly long-form help text for the method that can be
-shown to the user as documentation.
-
-+ Response 200 (application/json)
-
- {
- "token": {
- "type": "token",
- "description": "Token authentication"
- },
- "oauth": {
- "type": "oauth",
- "description": "OAuth authentication"
- }
- }
-
-## Single Auth Method [/sys/auth/{id}]
-
-+ Parameters
- + id (required, string) ... The ID of the auth method.
-
-### Enable an auth method [PUT]
-Enables an authentication method.
-
-The body of the request depends on the authentication method
-being used. Please reference the documentation for the specific
-authentication method you're enabling in order to determine what
-parameters you must give it.
-
-If an authentication method is already enabled, then this can be
-used to change the configuration, including even the type of
-the configuration.
-
-+ Request (application/json)
-
- {
- "type": "type",
- "key": "value",
- "key2": "value2"
- }
-
-+ Response 204
-
-### Disable an auth method [DELETE]
-Disables an authentication method. Previously authenticated sessions
-are immediately invalidated.
-
-+ Response 204
-
-# Group Policies
-
-Policies are named permission sets that identities returned by
-credential stores are bound to. This separates _authentication_
-from _authorization_.
-
-## Policies [/sys/policy]
-### List all Policies [GET]
-
-List all the policies.
-
-+ Response 200 (application/json)
-
- {
- "policies": ["root"]
- }
-
-## Single Policy [/sys/policy/{id}]
-
-+ Parameters
- + id (required, string) ... The name of the policy
-
-### Upsert [PUT]
-
-Create or update a policy with the given ID.
-
-+ Request (application/json)
-
- {
- "rules": "HCL"
- }
-
-+ Response 204
-
-### Delete [DELETE]
-
-Delete a policy with the given ID. Any identities bound to this
-policy will immediately become "deny all" despite already being
-authenticated.
-
-+ Response 204
-
-# Group Mounts
-
-Logical backends are mounted at _mount points_, similar to
-filesystems. This allows you to mount the "aws" logical backend
-at the "aws-us-east" path, so all access is at `/aws-us-east/keys/foo`
-for example. This allows multiple logical backends to be enabled at once.
-
-## Mounts [/sys/mounts]
-### List all mounts [GET]
-
-Lists all the active mount points.
-
-+ Response 200 (application/json)
-
- {
- "aws": {
- "type": "aws",
- "description": "AWS"
- },
- "pg": {
- "type": "postgresql",
- "description": "PostgreSQL dynamic users"
- }
- }
-
-## Single Mount [/sys/mounts/{path}]
-### New Mount [POST]
-
-Mount a logical backend to a new path.
-
-Configuration for this new backend is done via the normal
-read/write mechanism once it is mounted.
-
-+ Request (application/json)
-
- {
- "type": "aws",
- "description": "EU AWS tokens"
- }
-
-+ Response 204
-
-### Unmount [DELETE]
-
-Unmount a mount point.
-
-+ Response 204
-
-## Remount [/sys/remount]
-### Remount [POST]
-
-Move an already-mounted backend to a new path.
-
-+ Request (application/json)
-
- {
- "from": "aws",
- "to": "aws-east"
- }
-
-+ Response 204
-
-# Group Audit Backends
-
-Audit backends are responsible for shuttling the audit logs that
-Vault generates to a durable system for future querying. By default,
-audit logs are not stored anywhere.
-
-## Audit Backends [/sys/audit]
-### List Enabled Audit Backends [GET]
-
-List all the enabled audit backends
-
-+ Response 200 (application/json)
-
- {
- "file": {
- "type": "file",
- "description": "Send audit logs to a file",
- "options": {}
- }
- }
-
-## Single Audit Backend [/sys/audit/{path}]
-
-+ Parameters
- + path (required, string) ... The path where the audit backend is mounted
-
-### Enable [PUT]
-
-Enable an audit backend.
-
-+ Request (application/json)
-
- {
- "type": "file",
- "description": "send to a file",
- "options": {
- "path": "/var/log/vault.audit.log"
- }
- }
-
-+ Response 204
-
-### Disable [DELETE]
-
-Disable an audit backend.
-
-+ Request (application/json)
-
-+ Response 204
-
-# Group Secrets
-
-## Generic [/{mount}/{path}]
-
-This group documents the general format of reading and writing
-to Vault. The exact structure of the keyspace is defined by the
-logical backends in use, so documentation related to
-a specific backend should be referenced for details on what keys
-and routes are expected.
-
-The paths in the examples are `/prefix/path`, but in practice
-these will be defined by the backends that are mounted. For
-example, reading an AWS key might be at the `/aws/root` path.
-These paths are defined by the logical backends.
-
-+ Parameters
- + mount (required, string) ... The mount point for the
- logical backend. Example: `aws`.
- + path (optional, string) ... The path within the backend
- to read or write data.
-
-### Read [GET]
-
-Read data from vault.
-
-The data read from the vault can either be a secret or
-arbitrary configuration data. The type of data returned
-depends on the path, and is defined by the logical backend.
-
-If the return value is a secret, then the return structure
-is a mixture of arbitrary key/value along with the following
-fields which are guaranteed to exist:
-
-- `lease_id` (string) - A unique ID used for renewal and
- revocation.
-
-- `renewable` (bool) - If true, then this key can be renewed.
- If a key can't be renewed, then a new key must be requested
- after the lease duration period.
-
-- `lease_duration` (int) - The time in seconds that a secret is
- valid for before it must be renewed.
-
-- `lease_duration_max` (int) - The maximum amount of time in
- seconds that a secret is valid for. This will always be
- greater than or equal to `lease_duration`. The difference
- between this and `lease_duration` is an overlap window
- where multiple keys may be valid.
-
-If the return value is not a secret, then the return structure
-is an arbitrary JSON object.
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "UUID",
- "lease_duration": 3600,
- "key": "value"
- }
-
-### Write [PUT]
-
-Write data to vault.
-
-The behavior and arguments to the write are defined by
-the logical backend.
-
-+ Request (application/json)
-
- {
- "key": "value"
- }
-
-+ Response 204
-
-# Group Lease Management
-
-## Renew Key [/sys/renew/{id}]
-
-+ Parameters
- + id (required, string) ... The `lease_id` of the secret
- to renew.
-
-### Renew [PUT]
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "...",
- "lease_duration": 3600,
- "access_key": "foo",
- "secret_key": "bar"
- }
-
-## Revoke Key [/sys/revoke/{id}]
-
-+ Parameters
- + id (required, string) ... The `lease_id` of the secret
- to revoke.
-
-### Revoke [PUT]
-
-+ Response 204
-
-# Group Backend: AWS
-
-## Root Key [/aws/root]
-### Set the Key [PUT]
-
-Set the root key that the logical backend will use to create
-new secrets, IAM policies, etc.
-
-+ Request (application/json)
-
- {
- "access_key": "key",
- "secret_key": "key",
- "region": "us-east-1"
- }
-
-+ Response 204
-
-## Policies [/aws/policies]
-### List Policies [GET]
-
-List all the policies that can be used to create keys.
-
-+ Response 200 (application/json)
-
- [{
- "name": "root",
- "description": "Root access"
- }, {
- "name": "web-deploy",
- "description": "Enough permissions to deploy the web app."
- }]
-
-## Single Policy [/aws/policies/{name}]
-
-+ Parameters
- + name (required, string) ... Name of the policy.
-
-### Read [GET]
-
-Read a policy.
-
-+ Response 200 (application/json)
-
- {
- "policy": "base64-encoded policy"
- }
-
-### Upsert [PUT]
-
-Create or update a policy.
-
-+ Request (application/json)
-
- {
- "policy": "base64-encoded policy"
- }
-
-+ Response 204
-
-### Delete [DELETE]
-
-Delete the policy with the given name.
-
-+ Response 204
-
-## Generate Access Keys [/aws/keys/{policy}]
-### Create [GET]
-
-This generates a new keypair for the given policy.
-
-+ Parameters
- + policy (required, string) ... The policy under which to create
- the key pair.
-
-+ Response 200 (application/json)
-
- {
- "lease_id": "...",
- "lease_duration": 3600,
- "access_key": "foo",
- "secret_key": "bar"
- }
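The endpoints the removed blueprint documented remain reachable through this package's typed client. A rough sketch using methods present in this version of the api package (the unseal key share is a placeholder):

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(nil) // nil => DefaultConfig plus environment
        if err != nil {
            log.Fatal(err)
        }

        // GET /sys/init
        initialized, err := client.Sys().InitStatus()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("initialized:", initialized)

        // GET /sys/seal-status
        status, err := client.Sys().SealStatus()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("sealed=%v progress=%d/%d\n", status.Sealed, status.Progress, status.T)

        // PUT /sys/unseal with one key share (placeholder value)
        if status.Sealed {
            if _, err := client.Sys().Unseal("one-key-share"); err != nil {
                log.Fatal(err)
            }
        }
    }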
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index 0204cec..8f0d3f8 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -1,6 +1,7 @@
package api
import (
+ "context"
"crypto/tls"
"fmt"
"net"
@@ -12,13 +13,15 @@ import (
"strings"
"sync"
"time"
+ "unicode"
- "golang.org/x/net/http2"
-
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
+ retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/vault/helper/parseutil"
- "github.com/sethgrid/pester"
+ "golang.org/x/net/http2"
+ "golang.org/x/time/rate"
)
const EnvVaultAddress = "VAULT_ADDR"
@@ -32,6 +35,8 @@ const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
const EnvVaultToken = "VAULT_TOKEN"
+const EnvVaultMFA = "VAULT_MFA"
+const EnvRateLimit = "VAULT_RATE_LIMIT"
// WrappingLookupFunc is a function that, given an HTTP verb and a path,
// returns an optional string duration to be used for response wrapping (e.g.
@@ -42,24 +47,42 @@ type WrappingLookupFunc func(operation, path string) string
// Config is used to configure the creation of the client.
type Config struct {
+ modifyLock sync.RWMutex
+
// Address is the address of the Vault server. This should be a complete
// URL such as "http://vault.example.com". If you need a custom SSL
// cert or want to enable insecure mode, you need to specify a custom
// HttpClient.
Address string
- // HttpClient is the HTTP client to use, which will currently always have the
- // same values as http.DefaultClient. This is used to control redirect behavior.
+ // HttpClient is the HTTP client to use. Vault sets sane defaults for the
+ // http.Client and its associated http.Transport created in DefaultConfig.
+ // If you must modify Vault's defaults, it is suggested that you start with
+ // that client and modify as needed rather than start with an empty client
+ // (or http.DefaultClient).
HttpClient *http.Client
- redirectSetup sync.Once
-
- // MaxRetries controls the maximum number of times to retry when a 5xx error
- // occurs. Set to 0 or less to disable retrying. Defaults to 0.
+ // MaxRetries controls the maximum number of times to retry when a 5xx
+ // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total
+ // of three tries).
MaxRetries int
// Timeout is for setting custom timeout parameter in the HttpClient
Timeout time.Duration
+
+ // If there is an error when creating the configuration, this will be the
+ // error
+ Error error
+
+ // The Backoff function to use; a default is used if not provided
+ Backoff retryablehttp.Backoff
+
+ // Limiter is the rate limiter used by the client.
+ // If this pointer is nil, then there will be no limit set.
+ // In contrast, if this pointer is set, even to an empty struct,
+ // then that limiter will be used. Note that an empty Limiter
+ // is equivalent to blocking all events.
+ Limiter *rate.Limiter
}
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
@@ -92,60 +115,94 @@ type TLSConfig struct {
//
// The default Address is https://127.0.0.1:8200, but this can be overridden by
// setting the `VAULT_ADDR` environment variable.
+//
+// If an error is encountered, the Error field on the returned *Config is set.
func DefaultConfig() *Config {
config := &Config{
Address: "https://127.0.0.1:8200",
HttpClient: cleanhttp.DefaultClient(),
}
config.HttpClient.Timeout = time.Second * 60
+
transport := config.HttpClient.Transport.(*http.Transport)
transport.TLSHandshakeTimeout = 10 * time.Second
transport.TLSClientConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
}
+ if err := http2.ConfigureTransport(transport); err != nil {
+ config.Error = err
+ return config
+ }
- if v := os.Getenv(EnvVaultAddress); v != "" {
- config.Address = v
+ if err := config.ReadEnvironment(); err != nil {
+ config.Error = err
+ return config
+ }
+
+ // Ensure redirects are not automatically followed
+ // Note that this is sane for the API client as it has its own
+ // redirect handling logic (and thus also for command/meta),
+ // but in e.g. http_test actual redirect handling is necessary
+ config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ // Returning this value causes the Go net library to not close the
+ // response body and to nil out the error. Otherwise retry clients may
+ // try three times on every redirect because it sees an error from this
+ // function (to prevent redirects) passing through to it.
+ return http.ErrUseLastResponse
}
+ config.Backoff = retryablehttp.LinearJitterBackoff
+ config.MaxRetries = 2
+
return config
}
-// ConfigureTLS takes a set of TLS configurations and applies those to the the HTTP client.
+// ConfigureTLS takes a set of TLS configurations and applies those to the
+// HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
if c.HttpClient == nil {
c.HttpClient = DefaultConfig().HttpClient
}
+ clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
var clientCert tls.Certificate
foundClientCert := false
- if t.CACert != "" || t.CAPath != "" || t.ClientCert != "" || t.ClientKey != "" || t.Insecure {
- if t.ClientCert != "" && t.ClientKey != "" {
- var err error
- clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
- if err != nil {
- return err
- }
- foundClientCert = true
- } else if t.ClientCert != "" || t.ClientKey != "" {
- return fmt.Errorf("Both client cert and client key must be provided")
+
+ switch {
+ case t.ClientCert != "" && t.ClientKey != "":
+ var err error
+ clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey)
+ if err != nil {
+ return err
}
+ foundClientCert = true
+ case t.ClientCert != "" || t.ClientKey != "":
+ return fmt.Errorf("both client cert and client key must be provided")
}
- clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
- rootConfig := &rootcerts.Config{
- CAFile: t.CACert,
- CAPath: t.CAPath,
- }
- if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
- return err
+ if t.CACert != "" || t.CAPath != "" {
+ rootConfig := &rootcerts.Config{
+ CAFile: t.CACert,
+ CAPath: t.CAPath,
+ }
+ if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
+ return err
+ }
}
- clientTLSConfig.InsecureSkipVerify = t.Insecure
+ if t.Insecure {
+ clientTLSConfig.InsecureSkipVerify = true
+ }
if foundClientCert {
- clientTLSConfig.Certificates = []tls.Certificate{clientCert}
+ // We use this function to ignore the server's preferential list of
+ // CAs; otherwise, any CA used for the cert auth backend must be in
+ // the server's CA pool.
+ clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ return &clientCert, nil
+ }
}
+
if t.TLSServerName != "" {
clientTLSConfig.ServerName = t.TLSServerName
}
@@ -153,9 +210,8 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error {
return nil
}
-// ReadEnvironment reads configuration information from the
-// environment. If there is an error, no configuration value
-// is updated.
+// ReadEnvironment reads configuration information from the environment. If
+// there is an error, no configuration value is updated.
func (c *Config) ReadEnvironment() error {
var envAddress string
var envCACert string
@@ -166,6 +222,7 @@ func (c *Config) ReadEnvironment() error {
var envInsecure bool
var envTLSServerName string
var envMaxRetries *uint64
+ var limit *rate.Limiter
// Parse the environment variables
if v := os.Getenv(EnvVaultAddress); v != "" {
@@ -190,10 +247,17 @@ func (c *Config) ReadEnvironment() error {
if v := os.Getenv(EnvVaultClientKey); v != "" {
envClientKey = v
}
+ if v := os.Getenv(EnvRateLimit); v != "" {
+ rateLimit, burstLimit, err := parseRateLimit(v)
+ if err != nil {
+ return err
+ }
+ limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit)
+ }
if t := os.Getenv(EnvVaultClientTimeout); t != "" {
clientTimeout, err := parseutil.ParseDurationSecond(t)
if err != nil {
- return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout)
+ return fmt.Errorf("could not parse %q", EnvVaultClientTimeout)
}
envClientTimeout = clientTimeout
}
@@ -201,7 +265,7 @@ func (c *Config) ReadEnvironment() error {
var err error
envInsecure, err = strconv.ParseBool(v)
if err != nil {
- return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY")
+ return fmt.Errorf("could not parse VAULT_SKIP_VERIFY")
}
}
if v := os.Getenv(EnvVaultTLSServerName); v != "" {
@@ -217,6 +281,12 @@ func (c *Config) ReadEnvironment() error {
TLSServerName: envTLSServerName,
Insecure: envInsecure,
}
+
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.Limiter = limit
+
if err := c.ConfigureTLS(t); err != nil {
return err
}
@@ -226,7 +296,7 @@ func (c *Config) ReadEnvironment() error {
}
if envMaxRetries != nil {
- c.MaxRetries = int(*envMaxRetries) + 1
+ c.MaxRetries = int(*envMaxRetries)
}
if envClientTimeout != 0 {
@@ -236,70 +306,76 @@ func (c *Config) ReadEnvironment() error {
return nil
}
-// Client is the client to the Vault API. Create a client with
-// NewClient.
+func parseRateLimit(val string) (rate float64, burst int, err error) {
+
+ _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst)
+ if err != nil {
+ rate, err = strconv.ParseFloat(val, 64)
+ if err != nil {
+ err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit)
+ }
+ burst = int(rate)
+ }
+
+ return rate, burst, err
+
+}
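parseRateLimit accepts either a bare rate or a rate:burst pair; when only a rate is given, the burst defaults to the integer part of the rate. A small sketch with hypothetical values:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        // "10.5"    => 10.5 requests/second, burst 10
        // "10.5:20" => 10.5 requests/second, burst 20
        os.Setenv("VAULT_RATE_LIMIT", "10.5:20")

        cfg := api.DefaultConfig() // ReadEnvironment runs as part of DefaultConfig here
        if cfg.Error != nil {
            log.Fatal(cfg.Error)
        }
        fmt.Println("limiter configured:", cfg.Limiter != nil)
    }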
+
+// Client is the client to the Vault API. Create a client with NewClient.
type Client struct {
+ modifyLock sync.RWMutex
addr *url.URL
config *Config
token string
headers http.Header
wrappingLookupFunc WrappingLookupFunc
+ mfaCreds []string
+ policyOverride bool
}
// NewClient returns a new client for the given configuration.
//
+// If the configuration is nil, Vault will use configuration from
+// DefaultConfig(), which is the recommended starting configuration.
+//
// If the environment variable `VAULT_TOKEN` is present, the token will be
// automatically added to the client. Otherwise, you must manually call
// `SetToken()`.
func NewClient(c *Config) (*Client, error) {
+ def := DefaultConfig()
+ if def == nil {
+ return nil, fmt.Errorf("could not create/read default configuration")
+ }
+ if def.Error != nil {
+ return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error)
+ }
+
if c == nil {
- c = DefaultConfig()
- if err := c.ReadEnvironment(); err != nil {
- return nil, fmt.Errorf("error reading environment: %v", err)
- }
+ c = def
}
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
u, err := url.Parse(c.Address)
if err != nil {
return nil, err
}
if c.HttpClient == nil {
- c.HttpClient = DefaultConfig().HttpClient
+ c.HttpClient = def.HttpClient
}
if c.HttpClient.Transport == nil {
- c.HttpClient.Transport = cleanhttp.DefaultTransport()
+ c.HttpClient.Transport = def.HttpClient.Transport
}
- if tp, ok := c.HttpClient.Transport.(*http.Transport); ok {
- if err := http2.ConfigureTransport(tp); err != nil {
- return nil, err
- }
- }
-
- redirFunc := func() {
- // Ensure redirects are not automatically followed
- // Note that this is sane for the API client as it has its own
- // redirect handling logic (and thus also for command/meta),
- // but in e.g. http_test actual redirect handling is necessary
- c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- // Returning this value causes the Go net library to not close the
- // response body and to nil out the error. Otherwise pester tries
- // three times on every redirect because it sees an error from this
- // function (to prevent redirects) passing through to it.
- return http.ErrUseLastResponse
- }
- }
-
- c.redirectSetup.Do(redirFunc)
-
client := &Client{
addr: u,
config: c,
}
if token := os.Getenv(EnvVaultToken); token != "" {
- client.SetToken(token)
+ client.token = token
}
return client, nil
@@ -309,72 +385,181 @@ func NewClient(c *Config) (*Client, error) {
// "<Scheme>://<Host>:<Port>". Setting this on a client will override the
// value of VAULT_ADDR environment variable.
func (c *Client) SetAddress(addr string) error {
- var err error
- if c.addr, err = url.Parse(addr); err != nil {
- return fmt.Errorf("failed to set address: %v", err)
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ parsedAddr, err := url.Parse(addr)
+ if err != nil {
+ return errwrap.Wrapf("failed to set address: {{err}}", err)
}
+ c.addr = parsedAddr
return nil
}
// Address returns the Vault URL the client is configured to connect to
func (c *Client) Address() string {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
return c.addr.String()
}
+// SetLimiter will set the rate limiter for this client.
+// This method is thread-safe.
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter
+func (c *Client) SetLimiter(rateLimit float64, burst int) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
+ c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst)
+}
+
// SetMaxRetries sets the number of retries that will be used in the case of certain errors
func (c *Client) SetMaxRetries(retries int) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
c.config.MaxRetries = retries
}
// SetClientTimeout sets the client request timeout
func (c *Client) SetClientTimeout(timeout time.Duration) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
c.config.Timeout = timeout
}
+// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
+// for a given operation and path
+func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
+ return c.wrappingLookupFunc
+}
+
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.wrappingLookupFunc = lookupFunc
}
+// SetMFACreds sets the MFA credentials supplied either via the environment
+// variable or via the command line.
+func (c *Client) SetMFACreds(creds []string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.mfaCreds = creds
+}
+
// Token returns the access token being used by this client. It will
// return the empty string if there is no token set.
func (c *Client) Token() string {
+ c.modifyLock.RLock()
+ defer c.modifyLock.RUnlock()
+
return c.token
}
// SetToken sets the token directly. This won't perform any auth
// verification, it simply sets the token properly for future requests.
func (c *Client) SetToken(v string) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.token = v
}
// ClearToken deletes the token if it is set or does nothing otherwise.
func (c *Client) ClearToken() {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.token = ""
}
// SetHeaders sets the headers to be used for future requests.
func (c *Client) SetHeaders(headers http.Header) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
c.headers = headers
}
-// Clone creates a copy of this client.
+// SetBackoff sets the backoff function to be used for future requests.
+func (c *Client) SetBackoff(backoff retryablehttp.Backoff) {
+ c.modifyLock.RLock()
+ c.config.modifyLock.Lock()
+ defer c.config.modifyLock.Unlock()
+ c.modifyLock.RUnlock()
+
+ c.config.Backoff = backoff
+}
+
+// Clone creates a new client with the same configuration. Note that the same
+// underlying http.Client is used; modifying the client from more than one
+// goroutine at once may not be safe, so modify the client as needed and then
+// clone.
func (c *Client) Clone() (*Client, error) {
- return NewClient(c.config)
+ c.modifyLock.RLock()
+ c.config.modifyLock.RLock()
+ config := c.config
+ c.modifyLock.RUnlock()
+
+ newConfig := &Config{
+ Address: config.Address,
+ HttpClient: config.HttpClient,
+ MaxRetries: config.MaxRetries,
+ Timeout: config.Timeout,
+ Backoff: config.Backoff,
+ Limiter: config.Limiter,
+ }
+ config.modifyLock.RUnlock()
+
+ return NewClient(newConfig)
+}
+
+// SetPolicyOverride sets whether requests should be sent with the policy
+// override flag to request overriding soft-mandatory Sentinel policies (both
+// RGPs and EGPs)
+func (c *Client) SetPolicyOverride(override bool) {
+ c.modifyLock.Lock()
+ defer c.modifyLock.Unlock()
+
+ c.policyOverride = override
}
// NewRequest creates a new raw request object to query the Vault server
// configured for this client. This is an advanced method and generally
// doesn't need to be called externally.
func (c *Client) NewRequest(method, requestPath string) *Request {
+ c.modifyLock.RLock()
+ addr := c.addr
+ token := c.token
+ mfaCreds := c.mfaCreds
+ wrappingLookupFunc := c.wrappingLookupFunc
+ headers := c.headers
+ policyOverride := c.policyOverride
+ c.modifyLock.RUnlock()
+
// if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV
// record and take the highest match; this is not designed for high-availability, just discovery
- var host string = c.addr.Host
- if c.addr.Port() == "" {
+ var host string = addr.Host
+ if addr.Port() == "" {
// Internet Draft specifies that the SRV record is ignored if a port is given
- _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname())
+ _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname())
if err == nil && len(addrs) > 0 {
host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port)
}
@@ -383,12 +568,12 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
req := &Request{
Method: method,
URL: &url.URL{
- User: c.addr.User,
- Scheme: c.addr.Scheme,
+ User: addr.User,
+ Scheme: addr.Scheme,
Host: host,
- Path: path.Join(c.addr.Path, requestPath),
+ Path: path.Join(addr.Path, requestPath),
},
- ClientToken: c.token,
+ ClientToken: token,
Params: make(map[string][]string),
}
@@ -401,18 +586,21 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
default:
lookupPath = requestPath
}
- if c.wrappingLookupFunc != nil {
- req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
+
+ req.MFAHeaderVals = mfaCreds
+
+ if wrappingLookupFunc != nil {
+ req.WrapTTL = wrappingLookupFunc(method, lookupPath)
} else {
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
}
- if c.config.Timeout != 0 {
- c.config.HttpClient.Timeout = c.config.Timeout
- }
- if c.headers != nil {
- req.Headers = c.headers
+
+ if headers != nil {
+ req.Headers = headers
}
+ req.PolicyOverride = policyOverride
+
return req
}
@@ -420,26 +608,75 @@ func (c *Client) NewRequest(method, requestPath string) *Request {
// a Vault server not configured with this client. This is an advanced operation
// that generally won't need to be called externally.
func (c *Client) RawRequest(r *Request) (*Response, error) {
+ c.modifyLock.RLock()
+ token := c.token
+
+ c.config.modifyLock.RLock()
+ limiter := c.config.Limiter
+ maxRetries := c.config.MaxRetries
+ backoff := c.config.Backoff
+ httpClient := c.config.HttpClient
+ timeout := c.config.Timeout
+ c.config.modifyLock.RUnlock()
+
+ c.modifyLock.RUnlock()
+
+ if limiter != nil {
+ limiter.Wait(context.Background())
+ }
+
+ // Sanity check the token before potentially erroring from the API
+ idx := strings.IndexFunc(token, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+ if idx != -1 {
+ return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used")
+ }
+
redirectCount := 0
START:
- req, err := r.ToHTTP()
+ req, err := r.toRetryableHTTP()
if err != nil {
return nil, err
}
+ if req == nil {
+ return nil, fmt.Errorf("nil request created")
+ }
- client := pester.NewExtendedClient(c.config.HttpClient)
- client.Backoff = pester.LinearJitterBackoff
- client.MaxRetries = c.config.MaxRetries
+ // Set the timeout, if any
+ var cancelFunc context.CancelFunc
+ if timeout != 0 {
+ var ctx context.Context
+ ctx, cancelFunc = context.WithTimeout(context.Background(), timeout)
+ req.Request = req.Request.WithContext(ctx)
+ }
+
+ if backoff == nil {
+ backoff = retryablehttp.LinearJitterBackoff
+ }
+
+ client := &retryablehttp.Client{
+ HTTPClient: httpClient,
+ RetryWaitMin: 1000 * time.Millisecond,
+ RetryWaitMax: 1500 * time.Millisecond,
+ RetryMax: maxRetries,
+ CheckRetry: retryablehttp.DefaultRetryPolicy,
+ Backoff: backoff,
+ ErrorHandler: retryablehttp.PassthroughErrorHandler,
+ }
var result *Response
resp, err := client.Do(req)
+ if cancelFunc != nil {
+ cancelFunc()
+ }
if resp != nil {
result = &Response{Response: resp}
}
if err != nil {
if strings.Contains(err.Error(), "tls: oversized") {
- err = fmt.Errorf(
- "%s\n\n"+
+ err = errwrap.Wrapf(
+ "{{err}}\n\n"+
"This error usually means that the server is running with TLS disabled\n"+
"but the client is configured to use TLS. Please either enable TLS\n"+
"on the server or run the client with -address set to an address\n"+
diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go
index 0d5e7d4..346a711 100644
--- a/vendor/github.com/hashicorp/vault/api/logical.go
+++ b/vendor/github.com/hashicorp/vault/api/logical.go
@@ -3,9 +3,10 @@ package api
import (
"bytes"
"fmt"
- "net/http"
+ "io"
"os"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/jsonutil"
)
@@ -50,6 +51,17 @@ func (c *Logical) Read(path string) (*Secret, error) {
defer resp.Body.Close()
}
if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, nil
+ }
return nil, nil
}
if err != nil {
@@ -70,6 +82,17 @@ func (c *Logical) List(path string) (*Secret, error) {
defer resp.Body.Close()
}
if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, nil
+ }
return nil, nil
}
if err != nil {
@@ -89,6 +112,19 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro
if resp != nil {
defer resp.Body.Close()
}
+ if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, err
+ }
+ }
if err != nil {
return nil, err
}
@@ -106,6 +142,19 @@ func (c *Logical) Delete(path string) (*Secret, error) {
if resp != nil {
defer resp.Body.Close()
}
+ if resp != nil && resp.StatusCode == 404 {
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
+ return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, err
+ }
+ }
if err != nil {
return nil, err
}
@@ -138,35 +187,43 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
if resp != nil {
defer resp.Body.Close()
}
- if err != nil {
- if resp != nil && resp.StatusCode != 404 {
+ if resp == nil || resp.StatusCode != 404 {
+ if err != nil {
return nil, err
}
- }
- if resp == nil {
- return nil, nil
+ if resp == nil {
+ return nil, nil
+ }
+ return ParseSecret(resp.Body)
}
- switch resp.StatusCode {
- case http.StatusOK: // New method is supported
- return ParseSecret(resp.Body)
- case http.StatusNotFound: // Fall back to old method
- default:
+ // In the 404 case this may actually be a wrapped 404 error
+ secret, parseErr := ParseSecret(resp.Body)
+ switch parseErr {
+ case nil:
+ case io.EOF:
return nil, nil
+ default:
+ return nil, err
+ }
+ if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) {
+ return secret, nil
}
+ // Otherwise this might be an old-style wrapping token so attempt the old
+ // method
if wrappingToken != "" {
origToken := c.c.Token()
defer c.c.SetToken(origToken)
c.c.SetToken(wrappingToken)
}
- secret, err := c.Read(wrappedResponseLocation)
+ secret, err = c.Read(wrappedResponseLocation)
if err != nil {
- return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err)
+ return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err)
}
if secret == nil {
- return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation)
+ return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation)
}
if secret.Data == nil {
return nil, fmt.Errorf("\"data\" not found in wrapping response")
@@ -178,7 +235,7 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) {
wrappedSecret := new(Secret)
buf := bytes.NewBufferString(secret.Data["response"].(string))
if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil {
- return nil, fmt.Errorf("error unmarshaling wrapped secret: %s", err)
+ return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err)
}
return wrappedSecret, nil
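One consequence of the 404 handling added in this file: a 404 whose body carries warnings or data (as some endpoints return) now surfaces as a non-nil *Secret instead of being swallowed, so callers should check both cases. A hedged sketch with a hypothetical path:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(nil)
        if err != nil {
            log.Fatal(err)
        }

        secret, err := client.Logical().Read("secret/does-not-exist")
        if err != nil {
            log.Fatal(err)
        }
        switch {
        case secret == nil:
            fmt.Println("not found (empty 404)")
        case len(secret.Warnings) > 0:
            fmt.Println("404 with warnings:", secret.Warnings)
        default:
            fmt.Println("data:", secret.Data)
        }
    }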
diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go
index a2a4b66..1d37a19 100644
--- a/vendor/github.com/hashicorp/vault/api/renewer.go
+++ b/vendor/github.com/hashicorp/vault/api/renewer.go
@@ -13,9 +13,6 @@ var (
ErrRenewerNotRenewable = errors.New("secret is not renewable")
ErrRenewerNoSecretData = errors.New("returned empty secret data")
- // DefaultRenewerGrace is the default grace period
- DefaultRenewerGrace = 15 * time.Second
-
// DefaultRenewerRenewBuffer is the default size of the buffer for renew
// messages on the channel.
DefaultRenewerRenewBuffer = 5
@@ -50,12 +47,13 @@ var (
type Renewer struct {
l sync.Mutex
- client *Client
- secret *Secret
- grace time.Duration
- random *rand.Rand
- doneCh chan error
- renewCh chan *RenewOutput
+ client *Client
+ secret *Secret
+ grace time.Duration
+ random *rand.Rand
+ increment int
+ doneCh chan error
+ renewCh chan *RenewOutput
stopped bool
stopCh chan struct{}
@@ -66,9 +64,7 @@ type RenewerInput struct {
// Secret is the secret to renew
Secret *Secret
- // Grace is a minimum renewal before returning so the upstream client
- // can do a re-read. This can be used to prevent clients from waiting
- // too long to read a new credential and incur downtime.
+ // DEPRECATED: this does not do anything.
Grace time.Duration
// Rand is the randomizer to use for underlying randomization. If not
@@ -79,6 +75,11 @@ type RenewerInput struct {
// RenewBuffer is the size of the buffered channel where renew messages are
// dispatched.
RenewBuffer int
+
+ // The new TTL, in seconds, that should be set on the lease. The TTL set
+ // here may or may not be honored by the vault server, based on Vault
+ // configuration or any associated max TTL values.
+ Increment int
}
// RenewOutput is the metadata returned to the client (if it's listening) to
@@ -104,11 +105,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
return nil, ErrRenewerMissingSecret
}
- grace := i.Grace
- if grace == 0 {
- grace = DefaultRenewerGrace
- }
-
random := i.Rand
if random == nil {
random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
@@ -120,12 +116,12 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
}
return &Renewer{
- client: c,
- secret: secret,
- grace: grace,
- random: random,
- doneCh: make(chan error, 1),
- renewCh: make(chan *RenewOutput, renewBuffer),
+ client: c,
+ secret: secret,
+ increment: i.Increment,
+ random: random,
+ doneCh: make(chan error, 1),
+ renewCh: make(chan *RenewOutput, renewBuffer),
stopped: false,
stopCh: make(chan struct{}),
@@ -155,8 +151,8 @@ func (r *Renewer) Stop() {
}
// Renew starts a background process for renewing this secret. When the secret
-// is has auth data, this attempts to renew the auth (token). When the secret
-// has a lease, this attempts to renew the lease.
+// has auth data, this attempts to renew the auth (token). When the secret has
+// a lease, this attempts to renew the lease.
func (r *Renewer) Renew() {
var result error
if r.secret.Auth != nil {
@@ -165,10 +161,7 @@ func (r *Renewer) Renew() {
result = r.renewLease()
}
- select {
- case r.doneCh <- result:
- case <-r.stopCh:
- }
+ r.doneCh <- result
}
// renewAuth is a helper for renewing authentication.
@@ -177,6 +170,9 @@ func (r *Renewer) renewAuth() error {
return ErrRenewerNotRenewable
}
+ priorDuration := time.Duration(r.secret.Auth.LeaseDuration) * time.Second
+ r.calculateGrace(priorDuration)
+
client, token := r.client, r.secret.Auth.ClientToken
for {
@@ -188,7 +184,7 @@ func (r *Renewer) renewAuth() error {
}
// Renew the auth.
- renewal, err := client.Auth().Token().RenewTokenAsSelf(token, 0)
+ renewal, err := client.Auth().Token().RenewTokenAsSelf(token, r.increment)
if err != nil {
return err
}
@@ -209,13 +205,28 @@ func (r *Renewer) renewAuth() error {
return ErrRenewerNotRenewable
}
- // Grab the lease duration and sleep duration - note that we grab the auth
- // lease duration, not the secret lease duration.
+ // Grab the lease duration
leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second
- sleepDuration := r.sleepDuration(leaseDuration)
- // If we are within grace, return now.
- if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ // We keep evaluating a new grace period so long as the lease is
+ // extending. Once it stops extending, we've hit the max and need to
+ // rely on the grace duration.
+ if leaseDuration > priorDuration {
+ r.calculateGrace(leaseDuration)
+ }
+ priorDuration = leaseDuration
+
+ // The sleep duration is set to 2/3 of the current lease duration plus
+ // 1/3 of the current grace period, which adds jitter.
+ sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+
+ // Return now if we are already within the grace period, or if the
+ // sleep would land us inside it. This helps with short tokens; for
+ // example, with a lease duration of 4 seconds and a grace period of 3
+ // seconds, you don't want to sleep for more than three of those
+ // seconds and be left with a very small budget of time to renew.
+ if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace {
return nil
}
@@ -234,6 +245,9 @@ func (r *Renewer) renewLease() error {
return ErrRenewerNotRenewable
}
+ priorDuration := time.Duration(r.secret.LeaseDuration) * time.Second
+ r.calculateGrace(priorDuration)
+
client, leaseID := r.client, r.secret.LeaseID
for {
@@ -245,7 +259,7 @@ func (r *Renewer) renewLease() error {
}
// Renew the lease.
- renewal, err := client.Sys().Renew(leaseID, 0)
+ renewal, err := client.Sys().Renew(leaseID, r.increment)
if err != nil {
return err
}
@@ -266,12 +280,28 @@ func (r *Renewer) renewLease() error {
return ErrRenewerNotRenewable
}
- // Grab the lease duration and sleep duration
+ // Grab the lease duration
leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second
- sleepDuration := r.sleepDuration(leaseDuration)
- // If we are within grace, return now.
- if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ // We keep evaluating a new grace period so long as the lease is
+ // extending. Once it stops extending, we've hit the max and need to
+ // rely on the grace duration.
+ if leaseDuration > priorDuration {
+ r.calculateGrace(leaseDuration)
+ }
+ priorDuration = leaseDuration
+
+ // The sleep duration is set to 2/3 of the current lease duration plus
+ // 1/3 of the current grace period, which adds jitter.
+ sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+
+ // Return now if we are already within the grace period, or if the
+ // sleep would land us inside it. This helps with short tokens; for
+ // example, with a lease duration of 4 seconds and a grace period of 3
+ // seconds, you don't want to sleep for more than three of those
+ // seconds and be left with a very small budget of time to renew.
+ if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace {
return nil
}
@@ -300,3 +330,20 @@ func (r *Renewer) sleepDuration(base time.Duration) time.Duration {
return time.Duration(sleep)
}
+
+// calculateGrace calculates the grace period based on a reasonable set of
+// assumptions given the total lease time; it also adds some jitter so that
+// clients do not renew in lockstep.
+func (r *Renewer) calculateGrace(leaseDuration time.Duration) {
+ if leaseDuration == 0 {
+ r.grace = 0
+ return
+ }
+
+ leaseNanos := float64(leaseDuration.Nanoseconds())
+ jitterMax := 0.1 * leaseNanos
+
+ // For a given lease duration, we want to allow 80-90% of that to elapse,
+ // so the remaining amount is the grace period
+ r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
+}
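End to end, the renewer with the new Increment field (Grace is now ignored) might be driven like this; the secret path is a placeholder for any renewable dynamic secret:

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(nil)
        if err != nil {
            log.Fatal(err)
        }

        secret, err := client.Logical().Read("database/creds/my-role") // hypothetical
        if err != nil {
            log.Fatal(err)
        }

        renewer, err := client.NewRenewer(&api.RenewerInput{
            Secret:    secret,
            Increment: 3600, // ask for a 1h TTL on each renewal; the server may cap it
        })
        if err != nil {
            log.Fatal(err)
        }

        go renewer.Renew()
        defer renewer.Stop()

        for {
            select {
            case err := <-renewer.DoneCh():
                // Renewal stopped: an error, or the lease cannot be extended further.
                if err != nil {
                    log.Fatal(err)
                }
                log.Println("lease can no longer be renewed; re-read the secret")
                return
            case renewal := <-renewer.RenewCh():
                log.Printf("renewed at %s, lease_duration=%ds", renewal.RenewedAt, renewal.Secret.LeaseDuration)
            }
        }
    }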
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index 83a28bd..5bcff8c 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -4,54 +4,108 @@ import (
"bytes"
"encoding/json"
"io"
+ "io/ioutil"
"net/http"
"net/url"
+
+ retryablehttp "github.com/hashicorp/go-retryablehttp"
)
// Request is a raw request configuration structure used to initiate
// API requests to the Vault server.
type Request struct {
- Method string
- URL *url.URL
- Params url.Values
- Headers http.Header
- ClientToken string
- WrapTTL string
- Obj interface{}
- Body io.Reader
- BodySize int64
+ Method string
+ URL *url.URL
+ Params url.Values
+ Headers http.Header
+ ClientToken string
+ MFAHeaderVals []string
+ WrapTTL string
+ Obj interface{}
+
+ // When possible, use BodyBytes as it is more efficient due to how the
+ // retry logic works
+ BodyBytes []byte
+
+ // Fallback
+ Body io.Reader
+ BodySize int64
+
+ // Whether to request overriding soft-mandatory Sentinel policies (RGPs and
+ // EGPs). If set, the override flag will take effect for all policies
+ // evaluated during the request.
+ PolicyOverride bool
}
// SetJSONBody is used to set a request body that is a JSON-encoded value.
func (r *Request) SetJSONBody(val interface{}) error {
- buf := bytes.NewBuffer(nil)
- enc := json.NewEncoder(buf)
- if err := enc.Encode(val); err != nil {
+ buf, err := json.Marshal(val)
+ if err != nil {
return err
}
r.Obj = val
- r.Body = buf
- r.BodySize = int64(buf.Len())
+ r.BodyBytes = buf
return nil
}
// ResetJSONBody is used to reset the body for a redirect
func (r *Request) ResetJSONBody() error {
- if r.Body == nil {
+ if r.BodyBytes == nil {
return nil
}
return r.SetJSONBody(r.Obj)
}
-// ToHTTP turns this request into a valid *http.Request for use with the
-// net/http package.
+// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use
+// with the net/http package.
func (r *Request) ToHTTP() (*http.Request, error) {
+ req, err := r.toRetryableHTTP()
+ if err != nil {
+ return nil, err
+ }
+
+ switch {
+ case r.BodyBytes == nil && r.Body == nil:
+ // No body
+
+ case r.BodyBytes != nil:
+ req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes))
+
+ default:
+ if c, ok := r.Body.(io.ReadCloser); ok {
+ req.Request.Body = c
+ } else {
+ req.Request.Body = ioutil.NopCloser(r.Body)
+ }
+ }
+
+ return req.Request, nil
+}
+
+func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
// Encode the query parameters
r.URL.RawQuery = r.Params.Encode()
- // Create the HTTP request
- req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body)
+ // Create the HTTP request, defaulting to retryable
+ var req *retryablehttp.Request
+
+ var err error
+ var body interface{}
+
+ switch {
+ case r.BodyBytes == nil && r.Body == nil:
+ // No body
+
+ case r.BodyBytes != nil:
+ // Use bytes, it's more efficient
+ body = r.BodyBytes
+
+ default:
+ body = r.Body
+ }
+
+ req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body)
if err != nil {
return nil, err
}
@@ -77,5 +131,15 @@ func (r *Request) ToHTTP() (*http.Request, error) {
req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL)
}
+ if len(r.MFAHeaderVals) != 0 {
+ for _, mfaHeaderVal := range r.MFAHeaderVals {
+ req.Header.Add("X-Vault-MFA", mfaHeaderVal)
+ }
+ }
+
+ if r.PolicyOverride {
+ req.Header.Set("X-Vault-Policy-Override", "true")
+ }
+
return req, nil
}
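A short sketch of the request path after this change: SetJSONBody now fills BodyBytes, which lets the retrying transport replay the payload on each attempt. The path and payload are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(nil)
        if err != nil {
            log.Fatal(err)
        }

        req := client.NewRequest("PUT", "/v1/secret/my-app")
        if err := req.SetJSONBody(map[string]interface{}{
            "user": "demo",
            "pass": "example",
        }); err != nil {
            log.Fatal(err)
        }

        // RawRequest builds a retryablehttp request internally; the buffered
        // BodyBytes is re-sent unchanged on every retry attempt.
        resp, err := client.RawRequest(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.StatusCode)
    }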
diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go
index 05502e1..053a277 100644
--- a/vendor/github.com/hashicorp/vault/api/response.go
+++ b/vendor/github.com/hashicorp/vault/api/response.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"io"
+ "io/ioutil"
"net/http"
"github.com/hashicorp/vault/helper/jsonutil"
@@ -33,11 +34,14 @@ func (r *Response) Error() error {
// We have an error. Let's copy the body into our own buffer first,
// so that if we can't decode JSON, we can at least copy it raw.
- var bodyBuf bytes.Buffer
- if _, err := io.Copy(&bodyBuf, r.Body); err != nil {
+ bodyBuf := &bytes.Buffer{}
+ if _, err := io.Copy(bodyBuf, r.Body); err != nil {
return err
}
+ r.Body.Close()
+ r.Body = ioutil.NopCloser(bodyBuf)
+
// Decode the error response if we can. Note that we wrap the bodyBuf
// in a bytes.Reader here so that the JSON decoder doesn't move the
// read pointer for the original buffer.
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
index 7478a0c..b6517c4 100644
--- a/vendor/github.com/hashicorp/vault/api/secret.go
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -1,10 +1,13 @@
package api
import (
+ "fmt"
"io"
"time"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/jsonutil"
+ "github.com/hashicorp/vault/helper/parseutil"
)
// Secret is the structure returned for every secret within Vault.
@@ -35,11 +38,245 @@ type Secret struct {
WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
}
+// TokenID returns the standardized token ID (token) for the given secret.
+func (s *Secret) TokenID() (string, error) {
+ if s == nil {
+ return "", nil
+ }
+
+ if s.Auth != nil && len(s.Auth.ClientToken) > 0 {
+ return s.Auth.ClientToken, nil
+ }
+
+ if s.Data == nil || s.Data["id"] == nil {
+ return "", nil
+ }
+
+ id, ok := s.Data["id"].(string)
+ if !ok {
+ return "", fmt.Errorf("token found but in the wrong format")
+ }
+
+ return id, nil
+}
+
+// TokenAccessor returns the standardized token accessor for the given secret.
+// If the secret is nil or does not contain an accessor, this returns the empty
+// string.
+func (s *Secret) TokenAccessor() (string, error) {
+ if s == nil {
+ return "", nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Accessor) > 0 {
+ return s.Auth.Accessor, nil
+ }
+
+ if s.Data == nil || s.Data["accessor"] == nil {
+ return "", nil
+ }
+
+ accessor, ok := s.Data["accessor"].(string)
+ if !ok {
+	return "", fmt.Errorf("accessor found but in the wrong format")
+ }
+
+ return accessor, nil
+}
+
+// TokenRemainingUses returns the standardized remaining uses for the given
+// secret. If the secret is nil or does not contain the "num_uses", this
+// returns -1. On error, this returns 0 and a non-nil error.
+func (s *Secret) TokenRemainingUses() (int, error) {
+ if s == nil || s.Data == nil || s.Data["num_uses"] == nil {
+ return -1, nil
+ }
+
+ uses, err := parseutil.ParseInt(s.Data["num_uses"])
+ if err != nil {
+ return 0, err
+ }
+
+ return int(uses), nil
+}
+
+// TokenPolicies returns the standardized list of policies for the given secret.
+// If the secret is nil or does not contain any policies, this returns nil. It
+// also populates the secret's Auth info with identity/token policy info.
+func (s *Secret) TokenPolicies() ([]string, error) {
+ if s == nil {
+ return nil, nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Policies) > 0 {
+ return s.Auth.Policies, nil
+ }
+
+ if s.Data == nil || s.Data["policies"] == nil {
+ return nil, nil
+ }
+
+ var tokenPolicies []string
+
+ // Token policies
+ {
+ _, ok := s.Data["policies"]
+ if !ok {
+ goto TOKEN_DONE
+ }
+
+ sList, ok := s.Data["policies"].([]string)
+ if ok {
+ tokenPolicies = sList
+ goto TOKEN_DONE
+ }
+
+ list, ok := s.Data["policies"].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to convert token policies to expected format")
+ }
+ for _, v := range list {
+ p, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert policy %v to string", v)
+ }
+ tokenPolicies = append(tokenPolicies, p)
+ }
+ }
+
+TOKEN_DONE:
+ var identityPolicies []string
+
+ // Identity policies
+ {
+ _, ok := s.Data["identity_policies"]
+ if !ok {
+ goto DONE
+ }
+
+ sList, ok := s.Data["identity_policies"].([]string)
+ if ok {
+ identityPolicies = sList
+ goto DONE
+ }
+
+ list, ok := s.Data["identity_policies"].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to convert identity policies to expected format")
+ }
+ for _, v := range list {
+ p, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert policy %v to string", v)
+ }
+ identityPolicies = append(identityPolicies, p)
+ }
+ }
+
+DONE:
+
+ if s.Auth == nil {
+ s.Auth = &SecretAuth{}
+ }
+
+ policies := append(tokenPolicies, identityPolicies...)
+
+ s.Auth.TokenPolicies = tokenPolicies
+ s.Auth.IdentityPolicies = identityPolicies
+ s.Auth.Policies = policies
+
+ return policies, nil
+}
+
+// TokenMetadata returns the map of metadata associated with this token, if any
+// exists. If the secret is nil or does not contain the "metadata" key, this
+// returns nil.
+func (s *Secret) TokenMetadata() (map[string]string, error) {
+ if s == nil {
+ return nil, nil
+ }
+
+ if s.Auth != nil && len(s.Auth.Metadata) > 0 {
+ return s.Auth.Metadata, nil
+ }
+
+ if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) {
+ return nil, nil
+ }
+
+ data, ok := s.Data["metadata"].(map[string]interface{})
+ if !ok {
+ data, ok = s.Data["meta"].(map[string]interface{})
+ if !ok {
+ return nil, fmt.Errorf("unable to convert metadata field to expected format")
+ }
+ }
+
+ metadata := make(map[string]string, len(data))
+ for k, v := range data {
+ typed, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert metadata value %v to string", v)
+ }
+ metadata[k] = typed
+ }
+
+ return metadata, nil
+}
+
+// TokenIsRenewable returns the standardized token renewability for the given
+// secret. If the secret is nil or does not contain the "renewable" key, this
+// returns false.
+func (s *Secret) TokenIsRenewable() (bool, error) {
+ if s == nil {
+ return false, nil
+ }
+
+ if s.Auth != nil && s.Auth.Renewable {
+ return s.Auth.Renewable, nil
+ }
+
+ if s.Data == nil || s.Data["renewable"] == nil {
+ return false, nil
+ }
+
+ renewable, err := parseutil.ParseBool(s.Data["renewable"])
+ if err != nil {
+ return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err)
+ }
+
+ return renewable, nil
+}
+
+// TokenTTL returns the standardized remaining token TTL for the given secret.
+// If the secret is nil or does not contain a TTL, this returns 0.
+func (s *Secret) TokenTTL() (time.Duration, error) {
+ if s == nil {
+ return 0, nil
+ }
+
+ if s.Auth != nil && s.Auth.LeaseDuration > 0 {
+ return time.Duration(s.Auth.LeaseDuration) * time.Second, nil
+ }
+
+ if s.Data == nil || s.Data["ttl"] == nil {
+ return 0, nil
+ }
+
+ ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"])
+ if err != nil {
+ return 0, err
+ }
+
+ return ttl, nil
+}
+
// SecretWrapInfo contains wrapping information if we have it. If what is
// contained is an authentication token, the accessor for the token will be
// available in WrappedAccessor.
type SecretWrapInfo struct {
Token string `json:"token"`
+ Accessor string `json:"accessor"`
TTL int `json:"ttl"`
CreationTime time.Time `json:"creation_time"`
CreationPath string `json:"creation_path"`
@@ -48,10 +285,12 @@ type SecretWrapInfo struct {
// SecretAuth is the structure containing auth information if we have it.
type SecretAuth struct {
- ClientToken string `json:"client_token"`
- Accessor string `json:"accessor"`
- Policies []string `json:"policies"`
- Metadata map[string]string `json:"metadata"`
+ ClientToken string `json:"client_token"`
+ Accessor string `json:"accessor"`
+ Policies []string `json:"policies"`
+ TokenPolicies []string `json:"token_policies"`
+ IdentityPolicies []string `json:"identity_policies"`
+ Metadata map[string]string `json:"metadata"`
LeaseDuration int `json:"lease_duration"`
Renewable bool `json:"renewable"`
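
A hedged sketch of the new Secret token helpers added above, reading them off a token self-lookup. The configured, authenticated client and the LookupSelf call come from the surrounding api package rather than this diff; error handling on the individual helpers is elided for brevity.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// describeToken is an illustrative helper (not part of the vendored code).
func describeToken(client *vault.Client) {
	secret, err := client.Auth().Token().LookupSelf()
	if err != nil {
		log.Fatal(err)
	}

	id, _ := secret.TokenID()
	policies, _ := secret.TokenPolicies() // also populates Auth with token/identity policies
	ttl, _ := secret.TokenTTL()
	renewable, _ := secret.TokenIsRenewable()

	fmt.Printf("token=%s policies=%v ttl=%s renewable=%v\n", id, policies, ttl, renewable)
}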
diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
index 729fd99..032fb43 100644
--- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go
+++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go
@@ -7,11 +7,13 @@ import (
"io/ioutil"
"os"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-rootcerts"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/vault/helper/hclutil"
"github.com/mitchellh/mapstructure"
)
@@ -41,16 +43,16 @@ type SSHHelper struct {
type SSHVerifyResponse struct {
// Usually empty. If the request OTP is echo request message, this will
// be set to the corresponding echo response message.
- Message string `json:"message" structs:"message" mapstructure:"message"`
+ Message string `json:"message" mapstructure:"message"`
// Username associated with the OTP
- Username string `json:"username" structs:"username" mapstructure:"username"`
+ Username string `json:"username" mapstructure:"username"`
// IP associated with the OTP
- IP string `json:"ip" structs:"ip" mapstructure:"ip"`
+ IP string `json:"ip" mapstructure:"ip"`
// Name of the role against which the OTP was issued
- RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"`
+ RoleName string `json:"role_name" mapstructure:"role_name"`
}
// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file.
@@ -141,12 +143,12 @@ func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) {
func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
root, err := hcl.Parse(string(contents))
if err != nil {
- return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err)
+ return nil, errwrap.Wrapf("error parsing config: {{err}}", err)
}
list, ok := root.Node.(*ast.ObjectList)
if !ok {
- return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object")
+ return nil, fmt.Errorf("error parsing config: file doesn't contain a root object")
}
valid := []string{
@@ -159,7 +161,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
"tls_skip_verify",
"tls_server_name",
}
- if err := checkHCLKeys(list, valid); err != nil {
+ if err := hclutil.CheckHCLKeys(list, valid); err != nil {
return nil, multierror.Prefix(err, "ssh_helper:")
}
@@ -170,7 +172,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) {
}
if c.VaultAddr == "" {
- return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'")
+ return nil, fmt.Errorf(`missing config "vault_addr"`)
}
return &c, nil
}
@@ -227,31 +229,3 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) {
}
return &verifyResp, nil
}
-
-func checkHCLKeys(node ast.Node, valid []string) error {
- var list *ast.ObjectList
- switch n := node.(type) {
- case *ast.ObjectList:
- list = n
- case *ast.ObjectType:
- list = n.List
- default:
- return fmt.Errorf("cannot check HCL keys of type %T", n)
- }
-
- validMap := make(map[string]struct{}, len(valid))
- for _, v := range valid {
- validMap[v] = struct{}{}
- }
-
- var result error
- for _, item := range list.Items {
- key := item.Keys[0].Token.Value().(string)
- if _, ok := validMap[key]; !ok {
- result = multierror.Append(result, fmt.Errorf(
- "invalid key '%s' on line %d", key, item.Assign.Line))
- }
- }
-
- return result
-}
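
A short sketch of parsing a vault-ssh-helper config with the updated error wrapping; invalid-key errors now come from helper/hclutil and are prefixed with "ssh_helper:". The HCL contents here are illustrative only.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// parseHelperConfig is an illustrative helper (not part of the vendored code).
func parseHelperConfig() {
	const contents = `vault_addr = "https://vault.example.com:8200"`

	cfg, err := vault.ParseSSHHelperConfig(contents)
	if err != nil {
		// e.g. "error parsing config: ..." via errwrap, or a multierror of invalid keys
		log.Fatal(err)
	}
	fmt.Println(cfg.VaultAddr)
}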
diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go
index 89f2141..05cd756 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_audit.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -83,10 +82,8 @@ func (c *Sys) EnableAudit(
}
func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error {
- body := structs.Map(options)
-
r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(options); err != nil {
return err
}
@@ -113,10 +110,10 @@ func (c *Sys) DisableAudit(path string) error {
// documentation. Please refer to that documentation for more details.
type EnableAuditOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Options map[string]string `json:"options" structs:"options"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
}
type Audit struct {
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
index 32f4bbd..0b1a319 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_auth.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -52,10 +51,8 @@ func (c *Sys) EnableAuth(path, authType, desc string) error {
}
func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error {
- body := structs.Map(options)
-
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(options); err != nil {
return err
}
@@ -78,31 +75,45 @@ func (c *Sys) DisableAuth(path string) error {
}
// Structures for the requests/response are all down here. They aren't
-// individually documentd because the map almost directly to the raw HTTP API
+// individually documented because they map almost directly to the raw HTTP API
// documentation. Please refer to that documentation for more details.
type EnableAuthOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Config AuthConfigInput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Config AuthConfigInput `json:"config"`
+ Local bool `json:"local"`
+ PluginName string `json:"plugin_name,omitempty"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
+ Options map[string]string `json:"options" mapstructure:"options"`
}
type AuthConfigInput struct {
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
type AuthMount struct {
- Type string `json:"type" structs:"type" mapstructure:"type"`
- Description string `json:"description" structs:"description" mapstructure:"description"`
- Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"`
- Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
- Local bool `json:"local" structs:"local" mapstructure:"local"`
+ Type string `json:"type" mapstructure:"type"`
+ Description string `json:"description" mapstructure:"description"`
+ Accessor string `json:"accessor" mapstructure:"accessor"`
+ Config AuthConfigOutput `json:"config" mapstructure:"config"`
+ Local bool `json:"local" mapstructure:"local"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
+ Options map[string]string `json:"options" mapstructure:"options"`
}
type AuthConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
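
A sketch of enabling an auth method with the expanded options, now that EnableAuthWithOptions serializes the struct directly instead of going through structs.Map. The mount path, description, and TTL values are placeholders; the client is assumed to be configured.

package example

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

// enableAppRole is an illustrative helper (not part of the vendored code).
func enableAppRole(client *vault.Client) {
	err := client.Sys().EnableAuthWithOptions("approle", &vault.EnableAuthOptions{
		Type:        "approle",
		Description: "AppRole auth for CI jobs",
		Config: vault.AuthConfigInput{
			DefaultLeaseTTL: "1h",
			MaxLeaseTTL:     "24h",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}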
diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
index 80f6218..cbb3a72 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go
@@ -34,8 +34,14 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) {
return nil, err
}
+ if result["capabilities"] == nil {
+ return nil, nil
+ }
var capabilities []string
- capabilitiesRaw := result["capabilities"].([]interface{})
+ capabilitiesRaw, ok := result["capabilities"].([]interface{})
+ if !ok {
+ return nil, fmt.Errorf("error interpreting returned capabilities")
+ }
for _, capability := range capabilitiesRaw {
capabilities = append(capabilities, capability.(string))
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
index 8dc2095..adb5496 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go
@@ -1,7 +1,15 @@
package api
func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
- r := c.c.NewRequest("GET", "/v1/sys/generate-root/attempt")
+ return c.generateRootStatusCommon("/v1/sys/generate-root/attempt")
+}
+
+func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) {
+ return c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt")
+}
+
+func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) {
+ r := c.c.NewRequest("GET", path)
resp, err := c.c.RawRequest(r)
if err != nil {
return nil, err
@@ -14,12 +22,20 @@ func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) {
}
func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootInitCommon("/v1/sys/generate-root/attempt", otp, pgpKey)
+}
+
+func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey)
+}
+
+func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) {
body := map[string]interface{}{
"otp": otp,
"pgp_key": pgpKey,
}
- r := c.c.NewRequest("PUT", "/v1/sys/generate-root/attempt")
+ r := c.c.NewRequest("PUT", path)
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
@@ -36,7 +52,15 @@ func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse,
}
func (c *Sys) GenerateRootCancel() error {
- r := c.c.NewRequest("DELETE", "/v1/sys/generate-root/attempt")
+ return c.generateRootCancelCommon("/v1/sys/generate-root/attempt")
+}
+
+func (c *Sys) GenerateDROperationTokenCancel() error {
+ return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt")
+}
+
+func (c *Sys) generateRootCancelCommon(path string) error {
+ r := c.c.NewRequest("DELETE", path)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -45,12 +69,20 @@ func (c *Sys) GenerateRootCancel() error {
}
func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootUpdateCommon("/v1/sys/generate-root/update", shard, nonce)
+}
+
+func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) {
+ return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce)
+}
+
+func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) {
body := map[string]interface{}{
"key": shard,
"nonce": nonce,
}
- r := c.c.NewRequest("PUT", "/v1/sys/generate-root/update")
+ r := c.c.NewRequest("PUT", path)
if err := r.SetJSONBody(body); err != nil {
return nil, err
}
@@ -67,11 +99,12 @@ func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusRespon
}
type GenerateRootStatusResponse struct {
- Nonce string
- Started bool
- Progress int
- Required int
- Complete bool
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
+ Complete bool `json:"complete"`
+ EncodedToken string `json:"encoded_token"`
EncodedRootToken string `json:"encoded_root_token"`
PGPFingerprint string `json:"pgp_fingerprint"`
}
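
A sketch of the new DR operation-token flow, which mirrors the generate-root flow refactored above. The OTP and unseal-key shares are operator-supplied placeholders; the client is assumed to be configured against a DR secondary.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// drOperationToken is an illustrative helper (not part of the vendored code).
func drOperationToken(client *vault.Client, otp string, shares []string) {
	status, err := client.Sys().GenerateDROperationTokenInit(otp, "")
	if err != nil {
		log.Fatal(err)
	}

	for _, share := range shares {
		status, err = client.Sys().GenerateDROperationTokenUpdate(share, status.Nonce)
		if err != nil {
			log.Fatal(err)
		}
		if status.Complete {
			break
		}
	}

	// The generated token is returned in the new EncodedToken field.
	fmt.Println("encoded token:", status.EncodedToken)
}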
diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go
index 822354c..82fd1f6 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_health.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_health.go
@@ -5,8 +5,10 @@ func (c *Sys) Health() (*HealthResponse, error) {
// If the code is 400 or above it will automatically turn into an error,
// but the sys/health API defaults to returning 5xx when not sealed or
// inited, so we force this code to be something else so we parse correctly
- r.Params.Add("sealedcode", "299")
r.Params.Add("uninitcode", "299")
+ r.Params.Add("sealedcode", "299")
+ r.Params.Add("standbycode", "299")
+ r.Params.Add("drsecondarycode", "299")
resp, err := c.c.RawRequest(r)
if err != nil {
return nil, err
@@ -19,11 +21,13 @@ func (c *Sys) Health() (*HealthResponse, error) {
}
type HealthResponse struct {
- Initialized bool `json:"initialized"`
- Sealed bool `json:"sealed"`
- Standby bool `json:"standby"`
- ServerTimeUTC int64 `json:"server_time_utc"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ ReplicationPerformanceMode string `json:"replication_performance_mode"`
+ ReplicationDRMode string `json:"replication_dr_mode"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
}
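
A sketch reading the new replication fields from the health endpoint; the client is assumed to be configured.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// printHealth is an illustrative helper (not part of the vendored code).
func printHealth(client *vault.Client) {
	health, err := client.Sys().Health()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("initialized=%v sealed=%v standby=%v perf=%s dr=%s\n",
		health.Initialized, health.Sealed, health.Standby,
		health.ReplicationPerformanceMode, health.ReplicationDRMode)
}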
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index 091a8f6..8ac5b45 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -3,7 +3,6 @@ package api
import (
"fmt"
- "github.com/fatih/structs"
"github.com/mitchellh/mapstructure"
)
@@ -44,10 +43,8 @@ func (c *Sys) ListMounts() (map[string]*MountOutput, error) {
}
func (c *Sys) Mount(path string, mountInfo *MountInput) error {
- body := structs.Map(mountInfo)
-
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(mountInfo); err != nil {
return err
}
@@ -88,9 +85,8 @@ func (c *Sys) Remount(from, to string) error {
}
func (c *Sys) TuneMount(path string, config MountConfigInput) error {
- body := structs.Map(config)
r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path))
- if err := r.SetJSONBody(body); err != nil {
+ if err := r.SetJSONBody(config); err != nil {
return err
}
@@ -120,31 +116,44 @@ func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) {
}
type MountInput struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Config MountConfigInput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Config MountConfigInput `json:"config"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
+ PluginName string `json:"plugin_name,omitempty"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
}
type MountConfigInput struct {
- DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ Options map[string]string `json:"options" mapstructure:"options"`
+ DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
type MountOutput struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Accessor string `json:"accessor" structs:"accessor"`
- Config MountConfigOutput `json:"config" structs:"config"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type"`
+ Description string `json:"description"`
+ Accessor string `json:"accessor"`
+ Config MountConfigOutput `json:"config"`
+ Options map[string]string `json:"options"`
+ Local bool `json:"local"`
+ SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"`
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
- PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"`
+ AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"`
+ AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"`
+ ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"`
+ PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"`
}
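
A sketch using the new Options map on MountInput, for example to select version 2 of the kv backend. The mount path, description, option values, and TTL are illustrative; the client is assumed to be configured.

package example

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

// mountKVv2 is an illustrative helper (not part of the vendored code).
func mountKVv2(client *vault.Client) {
	err := client.Sys().Mount("kv", &vault.MountInput{
		Type:        "kv",
		Description: "example kv mount",
		Options:     map[string]string{"version": "2"},
		Config: vault.MountConfigInput{
			DefaultLeaseTTL: "168h",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}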
diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
new file mode 100644
index 0000000..8183b10
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go
@@ -0,0 +1,117 @@
+package api
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// ListPluginsInput is used as input to the ListPlugins function.
+type ListPluginsInput struct{}
+
+// ListPluginsResponse is the response from the ListPlugins call.
+type ListPluginsResponse struct {
+ // Names is the list of names of the plugins.
+ Names []string
+}
+
+// ListPlugins lists all plugins in the catalog and returns their names as a
+// list of strings.
+func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) {
+ path := "/v1/sys/plugins/catalog"
+ req := c.c.NewRequest("LIST", path)
+ resp, err := c.c.RawRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result struct {
+ Data struct {
+ Keys []string `json:"keys"`
+ } `json:"data"`
+ }
+ if err := resp.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &ListPluginsResponse{Names: result.Data.Keys}, nil
+}
+
+// GetPluginInput is used as input to the GetPlugin function.
+type GetPluginInput struct {
+ Name string `json:"-"`
+}
+
+// GetPluginResponse is the response from the GetPlugin call.
+type GetPluginResponse struct {
+ Args []string `json:"args"`
+ Builtin bool `json:"builtin"`
+ Command string `json:"command"`
+ Name string `json:"name"`
+ SHA256 string `json:"sha256"`
+}
+
+func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodGet, path)
+ resp, err := c.c.RawRequest(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result GetPluginResponse
+ err = resp.DecodeJSON(&result)
+ if err != nil {
+ return nil, err
+ }
+ return &result, err
+}
+
+// RegisterPluginInput is used as input to the RegisterPlugin function.
+type RegisterPluginInput struct {
+ // Name is the name of the plugin. Required.
+ Name string `json:"-"`
+
+ // Args is the list of args to spawn the process with.
+ Args []string `json:"args,omitempty"`
+
+ // Command is the command to run.
+ Command string `json:"command,omitempty"`
+
+ // SHA256 is the shasum of the plugin.
+ SHA256 string `json:"sha256,omitempty"`
+}
+
+// RegisterPlugin registers the plugin with the given information.
+func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodPut, path)
+ if err := req.SetJSONBody(i); err != nil {
+ return err
+ }
+
+ resp, err := c.c.RawRequest(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+// DeregisterPluginInput is used as input to the DeregisterPlugin function.
+type DeregisterPluginInput struct {
+ // Name is the name of the plugin. Required.
+ Name string `json:"-"`
+}
+
+// DeregisterPlugin removes the plugin with the given name from the plugin
+// catalog.
+func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name)
+ req := c.c.NewRequest(http.MethodDelete, path)
+ resp, err := c.c.RawRequest(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
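
A sketch of the new plugin catalog API added in sys_plugins.go; the plugin name, command, and SHA256 digest are hypothetical placeholders, and the client is assumed to be configured with sufficient privileges.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// managePlugins is an illustrative helper (not part of the vendored code).
func managePlugins(client *vault.Client) {
	err := client.Sys().RegisterPlugin(&vault.RegisterPluginInput{
		Name:    "my-secrets-plugin", // hypothetical plugin name
		Command: "my-secrets-plugin",
		SHA256:  "0000000000000000000000000000000000000000000000000000000000000000", // placeholder digest
	})
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.Sys().ListPlugins(&vault.ListPluginsInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("catalog:", resp.Names)
}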
diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go
index ba0e17f..9c9d9c0 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_policy.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go
@@ -50,12 +50,14 @@ func (c *Sys) GetPolicy(name string) (string, error) {
return "", err
}
- var ok bool
- if _, ok = result["rules"]; !ok {
- return "", fmt.Errorf("rules not found in response")
+ if rulesRaw, ok := result["rules"]; ok {
+ return rulesRaw.(string), nil
+ }
+ if policyRaw, ok := result["policy"]; ok {
+ return policyRaw.(string), nil
}
- return result["rules"].(string), nil
+ return "", fmt.Errorf("no policy found in response")
}
func (c *Sys) PutPolicy(name, rules string) error {
diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
index e6d039e..ddeac01 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_rekey.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go
@@ -26,6 +26,32 @@ func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) {
return &result, err
}
+func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey/verify")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyVerificationStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyVerificationStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) {
r := c.c.NewRequest("PUT", "/v1/sys/rekey/init")
if err := r.SetJSONBody(config); err != nil {
@@ -78,6 +104,24 @@ func (c *Sys) RekeyRecoveryKeyCancel() error {
return err
}
+func (c *Sys) RekeyVerificationCancel() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+func (c *Sys) RekeyRecoveryKeyVerificationCancel() error {
+ r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify")
+ resp, err := c.c.RawRequest(r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) {
body := map[string]interface{}{
"key": shard,
@@ -168,35 +212,98 @@ func (c *Sys) RekeyDeleteRecoveryBackup() error {
return err
}
+func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+ body := map[string]interface{}{
+ "key": shard,
+ "nonce": nonce,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyVerificationUpdateResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) {
+ body := map[string]interface{}{
+ "key": shard,
+ "nonce": nonce,
+ }
+
+ r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result RekeyVerificationUpdateResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
type RekeyInitRequest struct {
- SecretShares int `json:"secret_shares"`
- SecretThreshold int `json:"secret_threshold"`
- PGPKeys []string `json:"pgp_keys"`
- Backup bool
+ SecretShares int `json:"secret_shares"`
+ SecretThreshold int `json:"secret_threshold"`
+ StoredShares int `json:"stored_shares"`
+ PGPKeys []string `json:"pgp_keys"`
+ Backup bool
+ RequireVerification bool `json:"require_verification"`
}
type RekeyStatusResponse struct {
- Nonce string
- Started bool
- T int
- N int
- Progress int
- Required int
- PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Required int `json:"required"`
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool `json:"backup"`
+ VerificationRequired bool `json:"verification_required"`
+ VerificationNonce string `json:"verification_nonce"`
}
type RekeyUpdateResponse struct {
- Nonce string
- Complete bool
- Keys []string
- KeysB64 []string `json:"keys_base64"`
- PGPFingerprints []string `json:"pgp_fingerprints"`
- Backup bool
+ Nonce string `json:"nonce"`
+ Complete bool `json:"complete"`
+ Keys []string `json:"keys"`
+ KeysB64 []string `json:"keys_base64"`
+ PGPFingerprints []string `json:"pgp_fingerprints"`
+ Backup bool `json:"backup"`
+ VerificationRequired bool `json:"verification_required"`
+ VerificationNonce string `json:"verification_nonce,omitempty"`
}
type RekeyRetrieveResponse struct {
- Nonce string
- Keys map[string][]string
+ Nonce string `json:"nonce"`
+ Keys map[string][]string `json:"keys"`
KeysB64 map[string][]string `json:"keys_base64"`
}
+
+type RekeyVerificationStatusResponse struct {
+ Nonce string `json:"nonce"`
+ Started bool `json:"started"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+}
+
+type RekeyVerificationUpdateResponse struct {
+ Nonce string `json:"nonce"`
+ Complete bool `json:"complete"`
+}
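
A sketch of the new rekey verification round-trip added above. The key shares are operator-supplied placeholders; in practice the verification nonce is also surfaced as VerificationNonce on the rekey status/update responses.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// verifyRekey is an illustrative helper (not part of the vendored code).
func verifyRekey(client *vault.Client, shares []string) {
	status, err := client.Sys().RekeyVerificationStatus()
	if err != nil {
		log.Fatal(err)
	}

	for _, share := range shares {
		resp, err := client.Sys().RekeyVerificationUpdate(share, status.Nonce)
		if err != nil {
			log.Fatal(err)
		}
		if resp.Complete {
			fmt.Println("rekey verification complete")
			return
		}
	}
}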
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go
index 97a49ae..3d594ba 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_seal.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go
@@ -49,12 +49,14 @@ func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) {
}
type SealStatusResponse struct {
- Sealed bool `json:"sealed"`
- T int `json:"t"`
- N int `json:"n"`
- Progress int `json:"progress"`
- Nonce string `json:"nonce"`
- Version string `json:"version"`
- ClusterName string `json:"cluster_name,omitempty"`
- ClusterID string `json:"cluster_id,omitempty"`
+ Type string `json:"type"`
+ Sealed bool `json:"sealed"`
+ T int `json:"t"`
+ N int `json:"n"`
+ Progress int `json:"progress"`
+ Nonce string `json:"nonce"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+ RecoverySeal bool `json:"recovery_seal"`
}
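
A small sketch reading the expanded seal status fields; the client is assumed to be configured.

package example

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// printSealStatus is an illustrative helper (not part of the vendored code).
func printSealStatus(client *vault.Client) {
	status, err := client.Sys().SealStatus()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("type=%s sealed=%v recovery_seal=%v progress=%d/%d\n",
		status.Type, status.Sealed, status.RecoverySeal, status.Progress, status.T)
}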
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
index 31a2dcd..a7fb87b 100644
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
@@ -8,6 +8,7 @@ import (
"io"
"github.com/golang/snappy"
+ "github.com/hashicorp/errwrap"
)
const (
@@ -33,7 +34,7 @@ const (
)
// SnappyReadCloser embeds the snappy reader which implements the io.Reader
-// interface. The decompress procedure in this utility expectes an
+// interface. The decompress procedure in this utility expects an
// io.ReadCloser. This type implements the io.Closer interface to retain the
// generic way of decompression.
type SnappyReadCloser struct {
@@ -107,7 +108,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
}
if err != nil {
- return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
+ return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
}
if writer == nil {
@@ -117,7 +118,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
// Compress the input and place it in the same buffer containing the
// canary byte.
if _, err = writer.Write(data); err != nil {
- return nil, fmt.Errorf("failed to compress input data; err: %v", err)
+		return nil, errwrap.Wrapf("failed to compress input data: {{err}}", err)
}
// Close the io.WriteCloser
@@ -172,7 +173,7 @@ func Decompress(data []byte) ([]byte, bool, error) {
return nil, true, nil
}
if err != nil {
- return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err)
+ return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
}
if reader == nil {
return nil, false, fmt.Errorf("failed to create a compression reader")
diff --git a/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go
new file mode 100644
index 0000000..0b12036
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go
@@ -0,0 +1,36 @@
+package hclutil
+
+import (
+ "fmt"
+
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl/hcl/ast"
+)
+
+// CheckHCLKeys checks that each key in the AST list is one of the valid keys provided.
+func CheckHCLKeys(node ast.Node, valid []string) error {
+ var list *ast.ObjectList
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ list = n
+ case *ast.ObjectType:
+ list = n.List
+ default:
+ return fmt.Errorf("cannot check HCL keys of type %T", n)
+ }
+
+ validMap := make(map[string]struct{}, len(valid))
+ for _, v := range valid {
+ validMap[v] = struct{}{}
+ }
+
+ var result error
+ for _, item := range list.Items {
+ key := item.Keys[0].Token.Value().(string)
+ if _, ok := validMap[key]; !ok {
+ result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
+ }
+ }
+
+ return result
+}
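
A sketch of the key checker now shared via helper/hclutil (previously private to ssh_agent.go); the config body and valid-key list are illustrative.

package example

import (
	"log"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/vault/helper/hclutil"
)

// checkConfigKeys is an illustrative helper (not part of the vendored code).
func checkConfigKeys() {
	root, err := hcl.Parse(`vault_addr = "https://127.0.0.1:8200"`)
	if err != nil {
		log.Fatal(err)
	}

	list, ok := root.Node.(*ast.ObjectList)
	if !ok {
		log.Fatal("config does not contain a root object")
	}

	if err := hclutil.CheckHCLKeys(list, []string{"vault_addr", "ssh_mount_point"}); err != nil {
		log.Fatalf("unexpected keys: %v", err)
	}
}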
diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
index a96745b..d03ddef 100644
--- a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
+++ b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
+ "github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/compressutil"
)
@@ -64,7 +65,7 @@ func DecodeJSON(data []byte, out interface{}) error {
// Decompress the data if it was compressed in the first place
decompressedBytes, uncompressed, err := compressutil.Decompress(data)
if err != nil {
- return fmt.Errorf("failed to decompress JSON: err: %v", err)
+ return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
}
if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) {
return fmt.Errorf("decompressed data being decoded is invalid")
@@ -91,7 +92,7 @@ func DecodeJSONFromReader(r io.Reader, out interface{}) error {
dec := json.NewDecoder(r)
- // While decoding JSON values, intepret the integer values as `json.Number`s instead of `float64`.
+ // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
dec.UseNumber()
// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
index 957d533..ae8c58b 100644
--- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
@@ -3,10 +3,14 @@ package parseutil
import (
"encoding/json"
"errors"
+ "fmt"
"strconv"
"strings"
"time"
+ "github.com/hashicorp/errwrap"
+ sockaddr "github.com/hashicorp/go-sockaddr"
+ "github.com/hashicorp/vault/helper/strutil"
"github.com/mitchellh/mapstructure"
)
@@ -56,6 +60,43 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) {
return dur, nil
}
+func ParseInt(in interface{}) (int64, error) {
+ var ret int64
+ jsonIn, ok := in.(json.Number)
+ if ok {
+ in = jsonIn.String()
+ }
+ switch in.(type) {
+ case string:
+ inp := in.(string)
+ if inp == "" {
+ return 0, nil
+ }
+ var err error
+ left, err := strconv.ParseInt(inp, 10, 64)
+ if err != nil {
+ return ret, err
+ }
+ ret = left
+ case int:
+ ret = int64(in.(int))
+ case int32:
+ ret = int64(in.(int32))
+ case int64:
+ ret = in.(int64)
+ case uint:
+ ret = int64(in.(uint))
+ case uint32:
+ ret = int64(in.(uint32))
+ case uint64:
+ ret = int64(in.(uint64))
+ default:
+ return 0, errors.New("could not parse value from input")
+ }
+
+ return ret, nil
+}
+
func ParseBool(in interface{}) (bool, error) {
var result bool
if err := mapstructure.WeakDecode(in, &result); err != nil {
@@ -63,3 +104,60 @@ func ParseBool(in interface{}) (bool, error) {
}
return result, nil
}
+
+func ParseCommaStringSlice(in interface{}) ([]string, error) {
+ var result []string
+ config := &mapstructure.DecoderConfig{
+ Result: &result,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.StringToSliceHookFunc(","),
+ }
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return nil, err
+ }
+ if err := decoder.Decode(in); err != nil {
+ return nil, err
+ }
+ return strutil.TrimStrings(result), nil
+}
+
+func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) {
+ out := make([]*sockaddr.SockAddrMarshaler, 0)
+ stringAddrs := make([]string, 0)
+
+ switch addrs.(type) {
+ case string:
+ stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",")
+ if len(stringAddrs) == 0 {
+ return nil, fmt.Errorf("unable to parse addresses from %v", addrs)
+ }
+
+ case []string:
+ stringAddrs = addrs.([]string)
+
+ case []interface{}:
+ for _, v := range addrs.([]interface{}) {
+ stringAddr, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing %v as string", v)
+ }
+ stringAddrs = append(stringAddrs, stringAddr)
+ }
+
+ default:
+ return nil, fmt.Errorf("unknown address input type %T", addrs)
+ }
+
+ for _, addr := range stringAddrs {
+ sa, err := sockaddr.NewSockAddr(addr)
+ if err != nil {
+ return nil, errwrap.Wrapf(fmt.Sprintf("error parsing address %q: {{err}}", addr), err)
+ }
+ out = append(out, &sockaddr.SockAddrMarshaler{
+ SockAddr: sa,
+ })
+ }
+
+ return out, nil
+}
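
A sketch of the new parseutil helpers; the inputs are illustrative and cover the json.Number form accepted by ParseInt and the comma-separated form accepted by ParseAddrs.

package example

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/parseutil"
)

// parseExamples is an illustrative helper (not part of the vendored code).
func parseExamples() {
	uses, err := parseutil.ParseInt(json.Number("42"))
	if err != nil {
		log.Fatal(err)
	}

	addrs, err := parseutil.ParseAddrs("10.0.0.1,192.168.1.0/24")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(uses, len(addrs))
}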
diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
new file mode 100644
index 0000000..a77e60d
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go
@@ -0,0 +1,327 @@
+package strutil
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/errwrap"
+ glob "github.com/ryanuber/go-glob"
+)
+
+// StrListContainsGlob looks for a string in a list of strings and allows
+// globs.
+func StrListContainsGlob(haystack []string, needle string) bool {
+ for _, item := range haystack {
+ if glob.Glob(item, needle) {
+ return true
+ }
+ }
+ return false
+}
+
+// StrListContains looks for a string in a list of strings.
+func StrListContains(haystack []string, needle string) bool {
+ for _, item := range haystack {
+ if item == needle {
+ return true
+ }
+ }
+ return false
+}
+
+// StrListSubset checks if a given list is a subset
+// of another set
+func StrListSubset(super, sub []string) bool {
+ for _, item := range sub {
+ if !StrListContains(super, item) {
+ return false
+ }
+ }
+ return true
+}
+
+// Parses a comma separated list of strings into a slice of strings.
+// The return slice will be sorted and will not contain duplicate or
+// empty items.
+func ParseDedupAndSortStrings(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ parsed := []string{}
+ if input == "" {
+ // Don't return nil
+ return parsed
+ }
+ return RemoveDuplicates(strings.Split(input, sep), false)
+}
+
+// Parses a comma separated list of strings into a slice of strings.
+// The return slice will be sorted and will not contain duplicate or
+// empty items. The values will be converted to lower case.
+func ParseDedupLowercaseAndSortStrings(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ parsed := []string{}
+ if input == "" {
+ // Don't return nil
+ return parsed
+ }
+ return RemoveDuplicates(strings.Split(input, sep), true)
+}
+
+// Parses a comma separated list of `<key>=<value>` tuples into a
+// map[string]string.
+func ParseKeyValues(input string, out map[string]string, sep string) error {
+ if out == nil {
+		return fmt.Errorf("'out' is nil")
+ }
+
+ keyValues := ParseDedupLowercaseAndSortStrings(input, sep)
+ if len(keyValues) == 0 {
+ return nil
+ }
+
+ for _, keyValue := range keyValues {
+ shards := strings.Split(keyValue, "=")
+ if len(shards) != 2 {
+ return fmt.Errorf("invalid <key,value> format")
+ }
+
+ key := strings.TrimSpace(shards[0])
+ value := strings.TrimSpace(shards[1])
+ if key == "" || value == "" {
+ return fmt.Errorf("invalid <key,value> pair: key: %q value: %q", key, value)
+ }
+ out[key] = value
+ }
+ return nil
+}
+
+// Parses arbitrary <key,value> tuples. The input can be one of
+// the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * Comma separated list of `<key>=<value>` pairs
+// * Base64 encoded string containing comma separated list of
+// `<key>=<value>` pairs
+//
+// Input will be parsed into the output parameter, which should
+// be a non-nil map[string]string.
+func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return nil
+ }
+ if out == nil {
+ return fmt.Errorf("'out' is nil")
+ }
+
+ // Try to base64 decode the input. If successful, consider the decoded
+ // value as input.
+ inputBytes, err := base64.StdEncoding.DecodeString(input)
+ if err == nil {
+ input = string(inputBytes)
+ }
+
+ // Try to JSON unmarshal the input. If successful, consider that the
+ // metadata was supplied as JSON input.
+ err = json.Unmarshal([]byte(input), &out)
+ if err != nil {
+ // If JSON unmarshalling fails, consider that the input was
+ // supplied as a comma separated string of 'key=value' pairs.
+ if err = ParseKeyValues(input, out, sep); err != nil {
+ return errwrap.Wrapf("failed to parse the input: {{err}}", err)
+ }
+ }
+
+ // Validate the parsed input
+ for key, value := range out {
+ if key != "" && value == "" {
+ return fmt.Errorf("invalid value for key %q", key)
+ }
+ }
+
+ return nil
+}
+
+// Parses a `sep`-separated list of strings into a
+// []string.
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseStringSlice(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return []string{}
+ }
+
+ splitStr := strings.Split(input, sep)
+ ret := make([]string, len(splitStr))
+ for i, val := range splitStr {
+ ret[i] = val
+ }
+
+ return ret
+}
+
+// Parses arbitrary string slice. The input can be one of
+// the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * `sep` separated list of values
+// * Base64-encoded string containing a `sep` separated list of values
+//
+// Note that the separator is ignored if the input is found to already be in a
+// structured format (e.g., JSON)
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseArbitraryStringSlice(input string, sep string) []string {
+ input = strings.TrimSpace(input)
+ if input == "" {
+ return []string{}
+ }
+
+ // Try to base64 decode the input. If successful, consider the decoded
+ // value as input.
+ inputBytes, err := base64.StdEncoding.DecodeString(input)
+ if err == nil {
+ input = string(inputBytes)
+ }
+
+ ret := []string{}
+
+ // Try to JSON unmarshal the input. If successful, consider that the
+ // metadata was supplied as JSON input.
+ err = json.Unmarshal([]byte(input), &ret)
+ if err != nil {
+ // If JSON unmarshalling fails, consider that the input was
+ // supplied as a separated string of values.
+ return ParseStringSlice(input, sep)
+ }
+
+ if ret == nil {
+ return []string{}
+ }
+
+ return ret
+}
+
+// TrimStrings takes a slice of strings and returns a slice of strings
+// with trimmed spaces
+func TrimStrings(items []string) []string {
+ ret := make([]string, len(items))
+ for i, item := range items {
+ ret[i] = strings.TrimSpace(item)
+ }
+ return ret
+}
+
+// Removes duplicate and empty elements from a slice of strings. This also may
+// convert the items in the slice to lower case and returns a sorted slice.
+func RemoveDuplicates(items []string, lowercase bool) []string {
+ itemsMap := map[string]bool{}
+ for _, item := range items {
+ item = strings.TrimSpace(item)
+ if lowercase {
+ item = strings.ToLower(item)
+ }
+ if item == "" {
+ continue
+ }
+ itemsMap[item] = true
+ }
+ items = make([]string, 0, len(itemsMap))
+ for item, _ := range itemsMap {
+ items = append(items, item)
+ }
+ sort.Strings(items)
+ return items
+}
+
+// EquivalentSlices checks whether the given string sets are equivalent, as in,
+// they contain the same values.
+func EquivalentSlices(a, b []string) bool {
+ if a == nil && b == nil {
+ return true
+ }
+
+ if a == nil || b == nil {
+ return false
+ }
+
+ // First we'll build maps to ensure unique values
+ mapA := map[string]bool{}
+ mapB := map[string]bool{}
+ for _, keyA := range a {
+ mapA[keyA] = true
+ }
+ for _, keyB := range b {
+ mapB[keyB] = true
+ }
+
+ // Now we'll build our checking slices
+ var sortedA, sortedB []string
+ for keyA, _ := range mapA {
+ sortedA = append(sortedA, keyA)
+ }
+ for keyB, _ := range mapB {
+ sortedB = append(sortedB, keyB)
+ }
+ sort.Strings(sortedA)
+ sort.Strings(sortedB)
+
+ // Finally, compare
+ if len(sortedA) != len(sortedB) {
+ return false
+ }
+
+ for i := range sortedA {
+ if sortedA[i] != sortedB[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// StrListDelete removes the first occurrence of the given item from the slice
+// of strings if the item exists.
+func StrListDelete(s []string, d string) []string {
+ if s == nil {
+ return s
+ }
+
+ for index, element := range s {
+ if element == d {
+ return append(s[:index], s[index+1:]...)
+ }
+ }
+
+ return s
+}
+
+func GlobbedStringsMatch(item, val string) bool {
+ if len(item) < 2 {
+ return val == item
+ }
+
+ hasPrefix := strings.HasPrefix(item, "*")
+ hasSuffix := strings.HasSuffix(item, "*")
+
+ if hasPrefix && hasSuffix {
+ return strings.Contains(val, item[1:len(item)-1])
+ } else if hasPrefix {
+ return strings.HasSuffix(val, item[1:])
+ } else if hasSuffix {
+ return strings.HasPrefix(val, item[:len(item)-1])
+ }
+
+ return val == item
+}
+
+// AppendIfMissing adds a string to a slice if the given string is not present
+func AppendIfMissing(slice []string, i string) []string {
+ if StrListContains(slice, i) {
+ return slice
+ }
+ return append(slice, i)
+}
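
A closing sketch exercising a few of the strutil helpers vendored above; the key/value string and policy list are illustrative.

package example

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/strutil"
)

// strutilExamples is an illustrative helper (not part of the vendored code).
func strutilExamples() {
	meta := map[string]string{}
	if err := strutil.ParseArbitraryKeyValues("ttl=1h,team=infra", meta, ","); err != nil {
		log.Fatal(err)
	}

	// Lowercases, drops empties and duplicates, and sorts: ["admin", "default"].
	policies := strutil.RemoveDuplicates([]string{"Default", "admin", "default", ""}, true)

	fmt.Println(meta, policies, strutil.StrListContains(policies, "admin"))
}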