author     Niall Sheridan <nsheridan@gmail.com>   2017-01-20 00:52:56 +0000
committer  Niall Sheridan <nsheridan@gmail.com>   2017-01-22 22:25:35 +0000
commit     51cc4c07b2a2b6345b1496baac865f5faf955e7d (patch)
tree       edd51d045954eb802c470be4481a1d130d5f988c
parent     fb4a1232be3b2d00483a7399e7131c211d8cd551 (diff)
Switch from database/sql to sqlx
-rw-r--r--  db/seed.sql                                           8
-rw-r--r--  server/store/sqldb.go                                98
-rw-r--r--  server/store/store.go                                15
-rw-r--r--  server/store/store_test.go                            5
-rw-r--r--  server/store/types/string_slice.go                   37
-rw-r--r--  vendor/github.com/jmoiron/sqlx/LICENSE               23
-rw-r--r--  vendor/github.com/jmoiron/sqlx/README.md            183
-rw-r--r--  vendor/github.com/jmoiron/sqlx/bind.go              186
-rw-r--r--  vendor/github.com/jmoiron/sqlx/doc.go                12
-rw-r--r--  vendor/github.com/jmoiron/sqlx/named.go             344
-rw-r--r--  vendor/github.com/jmoiron/sqlx/reflectx/README.md    17
-rw-r--r--  vendor/github.com/jmoiron/sqlx/reflectx/reflect.go  422
-rw-r--r--  vendor/github.com/jmoiron/sqlx/sqlx.go             1028
-rw-r--r--  vendor/vendor.json                                   12
14 files changed, 2304 insertions, 86 deletions
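
The substance of the change is swapping hand-rolled row scanning over `database/sql` prepared statements for sqlx's `Preparex`, `Get` and `Select` driven by `db` struct tags. A minimal sketch of that pattern (connection string, struct and column names here are illustrative, not lifted from this diff):

```go
package main

import (
	"log"

	_ "github.com/go-sql-driver/mysql"
	"github.com/jmoiron/sqlx"
)

// record mirrors a table row; the db tags map struct fields to column
// names, which is what lets sqlx fill the struct without a hand-written Scan.
type record struct {
	KeyID   string `db:"key_id"`
	Revoked bool   `db:"revoked"`
}

func main() {
	// sqlx.Open has sql.Open semantics but returns an *sqlx.DB.
	db, err := sqlx.Open("mysql", "user:pass@/certs?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Preparex returns an *sqlx.Stmt, which layers Get/Select on *sql.Stmt.
	get, err := db.Preparex("SELECT key_id, revoked FROM issued_certs WHERE key_id = ?")
	if err != nil {
		log.Fatal(err)
	}

	// Get scans a single row straight into the tagged struct.
	var one record
	if err := get.Get(&one, "some-key-id"); err != nil {
		log.Fatal(err)
	}

	// Select scans every returned row into the slice.
	var all []record
	if err := db.Select(&all, "SELECT key_id, revoked FROM issued_certs"); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v, %d rows", one, len(all))
}
```
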
diff --git a/db/seed.sql b/db/seed.sql
index cf5e62a..7c438fd 100644
--- a/db/seed.sql
+++ b/db/seed.sql
@@ -4,10 +4,10 @@ USE `certs`;
CREATE TABLE `issued_certs` (
`key_id` varchar(255) NOT NULL,
- `principals` varchar(255) DEFAULT NULL,
- `created_at` datetime DEFAULT NULL,
- `expires_at` datetime DEFAULT NULL,
- `revoked` tinyint(1) DEFAULT NULL,
+ `principals` varchar(255) DEFAULT "[]",
+ `created_at` datetime DEFAULT '1970-01-01 00:00:01',
+ `expires_at` datetime DEFAULT '1970-01-01 00:00:01',
+ `revoked` tinyint(1) DEFAULT 0,
`raw_key` text,
PRIMARY KEY (`key_id`)
);
diff --git a/server/store/sqldb.go b/server/store/sqldb.go
index a51678e..2efca0e 100644
--- a/server/store/sqldb.go
+++ b/server/store/sqldb.go
@@ -1,8 +1,6 @@
package store
import (
- "database/sql"
- "encoding/json"
"fmt"
"net"
"time"
@@ -10,6 +8,7 @@ import (
"golang.org/x/crypto/ssh"
"github.com/go-sql-driver/mysql"
+ "github.com/jmoiron/sqlx"
"github.com/nsheridan/cashier/server/config"
)
@@ -17,14 +16,14 @@ var _ CertStorer = (*SQLStore)(nil)
// SQLStore is an sql-based CertStorer
type SQLStore struct {
- conn *sql.DB
-
- get *sql.Stmt
- set *sql.Stmt
- listAll *sql.Stmt
- listCurrent *sql.Stmt
- revoke *sql.Stmt
- revoked *sql.Stmt
+ conn *sqlx.DB
+
+ get *sqlx.Stmt
+ set *sqlx.Stmt
+ listAll *sqlx.Stmt
+ listCurrent *sqlx.Stmt
+ revoke *sqlx.Stmt
+ revoked *sqlx.Stmt
}
// NewSQLStore returns a *sql.DB CertStorer.
@@ -52,7 +51,7 @@ func NewSQLStore(c config.Database) (*SQLStore, error) {
driver = "sqlite3"
dsn = c["filename"]
}
- conn, err := sql.Open(driver, dsn)
+ conn, err := sqlx.Open(driver, dsn)
if err != nil {
return nil, fmt.Errorf("SQLStore: could not get a connection: %v", err)
}
@@ -65,22 +64,22 @@ func NewSQLStore(c config.Database) (*SQLStore, error) {
conn: conn,
}
- if db.set, err = conn.Prepare("INSERT INTO issued_certs (key_id, principals, created_at, expires_at, raw_key) VALUES (?, ?, ?, ?, ?)"); err != nil {
+ if db.set, err = conn.Preparex("INSERT INTO issued_certs (key_id, principals, created_at, expires_at, raw_key) VALUES (?, ?, ?, ?, ?)"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare set: %v", err)
}
- if db.get, err = conn.Prepare("SELECT * FROM issued_certs WHERE key_id = ?"); err != nil {
+ if db.get, err = conn.Preparex("SELECT * FROM issued_certs WHERE key_id = ?"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare get: %v", err)
}
- if db.listAll, err = conn.Prepare("SELECT * FROM issued_certs"); err != nil {
+ if db.listAll, err = conn.Preparex("SELECT * FROM issued_certs"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare listAll: %v", err)
}
- if db.listCurrent, err = conn.Prepare("SELECT * FROM issued_certs WHERE ? <= expires_at"); err != nil {
+ if db.listCurrent, err = conn.Preparex("SELECT * FROM issued_certs WHERE ? <= expires_at"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare listCurrent: %v", err)
}
- if db.revoke, err = conn.Prepare("UPDATE issued_certs SET revoked = 1 WHERE key_id = ?"); err != nil {
+ if db.revoke, err = conn.Preparex("UPDATE issued_certs SET revoked = 1 WHERE key_id = ?"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare revoke: %v", err)
}
- if db.revoked, err = conn.Prepare("SELECT * FROM issued_certs WHERE revoked = 1 AND ? <= expires_at"); err != nil {
+ if db.revoked, err = conn.Preparex("SELECT * FROM issued_certs WHERE revoked = 1 AND ? <= expires_at"); err != nil {
return nil, fmt.Errorf("SQLStore: prepare revoked: %v", err)
}
return db, nil
@@ -91,38 +90,13 @@ type rowScanner interface {
Scan(dest ...interface{}) error
}
-func scanCert(s rowScanner) (*CertRecord, error) {
- var (
- keyID sql.NullString
- principals sql.NullString
- createdAt time.Time
- expires time.Time
- revoked sql.NullBool
- raw sql.NullString
- )
- if err := s.Scan(&keyID, &principals, &createdAt, &expires, &revoked, &raw); err != nil {
- return nil, err
- }
- var p []string
- if err := json.Unmarshal([]byte(principals.String), &p); err != nil {
- return nil, err
- }
- return &CertRecord{
- KeyID: keyID.String,
- Principals: p,
- CreatedAt: createdAt,
- Expires: expires,
- Revoked: revoked.Bool,
- Raw: raw.String,
- }, nil
-}
-
// Get a single *CertRecord
func (db *SQLStore) Get(id string) (*CertRecord, error) {
if err := db.conn.Ping(); err != nil {
return nil, err
}
- return scanCert(db.get.QueryRow(id))
+ r := &CertRecord{}
+ return r, db.get.Get(r, id)
}
// SetCert parses a *ssh.Certificate and records it
@@ -132,14 +106,10 @@ func (db *SQLStore) SetCert(cert *ssh.Certificate) error {
// SetRecord records a *CertRecord
func (db *SQLStore) SetRecord(rec *CertRecord) error {
- principals, err := json.Marshal(rec.Principals)
- if err != nil {
- return err
- }
if err := db.conn.Ping(); err != nil {
return err
}
- _, err = db.set.Exec(rec.KeyID, principals, rec.CreatedAt, rec.Expires, rec.Raw)
+ _, err := db.set.Exec(rec.KeyID, rec.Principals, rec.CreatedAt, rec.Expires, rec.Raw)
return err
}
@@ -149,20 +119,9 @@ func (db *SQLStore) List(includeExpired bool) ([]*CertRecord, error) {
if err := db.conn.Ping(); err != nil {
return nil, err
}
- var recs []*CertRecord
- var rows *sql.Rows
- if includeExpired {
- rows, _ = db.listAll.Query()
- } else {
- rows, _ = db.listCurrent.Query(time.Now().UTC())
- }
- defer rows.Close()
- for rows.Next() {
- cert, err := scanCert(rows)
- if err != nil {
- return nil, err
- }
- recs = append(recs, cert)
+ recs := []*CertRecord{}
+ if err := db.listAll.Select(&recs); err != nil {
+ return nil, err
}
return recs, nil
}
@@ -172,8 +131,7 @@ func (db *SQLStore) Revoke(id string) error {
if err := db.conn.Ping(); err != nil {
return err
}
- _, err := db.revoke.Exec(id)
- if err != nil {
+ if _, err := db.revoke.Exec(id); err != nil {
return err
}
return nil
@@ -185,14 +143,8 @@ func (db *SQLStore) GetRevoked() ([]*CertRecord, error) {
return nil, err
}
var recs []*CertRecord
- rows, _ := db.revoked.Query(time.Now().UTC())
- defer rows.Close()
- for rows.Next() {
- cert, err := scanCert(rows)
- if err != nil {
- return nil, err
- }
- recs = append(recs, cert)
+ if err := db.revoked.Select(&recs, time.Now().UTC()); err != nil {
+ return nil, err
}
return recs, nil
}
diff --git a/server/store/store.go b/server/store/store.go
index 8af77e3..249489a 100644
--- a/server/store/store.go
+++ b/server/store/store.go
@@ -7,6 +7,7 @@ import (
"github.com/nsheridan/cashier/lib"
"github.com/nsheridan/cashier/server/config"
+ "github.com/nsheridan/cashier/server/store/types"
)
// New returns a new configured database.
@@ -36,12 +37,12 @@ type CertStorer interface {
// A CertRecord is a representation of a ssh certificate used by a CertStorer.
type CertRecord struct {
- KeyID string `json:"key_id"`
- Principals []string `json:"principals"`
- CreatedAt time.Time `json:"created_at"`
- Expires time.Time `json:"expires"`
- Revoked bool `json:"revoked"`
- Raw string `json:"-"`
+ KeyID string `json:"key_id" db:"key_id"`
+ Principals types.StringSlice `json:"principals" db:"principals"`
+ CreatedAt time.Time `json:"created_at" db:"created_at"`
+ Expires time.Time `json:"expires" db:"expires_at"`
+ Revoked bool `json:"revoked" db:"revoked"`
+ Raw string `json:"-" db:"raw_key"`
}
func parseTime(t uint64) time.Time {
@@ -51,7 +52,7 @@ func parseTime(t uint64) time.Time {
func parseCertificate(cert *ssh.Certificate) *CertRecord {
return &CertRecord{
KeyID: cert.KeyId,
- Principals: cert.ValidPrincipals,
+ Principals: types.StringSlice(cert.ValidPrincipals),
CreatedAt: parseTime(cert.ValidAfter),
Expires: parseTime(cert.ValidBefore),
Raw: lib.GetPublicKey(cert),
diff --git a/server/store/store_test.go b/server/store/store_test.go
index afe6c03..4196c37 100644
--- a/server/store/store_test.go
+++ b/server/store/store_test.go
@@ -11,6 +11,7 @@ import (
"testing"
"time"
+ "github.com/nsheridan/cashier/server/store/types"
"github.com/nsheridan/cashier/testdata"
"github.com/stretchr/testify/assert"
@@ -25,7 +26,7 @@ func TestParseCertificate(t *testing.T) {
pub, _ := ssh.NewPublicKey(r.Public())
c := &ssh.Certificate{
KeyId: "id",
- ValidPrincipals: []string{"principal"},
+ ValidPrincipals: types.StringSlice{"principal"},
ValidBefore: now,
CertType: ssh.UserCert,
Key: pub,
@@ -35,7 +36,7 @@ func TestParseCertificate(t *testing.T) {
rec := parseCertificate(c)
a.Equal(c.KeyId, rec.KeyID)
- a.Equal(c.ValidPrincipals, rec.Principals)
+ a.Equal(c.ValidPrincipals, []string(rec.Principals))
a.Equal(c.ValidBefore, uint64(rec.Expires.Unix()))
a.Equal(c.ValidAfter, uint64(rec.CreatedAt.Unix()))
}
diff --git a/server/store/types/string_slice.go b/server/store/types/string_slice.go
new file mode 100644
index 0000000..81b38c3
--- /dev/null
+++ b/server/store/types/string_slice.go
@@ -0,0 +1,37 @@
+package types
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+)
+
+// StringSlice is a []string which will be stored in a database as a JSON array.
+type StringSlice []string
+
+var _ driver.Valuer = (*StringSlice)(nil)
+
+// Value implements the driver.Valuer interface, marshalling the raw value to
+// a JSON array.
+func (s StringSlice) Value() (driver.Value, error) {
+ v, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+ return string(v), err
+}
+
+// Scan implements the sql.Scanner interface, unmarshalling the value coming
+// off the wire and storing the result in the StringSlice.
+func (s *StringSlice) Scan(value interface{}) error {
+ if value == nil {
+ *s = StringSlice{}
+ return nil
+ }
+ var err error
+ if v, cerr := driver.String.ConvertValue(value); cerr == nil {
+ if b, ok := v.([]byte); ok {
+ err = json.Unmarshal(b, s)
+ }
+ }
+ return err
+}
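
Because `StringSlice` implements `driver.Valuer` and `sql.Scanner`, the JSON marshalling that `scanCert`/`SetRecord` used to do by hand now happens transparently whenever a record is written or read. A rough illustration of the round trip using only the two interface methods (no database needed):

```go
package main

import (
	"fmt"
	"log"

	"github.com/nsheridan/cashier/server/store/types"
)

func main() {
	// Value is what the driver receives on INSERT: the slice is marshalled
	// into a JSON array stored in the principals column.
	v, err := types.StringSlice{"root", "admin"}.Value()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // ["root","admin"]

	// Scan runs when a row is read back: the JSON text coming off the wire
	// is unmarshalled into the slice.
	var s types.StringSlice
	if err := s.Scan([]byte(`["root","admin"]`)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // [root admin]
}
```
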
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
new file mode 100644
index 0000000..0d31edf
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/LICENSE
@@ -0,0 +1,23 @@
+ Copyright (c) 2013, Jason Moiron
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
new file mode 100644
index 0000000..e3956e8
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -0,0 +1,183 @@
+# sqlx
+
+[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
+
+sqlx is a library which provides a set of extensions on top of Go's standard
+`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
+et al. all leave the underlying interfaces untouched, so that their interfaces
+are a superset of the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
+
+This breaks backwards compatibility, but it's in a way that is trivially fixable
+(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
+active development currently.
+
+* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
+
+### Backwards Compatibility
+
+There is no Go1-like promise of absolute stability, but I take the issue seriously
+and will maintain the library in a compatible state unless vital bugs prevent me
+from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
+[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
+a wider API cleanup was done at the time of fixing. It's possible this will happen
+in future; if it does, a git tag will be provided for users requiring the old
+behavior to continue to use it until such a time as they can migrate.
+
+## install
+
+ go get github.com/jmoiron/sqlx
+
+## issues
+
+Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
+`Columns()` does not fully qualify column names in queries like:
+
+```sql
+SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
+```
+
+making a struct or map destination ambiguous. Use `AS` in your queries
+to give columns distinct names, `rows.Scan` to scan them manually, or
+`SliceScan` to get a slice of results.
+
+## usage
+
+Below is an example which shows some common use cases for sqlx. Check
+[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
+usage.
+
+
+```go
+package main
+
+import (
+ _ "github.com/lib/pq"
+ "database/sql"
+ "fmt"
+ "github.com/jmoiron/sqlx"
+ "log"
+)
+
+var schema = `
+CREATE TABLE person (
+ first_name text,
+ last_name text,
+ email text
+);
+
+CREATE TABLE place (
+ country text,
+ city text NULL,
+ telcode integer
+)`
+
+type Person struct {
+ FirstName string `db:"first_name"`
+ LastName string `db:"last_name"`
+ Email string
+}
+
+type Place struct {
+ Country string
+ City sql.NullString
+ TelCode int
+}
+
+func main() {
+ // this Pings the database trying to connect; exits on error
+ // use sqlx.Open() for sql.Open() semantics
+ db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // exec the schema or fail; multi-statement Exec behavior varies between
+ // database drivers; pq will exec them all, sqlite3 won't, ymmv
+ db.MustExec(schema)
+
+ tx := db.MustBegin()
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
+ tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
+ tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
+ tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
+ // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
+ tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
+ tx.Commit()
+
+ // Query the database, storing results in a []Person
+ people := []Person{}
+ db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
+ jason, john := people[0], people[1]
+
+ fmt.Printf("%#v\n%#v", jason, john)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+ // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
+
+ // You can also get a single result, a la QueryRow
+ jason = Person{}
+ err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
+ fmt.Printf("%#v\n", jason)
+ // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
+
+ // if you have null fields and use SELECT *, you must use sql.Null* in your struct
+ places := []Place{}
+ err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+ usa, singsing, honkers := places[0], places[1], places[2]
+
+ fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+
+ // Loop through rows using only one struct
+ place := Place{}
+ rows, err := db.Queryx("SELECT * FROM place")
+ for rows.Next() {
+ err := rows.StructScan(&place)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ fmt.Printf("%#v\n", place)
+ }
+ // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
+ // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
+ // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
+
+ // Named queries, using `:name` as the bindvar. Automatic bindvar support
+ // which takes into account the dbtype based on the driverName on sqlx.Open/Connect
+ _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
+ map[string]interface{}{
+ "first": "Bin",
+ "last": "Smuth",
+ "email": "bensmith@allblacks.nz",
+ })
+
+ // Selects Mr. Smith from the database
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
+
+ // Named queries can also use structs. Their bind names follow the same rules
+ // as the name -> db mapping, so struct fields are lowercased and the `db` tag
+ // is taken into consideration.
+ rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
+}
+```
+
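
The `SliceScan` escape hatch mentioned in the README's issues section looks roughly like this; the connection string and the `foos` query are the README's own examples, and this is a sketch rather than canonical usage:

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

func main() {
	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Duplicate column names make struct or map destinations ambiguous,
	// so fall back to SliceScan, which returns the values in column order.
	rows, err := db.Queryx("SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		cols, err := rows.SliceScan() // []interface{}, one entry per column
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(cols)
	}
}
```
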
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
new file mode 100644
index 0000000..53659bc
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/bind.go
@@ -0,0 +1,186 @@
+package sqlx
+
+import (
+ "bytes"
+ "errors"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Bindvar types supported by Rebind, BindMap and BindStruct.
+const (
+ UNKNOWN = iota
+ QUESTION
+ DOLLAR
+ NAMED
+)
+
+// BindType returns the bindtype for a given database given a drivername.
+func BindType(driverName string) int {
+ switch driverName {
+ case "postgres", "pgx":
+ return DOLLAR
+ case "mysql":
+ return QUESTION
+ case "sqlite3":
+ return QUESTION
+ case "oci8", "ora", "goracle":
+ return NAMED
+ }
+ return UNKNOWN
+}
+
+// FIXME: this should be able to be tolerant of escaped ?'s in queries without
+// losing much speed, and should be to avoid confusion.
+
+// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
+func Rebind(bindType int, query string) string {
+ switch bindType {
+ case QUESTION, UNKNOWN:
+ return query
+ }
+
+ qb := []byte(query)
+ // Add space enough for 10 params before we have to allocate
+ rqb := make([]byte, 0, len(qb)+10)
+ j := 1
+ for _, b := range qb {
+ if b == '?' {
+ switch bindType {
+ case DOLLAR:
+ rqb = append(rqb, '$')
+ case NAMED:
+ rqb = append(rqb, ':', 'a', 'r', 'g')
+ }
+ for _, b := range strconv.Itoa(j) {
+ rqb = append(rqb, byte(b))
+ }
+ j++
+ } else {
+ rqb = append(rqb, b)
+ }
+ }
+ return string(rqb)
+}
+
+// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
+// much simpler and should be more resistant to odd unicode, but it is twice as
+// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
+// problems arise with its somewhat naive handling of unicode.
+func rebindBuff(bindType int, query string) string {
+ if bindType != DOLLAR {
+ return query
+ }
+
+ b := make([]byte, 0, len(query))
+ rqb := bytes.NewBuffer(b)
+ j := 1
+ for _, r := range query {
+ if r == '?' {
+ rqb.WriteRune('$')
+ rqb.WriteString(strconv.Itoa(j))
+ j++
+ } else {
+ rqb.WriteRune(r)
+ }
+ }
+
+ return rqb.String()
+}
+
+// In expands slice values in args, returning the modified query string
+// and a new arg list that can be executed by a database. The `query` should
+// use the `?` bindVar. The return value uses the `?` bindVar.
+func In(query string, args ...interface{}) (string, []interface{}, error) {
+ // argMeta stores reflect.Value and length for slices and
+ // the value itself for non-slice arguments
+ type argMeta struct {
+ v reflect.Value
+ i interface{}
+ length int
+ }
+
+ var flatArgsCount int
+ var anySlices bool
+
+ meta := make([]argMeta, len(args))
+
+ for i, arg := range args {
+ v := reflect.ValueOf(arg)
+ t := reflectx.Deref(v.Type())
+
+ if t.Kind() == reflect.Slice {
+ meta[i].length = v.Len()
+ meta[i].v = v
+
+ anySlices = true
+ flatArgsCount += meta[i].length
+
+ if meta[i].length == 0 {
+ return "", nil, errors.New("empty slice passed to 'in' query")
+ }
+ } else {
+ meta[i].i = arg
+ flatArgsCount++
+ }
+ }
+
+ // don't do any parsing if there aren't any slices; note that this means
+ // some errors that we might have caught below will not be returned.
+ if !anySlices {
+ return query, args, nil
+ }
+
+ newArgs := make([]interface{}, 0, flatArgsCount)
+
+ var arg, offset int
+ var buf bytes.Buffer
+
+ for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
+ if arg >= len(meta) {
+ // if an argument wasn't passed, lets return an error; this is
+ // not actually how database/sql Exec/Query works, but since we are
+ // creating an argument list programmatically, we want to be able
+ // to catch these programmer errors earlier.
+ return "", nil, errors.New("number of bindVars exceeds arguments")
+ }
+
+ argMeta := meta[arg]
+ arg++
+
+ // not a slice, continue.
+ // our questionmark will either be written before the next expansion
+ // of a slice or after the loop when writing the rest of the query
+ if argMeta.length == 0 {
+ offset = offset + i + 1
+ newArgs = append(newArgs, argMeta.i)
+ continue
+ }
+
+ // write everything up to and including our ? character
+ buf.WriteString(query[:offset+i+1])
+
+ newArgs = append(newArgs, argMeta.v.Index(0).Interface())
+
+ for si := 1; si < argMeta.length; si++ {
+ buf.WriteString(", ?")
+ newArgs = append(newArgs, argMeta.v.Index(si).Interface())
+ }
+
+ // slice the query and reset the offset. this avoids some bookkeeping for
+ // the write after the loop
+ query = query[offset+i+1:]
+ offset = 0
+ }
+
+ buf.WriteString(query)
+
+ if arg < len(meta) {
+ return "", nil, errors.New("number of bindVars less than number arguments")
+ }
+
+ return buf.String(), newArgs, nil
+}
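
`In` is normally paired with `Rebind`: `In` expands slice arguments into a flattened `?` list, and `Rebind` then converts the `?` bindvars into the driver's style. A small sketch (the query is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
)

func main() {
	ids := []int{4, 6, 9}

	// In expands the slice argument: one ? per element plus a flat arg list.
	query, args, err := sqlx.In("SELECT * FROM issued_certs WHERE key_id IN (?)", ids)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query) // SELECT * FROM issued_certs WHERE key_id IN (?, ?, ?)
	fmt.Println(args)  // [4 6 9]

	// Rebind converts the ? bindvars to the target driver's style.
	fmt.Println(sqlx.Rebind(sqlx.DOLLAR, query))
	// SELECT * FROM issued_certs WHERE key_id IN ($1, $2, $3)
}
```
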
diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go
new file mode 100644
index 0000000..e2b4e60
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/doc.go
@@ -0,0 +1,12 @@
+// Package sqlx provides general purpose extensions to database/sql.
+//
+// It is intended to seamlessly wrap database/sql and provide convenience
+// methods which are useful in the development of database driven applications.
+// None of the underlying database/sql methods are changed. Instead all extended
+// behavior is implemented through new methods defined on wrapper types.
+//
+// Additions include scanning into structs, named query support, rebinding
+// queries for different drivers, convenient shorthands for common error handling
+// and more.
+//
+package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
new file mode 100644
index 0000000..dd899d3
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named.go
@@ -0,0 +1,344 @@
+package sqlx
+
+// Named Query Support
+//
+// * BindMap - bind query bindvars to map/struct args
+// * NamedExec, NamedQuery - named query w/ struct or map
+// * NamedStmt - a pre-compiled named query which is a prepared statement
+//
+// Internal Interfaces:
+//
+// * compileNamedQuery - rebind a named query, returning a query and list of names
+// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
+//
+import (
+ "database/sql"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unicode"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// NamedStmt is a prepared statement that executes named queries. Prepare it
+// how you would execute a NamedQuery, but pass in a struct or map when executing.
+type NamedStmt struct {
+ Params []string
+ QueryString string
+ Stmt *Stmt
+}
+
+// Close closes the named statement.
+func (n *NamedStmt) Close() error {
+ return n.Stmt.Close()
+}
+
+// Exec executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.Exec(args...)
+}
+
+// Query executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.Query(args...)
+}
+
+// QueryRow executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRow(arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowx(args...)
+}
+
+// MustExec execs a NamedStmt, panicking on error
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
+ res, err := n.Exec(arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// Queryx using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
+ r, err := n.Query(arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
+ return n.QueryRow(arg)
+}
+
+// Select using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
+ rows, err := n.Queryx(arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
+ r := n.QueryRowx(arg)
+ return r.scanAny(dest, false)
+}
+
+// Unsafe creates an unsafe version of the NamedStmt
+func (n *NamedStmt) Unsafe() *NamedStmt {
+ r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
+ r.Stmt.unsafe = true
+ return r
+}
+
+// A union interface of preparer and binder, required to be able to prepare
+// named statements (as the bindtype must be determined).
+type namedPreparer interface {
+ Preparer
+ binder
+}
+
+func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := Preparex(p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ if maparg, ok := arg.(map[string]interface{}); ok {
+ return bindMapArgs(names, maparg)
+ }
+ return bindArgs(names, arg, m)
+}
+
+// private interface to generate a list of interfaces from a given struct
+// type, given a list of names to pull out of the struct. Used by public
+// BindStruct interface.
+func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ // grab the indirected value of arg
+ v := reflect.ValueOf(arg)
+ for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
+ v = v.Elem()
+ }
+
+ fields := m.TraversalsByName(v.Type(), names)
+ for i, t := range fields {
+ if len(t) == 0 {
+ return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
+ }
+ val := reflectx.FieldByIndexesReadOnly(v, t)
+ arglist = append(arglist, val.Interface())
+ }
+
+ return arglist, nil
+}
+
+// like bindArgs, but for maps.
+func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
+ arglist := make([]interface{}, 0, len(names))
+
+ for _, name := range names {
+ val, ok := arg[name]
+ if !ok {
+ return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
+ }
+ arglist = append(arglist, val)
+ }
+ return arglist, nil
+}
+
+// bindStruct binds a named parameter query with fields from a struct argument.
+// The rules for binding field names to parameter names follow the same
+// conventions as for StructScan, including obeying the `db` struct tags.
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindArgs(names, arg, m)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ return bound, arglist, nil
+}
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+ bound, names, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return "", []interface{}{}, err
+ }
+
+ arglist, err := bindMapArgs(names, args)
+ return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower. Hopefully the
+// addition of the prepared NamedStmt (which will only do this once) will make
+// up for the slightly slower ad-hoc NamedExec/NamedQuery.
+
+// compile a NamedQuery into an unbound query (using the '?' bindvar) and
+// a list of names.
+func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
+ names = make([]string, 0, 10)
+ rebound := make([]byte, 0, len(qs))
+
+ inName := false
+ last := len(qs) - 1
+ currentVar := 1
+ name := make([]byte, 0, 10)
+
+ for i, b := range qs {
+ // a ':' while we're in a name is an error
+ if b == ':' {
+ // if this is the second ':' in a '::' escape sequence, append a ':'
+ if inName && i > 0 && qs[i-1] == ':' {
+ rebound = append(rebound, ':')
+ inName = false
+ continue
+ } else if inName {
+ err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
+ return query, names, err
+ }
+ inName = true
+ name = []byte{}
+ // if we're in a name, and this is an allowed character, continue
+ } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
+ // append the byte to the name if we are in a name and not on the last byte
+ name = append(name, b)
+ // if we're in a name and it's not an allowed character, the name is done
+ } else if inName {
+ inName = false
+ // if this is the final byte of the string and it is part of the name, then
+ // make sure to add it to the name
+ if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ name = append(name, b)
+ }
+ // add the string representation to the names list
+ names = append(names, string(name))
+ // add a proper bindvar for the bindType
+ switch bindType {
+ // oracle only supports named type bind vars even for positional
+ case NAMED:
+ rebound = append(rebound, ':')
+ rebound = append(rebound, name...)
+ case QUESTION, UNKNOWN:
+ rebound = append(rebound, '?')
+ case DOLLAR:
+ rebound = append(rebound, '$')
+ for _, b := range strconv.Itoa(currentVar) {
+ rebound = append(rebound, byte(b))
+ }
+ currentVar++
+ }
+ // add this byte to string unless it was not part of the name
+ if i != last {
+ rebound = append(rebound, b)
+ } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
+ rebound = append(rebound, b)
+ }
+ } else {
+ // this is a normal byte and should just go onto the rebound query
+ rebound = append(rebound, b)
+ }
+ }
+
+ return string(rebound), names, err
+}
+
+// BindNamed binds a struct or a map to a query with named parameters.
+// DEPRECATED: use sqlx.Named instead of this; it may be removed in future.
+func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(bindType, query, arg, mapper())
+}
+
+// Named takes a query using named parameters and an argument and
+// returns a new query with a list of args that can be executed by
+// a database. The return value uses the `?` bindvar.
+func Named(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(QUESTION, query, arg, mapper())
+}
+
+func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+ if maparg, ok := arg.(map[string]interface{}); ok {
+ return bindMap(bindType, query, maparg)
+ }
+ return bindStruct(bindType, query, arg, m)
+}
+
+// NamedQuery binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Queryx(q, args...)
+}
+
+// NamedExec uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.Exec(q, args...)
+}
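
`NamedStmt` is the prepared form of the named queries shown in the README. Assuming the usual `PrepareNamed` entry point on `*sqlx.DB` (defined in sqlx.go, outside the hunks shown here), usage looks roughly like this; table and data are the README's example:

```go
package main

import (
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
)

// Person matches the README's example table; fields bind via the db tag.
type Person struct {
	FirstName string `db:"first_name"`
	LastName  string `db:"last_name"`
	Email     string `db:"email"`
}

func main() {
	db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Compile the :name bindvars once; reuse the statement many times.
	stmt, err := db.PrepareNamed("SELECT * FROM person WHERE first_name = :first_name")
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()

	// The argument can be a struct (matched via db tags) or a map.
	var p Person
	if err := stmt.Get(&p, map[string]interface{}{"first_name": "Jason"}); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", p)
}
```
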
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 0000000..f01d3d1
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs. In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* user specified name -> field mapping functions
+
+These behaviors mimic the behaviors of the standard library marshallers and also the
+behavior of standard Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
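
A small sketch of what the `Mapper` adds on top of plain `reflect` (struct and field names are illustrative):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/jmoiron/sqlx/reflectx"
)

type CertRecord struct {
	KeyID   string `db:"key_id"`
	Revoked bool   `db:"revoked"`
}

func main() {
	// The same kind of mapper sqlx builds internally: obey the db tag and
	// fall back to the lowercased field name when no tag is present.
	m := reflectx.NewMapperFunc("db", strings.ToLower)

	rec := CertRecord{KeyID: "abc", Revoked: true}
	v := reflect.ValueOf(rec)

	// Look up fields by their mapped (column) names rather than their Go names.
	fmt.Println(m.FieldByName(v, "key_id"))  // abc
	fmt.Println(m.FieldByName(v, "revoked")) // true
}
```
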
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 0000000..427ed2a
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,422 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+//
+package reflectx
+
+import (
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+ Index []int
+ Path string
+ Field reflect.StructField
+ Zero reflect.Value
+ Name string
+ Options map[string]string
+ Embedded bool
+ Children []*FieldInfo
+ Parent *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+ Tree *FieldInfo
+ Index []*FieldInfo
+ Paths map[string]*FieldInfo
+ Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+ return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+ if len(index) == 0 {
+ return nil
+ }
+
+ tree := f.Tree
+ for _, i := range index {
+ if i >= len(tree.Children) || tree.Children[i] == nil {
+ return nil
+ }
+ tree = tree.Children[i]
+ }
+ return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+ cache map[reflect.Type]*StructMap
+ tagName string
+ tagMapFunc func(string) string
+ mapFunc func(string) string
+ mutex sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ }
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: mapFunc,
+ tagMapFunc: tagMapFunc,
+ }
+}
+
+// NewMapperFunc returns a new mapper which optionally obeys a field tag and
+// a struct field name mapper func given by f. Tags will take precedence, but
+// for any other field, the mapped name will be f(field.Name)
+func NewMapperFunc(tagName string, f func(string) string) *Mapper {
+ return &Mapper{
+ cache: make(map[reflect.Type]*StructMap),
+ tagName: tagName,
+ mapFunc: f,
+ }
+}
+
+// TypeMap returns a mapping of field strings to int slices representing
+// the traversal down the struct to reach the field.
+func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
+ m.mutex.Lock()
+ mapping, ok := m.cache[t]
+ if !ok {
+ mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
+ m.cache[t] = mapping
+ }
+ m.mutex.Unlock()
+ return mapping
+}
+
+// FieldMap returns the mapper's mapping of field names to reflect values. Panics
+// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
+func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ r := map[string]reflect.Value{}
+ tm := m.TypeMap(v.Type())
+ for tagName, fi := range tm.Names {
+ r[tagName] = FieldByIndexes(v, fi.Index)
+ }
+ return r
+}
+
+// FieldByName returns a field by its mapped name as a reflect.Value.
+// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
+// Returns zero Value if the name is not found.
+func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ fi, ok := tm.Names[name]
+ if !ok {
+ return v
+ }
+ return FieldByIndexes(v, fi.Index)
+}
+
+// FieldsByName returns a slice of values corresponding to the slice of names
+// for the value. Panics if v's Kind is not Struct or v is not Indirectable
+// to a struct Kind. Returns zero Value for each name not found.
+func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
+ v = reflect.Indirect(v)
+ mustBe(v, reflect.Struct)
+
+ tm := m.TypeMap(v.Type())
+ vals := make([]reflect.Value, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ vals = append(vals, *new(reflect.Value))
+ } else {
+ vals = append(vals, FieldByIndexes(v, fi.Index))
+ }
+ }
+ return vals
+}
+
+// TraversalsByName returns a slice of int slices which represent the struct
+// traversals for each mapped name. Panics if t is not a struct or Indirectable
+// to a struct. Returns empty int slice for each name not found.
+func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ t = Deref(t)
+ mustBe(t, reflect.Struct)
+ tm := m.TypeMap(t)
+
+ r := make([][]int, 0, len(names))
+ for _, name := range names {
+ fi, ok := tm.Names[name]
+ if !ok {
+ r = append(r, []int{})
+ } else {
+ r = append(r, fi.Index)
+ }
+ }
+ return r
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ // if this is a pointer and it's nil, allocate a new value and set it
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ alloc := reflect.New(Deref(v.Type()))
+ v.Set(alloc)
+ }
+ if v.Kind() == reflect.Map && v.IsNil() {
+ v.Set(reflect.MakeMap(v.Type()))
+ }
+ }
+ return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+ for _, i := range indexes {
+ v = reflect.Indirect(v).Field(i)
+ }
+ return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+ Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+ if k := v.Kind(); k != expected {
+ panic(&reflect.ValueError{Method: methodName(), Kind: k})
+ }
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+ pc, _, _, _ := runtime.Caller(2)
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown method"
+ }
+ return f.Name()
+}
+
+type typeQueue struct {
+ t reflect.Type
+ fi *FieldInfo
+ pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
+func apnd(is []int, i int) []int {
+ x := make([]int, len(is)+1)
+ for p, n := range is {
+ x[p] = n
+ }
+ x[len(x)-1] = i
+ return x
+}
+
+type mapf func(string) string
+
+// parseName parses the tag and the target name for the given field using
+// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
+// field's name to a target name, and tagMapFunc for mapping the tag to
+// a target name.
+func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
+ // first, set the fieldName to the field's name
+ fieldName = field.Name
+ // if a mapFunc is set, use that to override the fieldName
+ if mapFunc != nil {
+ fieldName = mapFunc(fieldName)
+ }
+
+ // if there's no tag to look for, return the field name
+ if tagName == "" {
+ return "", fieldName
+ }
+
+ // if this tag is not set using the normal convention in the tag,
+ // then return the fieldname.. this check is done because according
+ // to the reflect documentation:
+ // If the tag does not have the conventional format,
+ // the value returned by Get is unspecified.
+ // which doesn't sound great.
+ if !strings.Contains(string(field.Tag), tagName+":") {
+ return "", fieldName
+ }
+
+ // at this point we're fairly sure that we have a tag, so lets pull it out
+ tag = field.Tag.Get(tagName)
+
+ // if we have a mapper function, call it on the whole tag
+ // XXX: this is a change from the old version, which pulled out the name
+ // before the tagMapFunc could be run, but I think this is the right way
+ if tagMapFunc != nil {
+ tag = tagMapFunc(tag)
+ }
+
+ // finally, split the options from the name
+ parts := strings.Split(tag, ",")
+ fieldName = parts[0]
+
+ return tag, fieldName
+}
+
+// parseOptions parses options out of a tag string, skipping the name
+func parseOptions(tag string) map[string]string {
+ parts := strings.Split(tag, ",")
+ options := make(map[string]string, len(parts))
+ if len(parts) > 1 {
+ for _, opt := range parts[1:] {
+ // short circuit potentially expensive split op
+ if strings.Contains(opt, "=") {
+ kv := strings.Split(opt, "=")
+ options[kv[0]] = kv[1]
+ continue
+ }
+ options[opt] = ""
+ }
+ }
+ return options
+}
+
+// getMapping returns a mapping for the t type, using the tagName, mapFunc and
+// tagMapFunc to determine the canonical names of fields.
+func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
+ m := []*FieldInfo{}
+
+ root := &FieldInfo{}
+ queue := []typeQueue{}
+ queue = append(queue, typeQueue{Deref(t), root, ""})
+
+QueueLoop:
+ for len(queue) != 0 {
+ // pop the first item off of the queue
+ tq := queue[0]
+ queue = queue[1:]
+
+ // ignore recursive field
+ for p := tq.fi.Parent; p != nil; p = p.Parent {
+ if tq.fi.Field.Type == p.Field.Type {
+ continue QueueLoop
+ }
+ }
+
+ nChildren := 0
+ if tq.t.Kind() == reflect.Struct {
+ nChildren = tq.t.NumField()
+ }
+ tq.fi.Children = make([]*FieldInfo, nChildren)
+
+ // iterate through all of its fields
+ for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
+
+ f := tq.t.Field(fieldPos)
+
+ // parse the tag and the target name using the mapping options for this field
+ tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
+
+ // if the name is "-", disabled via a tag, skip it
+ if name == "-" {
+ continue
+ }
+
+ fi := FieldInfo{
+ Field: f,
+ Name: name,
+ Zero: reflect.New(f.Type).Elem(),
+ Options: parseOptions(tag),
+ }
+
+ // if the path is empty this path is just the name
+ if tq.pp == "" {
+ fi.Path = fi.Name
+ } else {
+ fi.Path = tq.pp + "." + fi.Name
+ }
+
+ // skip unexported fields
+ if len(f.PkgPath) != 0 && !f.Anonymous {
+ continue
+ }
+
+ // bfs search of anonymous embedded structs
+ if f.Anonymous {
+ pp := tq.pp
+ if tag != "" {
+ pp = fi.Path
+ }
+
+ fi.Embedded = true
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ nChildren := 0
+ ft := Deref(f.Type)
+ if ft.Kind() == reflect.Struct {
+ nChildren = ft.NumField()
+ }
+ fi.Children = make([]*FieldInfo, nChildren)
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
+ } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
+ queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
+ }
+
+ fi.Index = apnd(tq.fi.Index, fieldPos)
+ fi.Parent = tq.fi
+ tq.fi.Children[fieldPos] = &fi
+ m = append(m, &fi)
+ }
+ }
+
+ flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
+ for _, fi := range flds.Index {
+ flds.Paths[fi.Path] = fi
+ if fi.Name != "" && !fi.Embedded {
+ flds.Names[fi.Path] = fi
+ }
+ }
+
+ return flds
+}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
new file mode 100644
index 0000000..c7a0bf3
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx.go
@@ -0,0 +1,1028 @@
+package sqlx
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "github.com/jmoiron/sqlx/reflectx"
+)
+
+// Although the NameMapper is convenient, in practice it should not
+// be relied on except for application code. If you are writing a library
+// that uses sqlx, you should be aware that the name mappings you expect
+// can be overridden by your user's application.
+
+// NameMapper is used to map column names to struct field names. By default,
+// it uses strings.ToLower to lowercase struct field names. It can be set
+// to whatever you want, but it is encouraged to be set before sqlx is used
+// as name-to-field mappings are cached after first use on a type.
+var NameMapper = strings.ToLower
+var origMapper = reflect.ValueOf(NameMapper)
+
+// Rather than creating on init, this is created when necessary so that
+// importers have time to customize the NameMapper.
+var mpr *reflectx.Mapper
+
+// mapper returns a valid mapper using the configured NameMapper func.
+func mapper() *reflectx.Mapper {
+ if mpr == nil {
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ } else if origMapper != reflect.ValueOf(NameMapper) {
+ // if NameMapper has changed, create a new mapper
+ mpr = reflectx.NewMapperFunc("db", NameMapper)
+ origMapper = reflect.ValueOf(NameMapper)
+ }
+ return mpr
+}
+
+// isScannable takes the reflect.Type and the actual dest value and returns
+// whether or not it's Scannable. Something is scannable if:
+// * it is not a struct
+// * it implements sql.Scanner
+// * it has no exported fields
+func isScannable(t reflect.Type) bool {
+ if reflect.PtrTo(t).Implements(_scannerInterface) {
+ return true
+ }
+ if t.Kind() != reflect.Struct {
+ return true
+ }
+
+ // it's not important that we use the right mapper for this particular object,
+ // we're only concerned on how many exported fields this struct has
+ m := mapper()
+ if len(m.TypeMap(t).Index) == 0 {
+ return true
+ }
+ return false
+}
+
+// ColScanner is an interface used by MapScan and SliceScan
+type ColScanner interface {
+ Columns() ([]string, error)
+ Scan(dest ...interface{}) error
+ Err() error
+}
+
+// Queryer is an interface used by Get and Select
+type Queryer interface {
+ Query(query string, args ...interface{}) (*sql.Rows, error)
+ Queryx(query string, args ...interface{}) (*Rows, error)
+ QueryRowx(query string, args ...interface{}) *Row
+}
+
+// Execer is an interface used by MustExec and LoadFile
+type Execer interface {
+ Exec(query string, args ...interface{}) (sql.Result, error)
+}
+
+// Binder is an interface for something which can bind queries (Tx, DB)
+type binder interface {
+ DriverName() string
+ Rebind(string) string
+ BindNamed(string, interface{}) (string, []interface{}, error)
+}
+
+// Ext is a union interface which can bind, query, and exec, used by
+// NamedQuery and NamedExec.
+type Ext interface {
+ binder
+ Queryer
+ Execer
+}
+
+// Preparer is an interface used by Preparex.
+type Preparer interface {
+ Prepare(query string) (*sql.Stmt, error)
+}
+
+// determine if any of our extensions are unsafe
+func isUnsafe(i interface{}) bool {
+ switch v := i.(type) {
+ case Row:
+ return v.unsafe
+ case *Row:
+ return v.unsafe
+ case Rows:
+ return v.unsafe
+ case *Rows:
+ return v.unsafe
+ case NamedStmt:
+ return v.Stmt.unsafe
+ case *NamedStmt:
+ return v.Stmt.unsafe
+ case Stmt:
+ return v.unsafe
+ case *Stmt:
+ return v.unsafe
+ case qStmt:
+ return v.unsafe
+ case *qStmt:
+ return v.unsafe
+ case DB:
+ return v.unsafe
+ case *DB:
+ return v.unsafe
+ case Tx:
+ return v.unsafe
+ case *Tx:
+ return v.unsafe
+ case sql.Rows, *sql.Rows:
+ return false
+ default:
+ return false
+ }
+}
+
+func mapperFor(i interface{}) *reflectx.Mapper {
+ switch i.(type) {
+ case DB:
+ return i.(DB).Mapper
+ case *DB:
+ return i.(*DB).Mapper
+ case Tx:
+ return i.(Tx).Mapper
+ case *Tx:
+ return i.(*Tx).Mapper
+ default:
+ return mapper()
+ }
+}
+
+var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
+var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// Row is a reimplementation of sql.Row in order to gain access to the underlying
+// sql.Rows.Columns() data, necessary for StructScan.
+type Row struct {
+ err error
+ unsafe bool
+ rows *sql.Rows
+ Mapper *reflectx.Mapper
+}
+
+// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
+// underlying error from the internal rows object if it exists.
+func (r *Row) Scan(dest ...interface{}) error {
+ if r.err != nil {
+ return r.err
+ }
+
+ // TODO(bradfitz): for now we need to defensively clone all
+ // []byte that the driver returned (not permitting
+ // *RawBytes in Rows.Scan), since we're about to close
+ // the Rows in our defer, when we return from this function.
+ // the contract with the driver.Next(...) interface is that it
+ // can return slices into read-only temporary memory that's
+ // only valid until the next Scan/Close. But the TODO is that
+ // for a lot of drivers, this copy will be unnecessary. We
+ // should provide an optional interface for drivers to
+ // implement to say, "don't worry, the []bytes that I return
+ // from Next will not be modified again." (for instance, if
+ // they were obtained from the network anyway) But for now we
+ // don't care.
+ defer r.rows.Close()
+ for _, dp := range dest {
+ if _, ok := dp.(*sql.RawBytes); ok {
+ return errors.New("sql: RawBytes isn't allowed on Row.Scan")
+ }
+ }
+
+ if !r.rows.Next() {
+ if err := r.rows.Err(); err != nil {
+ return err
+ }
+ return sql.ErrNoRows
+ }
+ err := r.rows.Scan(dest...)
+ if err != nil {
+ return err
+ }
+ // Make sure the query can be processed to completion with no errors.
+ if err := r.rows.Close(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
+// returned by Row.Scan()
+func (r *Row) Columns() ([]string, error) {
+ if r.err != nil {
+ return []string{}, r.err
+ }
+ return r.rows.Columns()
+}
+
+// Err returns the error encountered while scanning.
+func (r *Row) Err() error {
+ return r.err
+}
+
+// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
+// used mostly to automatically bind named queries using the right bindvars.
+type DB struct {
+ *sql.DB
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
+// driverName of the original database is required for named query support.
+func NewDb(db *sql.DB, driverName string) *DB {
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}
+}
+
+// DriverName returns the driverName passed to the Open function for this DB.
+func (db *DB) DriverName() string {
+ return db.driverName
+}
+
+// Open is the same as sql.Open, but returns an *sqlx.DB instead.
+func Open(driverName, dataSourceName string) (*DB, error) {
+ db, err := sql.Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
+}
+
+// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
+func MustOpen(driverName, dataSourceName string) *DB {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
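+
+// A minimal sketch of Open and MustOpen; it assumes the caller imports
+// github.com/jmoiron/sqlx and has registered a driver (the "sqlite3" name
+// and the in-memory DSN are placeholders):
+//
+//    db, err := sqlx.Open("sqlite3", ":memory:")
+//    if err != nil {
+//        log.Fatalln(err)
+//    }
+//    // or, panicking on error:
+//    db = sqlx.MustOpen("sqlite3", ":memory:")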
+
+// MapperFunc sets a new mapper for this db using the default sqlx struct tag
+// and the provided mapper function.
+func (db *DB) MapperFunc(mf func(string) string) {
+ db.Mapper = reflectx.NewMapperFunc("db", mf)
+}
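+
+// A short sketch of MapperFunc; lower-casing field names is one possible
+// mapping (strings.ToLower from the standard library is assumed):
+//
+//    db.MapperFunc(func(s string) string { return strings.ToLower(s) })
+//    // a field named "Telcode" with no `db` tag now maps to column "telcode"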
+
+// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
+func (db *DB) Rebind(query string) string {
+ return Rebind(BindType(db.driverName), query)
+}
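+
+// A sketch of Rebind; the "place" table is assumed for illustration, and the
+// result shown is what BindType produces for the "postgres" driver:
+//
+//    q := db.Rebind("SELECT * FROM place WHERE telcode = ?")
+//    // q == "SELECT * FROM place WHERE telcode = $1" when db.DriverName() == "postgres"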
+
+// Unsafe returns a version of DB which will silently ignore result columns
+// that have no matching fields in the destination struct, rather than
+// returning an error. sqlx.Stmt and sqlx.Tx values created from this DB
+// inherit its safety behavior.
+func (db *DB) Unsafe() *DB {
+ return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
+}
+
+// BindNamed binds a query using the DB driver's bindvar type.
+func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
+}
+
+// NamedQuery using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(db, query, arg)
+}
+
+// NamedExec using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(db, query, arg)
+}
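+
+// A sketch of NamedExec and NamedQuery; the "person" table and Person struct
+// are assumed for illustration (a map[string]interface{} works as arg too):
+//
+//    type Person struct {
+//        FirstName string `db:"first_name"`
+//        Email     string `db:"email"`
+//    }
+//    _, err := db.NamedExec(
+//        "INSERT INTO person (first_name, email) VALUES (:first_name, :email)",
+//        Person{FirstName: "Jane", Email: "jane@example.com"})
+//    rows, err := db.NamedQuery(
+//        "SELECT * FROM person WHERE first_name = :first_name",
+//        map[string]interface{}{"first_name": "Jane"})
+//    // iterate rows as usual, then rows.Close()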
+
+// Select using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(db, dest, query, args...)
+}
+
+// Get using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(db, dest, query, args...)
+}
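+
+// A sketch of Get and Select; the "place" table and Place struct are assumed
+// for illustration (columns match the lower-cased field names):
+//
+//    type Place struct {
+//        Country string
+//        Telcode int
+//    }
+//    var p Place
+//    err := db.Get(&p, "SELECT country, telcode FROM place WHERE telcode = ?", 65)
+//
+//    var places []Place
+//    err = db.Select(&places, "SELECT country, telcode FROM place ORDER BY telcode")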
+
+// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+func (db *DB) MustBegin() *Tx {
+ tx, err := db.Beginx()
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
+func (db *DB) Beginx() (*Tx, error) {
+ tx, err := db.DB.Begin()
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
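+
+// A sketch of a transaction via Beginx (MustBegin is the panicking variant);
+// the "place" table is assumed as above:
+//
+//    tx, err := db.Beginx()
+//    if err != nil {
+//        return err
+//    }
+//    tx.MustExec("INSERT INTO place (country, telcode) VALUES (?, ?)", "Singapore", 65)
+//    if err := tx.Commit(); err != nil {
+//        return err
+//    }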
+
+// Queryx queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowx queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := db.DB.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustExec executes a query using this database and panics on error.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(db, query, args...)
+}
+
+// Preparex returns an sqlx.Stmt instead of a sql.Stmt
+func (db *DB) Preparex(query string) (*Stmt, error) {
+ return Preparex(db, query)
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(db, query)
+}
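+
+// A sketch of Preparex and PrepareNamed; the "place" table and Place struct
+// are assumed as above:
+//
+//    stmt, err := db.Preparex("SELECT * FROM place WHERE telcode = ?")
+//    var p Place
+//    err = stmt.Get(&p, 65)
+//
+//    nstmt, err := db.PrepareNamed("SELECT * FROM place WHERE telcode = :telcode")
+//    err = nstmt.Get(&p, map[string]interface{}{"telcode": 65})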
+
+// Tx is an sqlx wrapper around sql.Tx with extra functionality
+type Tx struct {
+ *sql.Tx
+ driverName string
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// DriverName returns the driverName used by the DB which began this transaction.
+func (tx *Tx) DriverName() string {
+ return tx.driverName
+}
+
+// Rebind a query within a transaction's bindvar type.
+func (tx *Tx) Rebind(query string) string {
+ return Rebind(BindType(tx.driverName), query)
+}
+
+// Unsafe returns a version of Tx which will silently ignore result columns
+// that have no matching fields in the destination struct, rather than
+// returning an error.
+func (tx *Tx) Unsafe() *Tx {
+ return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
+}
+
+// BindNamed binds a query within a transaction's bindvar type.
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
+ return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
+}
+
+// NamedQuery within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
+ return NamedQuery(tx, query, arg)
+}
+
+// NamedExec a named query within a transaction.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
+ return NamedExec(tx, query, arg)
+}
+
+// Select within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
+ return Select(tx, dest, query, args...)
+}
+
+// Queryx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.Query(query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// QueryRowx within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.Query(query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// Get within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
+ return Get(tx, dest, query, args...)
+}
+
+// MustExec executes a query within a transaction and panics on error.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
+ return MustExec(tx, query, args...)
+}
+
+// Preparex a statement within a transaction.
+func (tx *Tx) Preparex(query string) (*Stmt, error) {
+ return Preparex(tx, query)
+}
+
+// Stmtx returns a version of the prepared statement which runs within a
+// transaction. The provided stmt can be a sql.Stmt, *sql.Stmt, sqlx.Stmt,
+// or *sqlx.Stmt.
+func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case sql.Stmt:
+ s = &v
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
+}
+
+// NamedStmt returns a version of the prepared statement which runs within a transaction.
+func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.Stmtx(stmt.Stmt),
+ }
+}
+
+// PrepareNamed returns an sqlx.NamedStmt
+func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
+ return prepareNamed(tx, query)
+}
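+
+// A sketch of reusing prepared statements inside a transaction; stmt and nstmt
+// are assumed to have been prepared on the parent DB (for example via Preparex
+// and PrepareNamed above):
+//
+//    tx := db.MustBegin()
+//    var p Place
+//    err := tx.Stmtx(stmt).Get(&p, 65)
+//    err = tx.NamedStmt(nstmt).Get(&p, map[string]interface{}{"telcode": 65})
+//    err = tx.Commit()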
+
+// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
+type Stmt struct {
+ *sql.Stmt
+ unsafe bool
+ Mapper *reflectx.Mapper
+}
+
+// Unsafe returns a version of Stmt which will silently ignore result columns
+// that have no matching fields in the destination struct, rather than
+// returning an error.
+func (s *Stmt) Unsafe() *Stmt {
+ return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
+}
+
+// Select using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
+ return Select(&qStmt{s}, dest, "", args...)
+}
+
+// Get using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
+ return Get(&qStmt{s}, dest, "", args...)
+}
+
+// MustExec executes the prepared statement and panics on error. Note that the
+// query portion of the error output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExec(args ...interface{}) sql.Result {
+ return MustExec(&qStmt{s}, "", args...)
+}
+
+// QueryRowx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowx(args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowx("", args...)
+}
+
+// Queryx using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.Queryx("", args...)
+}
+
+// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
+// implementing those interfaces and ignoring the `query` argument.
+type qStmt struct{ *Stmt }
+
+func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.Query(args...)
+}
+
+func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.Query(args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.Query(args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.Exec(args...)
+}
+
+// Rows is a wrapper around sql.Rows which caches costly reflect operations
+// during a looped StructScan
+type Rows struct {
+ *sql.Rows
+ unsafe bool
+ Mapper *reflectx.Mapper
+ // these fields cache memory use for a rows during iteration w/ structScan
+ started bool
+ fields [][]int
+ values []interface{}
+}
+
+// SliceScan using this Rows.
+func (r *Rows) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Rows.
+func (r *Rows) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
+// Use this and iterate over Rows manually when the memory load of Select() might be
+// prohibitive. *Rows.StructScan caches the reflect work of matching up column
+// positions to fields to avoid that overhead per scan, which means it is not safe
+// to run StructScan on the same Rows instance with different struct types.
+func (r *Rows) StructScan(dest interface{}) error {
+ v := reflect.ValueOf(dest)
+
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+
+ v = reflect.Indirect(v)
+
+ if !r.started {
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+ m := r.Mapper
+
+ r.fields = m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(r.fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ r.values = make([]interface{}, len(columns))
+ r.started = true
+ }
+
+ err := fieldsByTraversal(v, r.fields, r.values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ err = r.Scan(r.values...)
+ if err != nil {
+ return err
+ }
+ return r.Err()
+}
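+
+// A sketch of manual iteration with Queryx and StructScan, for when loading
+// the whole result via Select would be too expensive; the Place struct is
+// assumed as above:
+//
+//    rows, err := db.Queryx("SELECT country, telcode FROM place")
+//    if err != nil {
+//        return err
+//    }
+//    defer rows.Close()
+//    for rows.Next() {
+//        var p Place
+//        if err := rows.StructScan(&p); err != nil {
+//            return err
+//        }
+//        // use p
+//    }
+//    return rows.Err()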
+
+// Connect to a database and verify with a ping.
+func Connect(driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return db, err
+ }
+ err = db.Ping()
+ return db, err
+}
+
+// MustConnect connects to a database and panics on error.
+func MustConnect(driverName, dataSourceName string) *DB {
+ db, err := Connect(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
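+
+// A sketch of Connect, which is Open followed by a Ping; the driver name and
+// DSN are placeholders:
+//
+//    db, err := sqlx.Connect("mysql", "user:password@/dbname")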
+
+// Preparex prepares a statement.
+func Preparex(p Preparer, query string) (*Stmt, error) {
+ s, err := p.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// Select executes a query using the provided Queryer, and StructScans each row
+// into dest, which must be a slice. If the slice elements are scannable, then
+// the result set must have only one column. Otherwise, StructScan is used.
+// The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.Queryx(query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// Get does a QueryRow using the provided Queryer, and scans the resulting row
+// to dest. If dest is scannable, the result must only have one column. Otherwise,
+// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowx(query, args...)
+ return r.scanAny(dest, false)
+}
+
+// LoadFile exec's every statement in a file (as a single call to Exec).
+// LoadFile may return a nil *sql.Result if errors are encountered locating or
+// reading the file at path. LoadFile reads the entire file into memory, so it
+// is not suitable for loading large data dumps, but can be useful for initializing
+// schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and the current behavior, while
+// driver-specific and incorrect in places, is at least simple.
+func LoadFile(e Execer, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.Exec(string(contents))
+ return &res, err
+}
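+
+// A sketch of LoadFile; the path is a placeholder, and per the FIXME above,
+// multi-statement files may not work with every driver:
+//
+//    if _, err := sqlx.LoadFile(db, "schema.sql"); err != nil {
+//        return err
+//    }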
+
+// MustExec execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExec(e Execer, query string, args ...interface{}) sql.Result {
+ res, err := e.Exec(query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// SliceScan using this Row.
+func (r *Row) SliceScan() ([]interface{}, error) {
+ return SliceScan(r)
+}
+
+// MapScan using this Row.
+func (r *Row) MapScan(dest map[string]interface{}) error {
+ return MapScan(r, dest)
+}
+
+func (r *Row) scanAny(dest interface{}, structOnly bool) error {
+ if r.err != nil {
+ return r.err
+ }
+ if r.rows == nil {
+ r.err = sql.ErrNoRows
+ return r.err
+ }
+ defer r.rows.Close()
+
+ v := reflect.ValueOf(dest)
+ if v.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if v.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+
+ base := reflectx.Deref(v.Type())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
+ }
+
+ if scannable {
+ return r.Scan(dest)
+ }
+
+ m := r.Mapper
+
+ fields := m.TraversalsByName(v.Type(), columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !r.unsafe {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values := make([]interface{}, len(columns))
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+ // scan into the struct field pointers and append to our results
+ return r.Scan(values...)
+}
+
+// StructScan a single Row into dest.
+func (r *Row) StructScan(dest interface{}) error {
+ return r.scanAny(dest, true)
+}
+
+// SliceScan a row, returning a []interface{} with values similar to MapScan.
+// This function is primarily intended for use where the number of columns
+// is not known. Because you can pass an []interface{} directly to Scan,
+// it's recommended that you do that as it will not have to allocate new
+// slices per row.
+func SliceScan(r ColScanner) ([]interface{}, error) {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return []interface{}{}, err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+
+ if err != nil {
+ return values, err
+ }
+
+ for i := range columns {
+ values[i] = *(values[i].(*interface{}))
+ }
+
+ return values, r.Err()
+}
+
+// MapScan scans a single Row into the dest map[string]interface{}.
+// Use this to get results for SQL that might not be under your control
+// (for instance, if you're building an interface for an SQL server that
+// executes SQL from input). Please do not use this as a primary interface!
+// This will modify the map sent to it in place, so reuse the same map with
+// care. Columns which occur more than once in the result will overwrite
+// each other!
+func MapScan(r ColScanner, dest map[string]interface{}) error {
+ // ignore r.started, since we needn't use reflect for anything.
+ columns, err := r.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := make([]interface{}, len(columns))
+ for i := range values {
+ values[i] = new(interface{})
+ }
+
+ err = r.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ for i, column := range columns {
+ dest[column] = *(values[i].(*interface{}))
+ }
+
+ return r.Err()
+}
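+
+// A sketch of MapScan for queries whose columns are not known in advance; the
+// query is a placeholder:
+//
+//    m := map[string]interface{}{}
+//    err := db.QueryRowx("SELECT * FROM place LIMIT 1").MapScan(m)
+//    // m now maps column names to values, e.g. m["telcode"]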
+
+type rowsi interface {
+ Close() error
+ Columns() ([]string, error)
+ Err() error
+ Next() bool
+ Scan(...interface{}) error
+}
+
+// structOnlyError returns an error appropriate for the given type when a
+// non-scannable struct was expected but something else was given.
+func structOnlyError(t reflect.Type) error {
+ isStruct := t.Kind() == reflect.Struct
+ isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
+ if !isStruct {
+ return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
+ }
+ if isScanner {
+ return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
+ }
+ return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
+}
+
+// scanAll scans all rows into a destination, which must be a slice of any
+// type. If the destination slice type is a Struct, then StructScan will be
+// used on each row. If the destination is some other kind of base type, then
+// each row must only have one column which can scan into that type. This
+// allows you to do something like:
+//
+// rows, _ := db.Query("select id from people;")
+// var ids []int
+// scanAll(rows, &ids, false)
+//
+// and ids will be a list of the id results. I realize that this is a desirable
+// interface to expose to users, but for now it will only be exposed via changes
+// to `Get` and `Select`. The reason that this has been implemented like this is
+// this is the only way to not duplicate reflect work in the new API while
+// maintaining backwards compatibility.
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
+ var v, vp reflect.Value
+
+ value := reflect.ValueOf(dest)
+
+ // json.Unmarshal returns errors for these
+ if value.Kind() != reflect.Ptr {
+ return errors.New("must pass a pointer, not a value, to StructScan destination")
+ }
+ if value.IsNil() {
+ return errors.New("nil pointer passed to StructScan destination")
+ }
+ direct := reflect.Indirect(value)
+
+ slice, err := baseType(value.Type(), reflect.Slice)
+ if err != nil {
+ return err
+ }
+
+ isPtr := slice.Elem().Kind() == reflect.Ptr
+ base := reflectx.Deref(slice.Elem())
+ scannable := isScannable(base)
+
+ if structOnly && scannable {
+ return structOnlyError(base)
+ }
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ // if it's a base type make sure it only has 1 column; if not return an error
+ if scannable && len(columns) > 1 {
+ return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
+ }
+
+ if !scannable {
+ var values []interface{}
+ var m *reflectx.Mapper
+
+ switch rows.(type) {
+ case *Rows:
+ m = rows.(*Rows).Mapper
+ default:
+ m = mapper()
+ }
+
+ fields := m.TraversalsByName(base, columns)
+ // if we are not unsafe and are missing fields, return an error
+ if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
+ return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
+ }
+ values = make([]interface{}, len(columns))
+
+ for rows.Next() {
+ // create a new struct type (which returns PtrTo) and indirect it
+ vp = reflect.New(base)
+ v = reflect.Indirect(vp)
+
+ err = fieldsByTraversal(v, fields, values, true)
+ if err != nil {
+ return err
+ }
+
+ // scan into the struct field pointers and append to our results
+ err = rows.Scan(values...)
+ if err != nil {
+ return err
+ }
+
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, v))
+ }
+ }
+ } else {
+ for rows.Next() {
+ vp = reflect.New(base)
+ err = rows.Scan(vp.Interface())
+ if err != nil {
+ return err
+ }
+ // append
+ if isPtr {
+ direct.Set(reflect.Append(direct, vp))
+ } else {
+ direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
+ }
+ }
+ }
+
+ return rows.Err()
+}
+
+// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
+// it doesn't really feel like it's named properly. There is an incongruency
+// between this and the way that StructScan (which might better be ScanStruct
+// anyway) works on a rows object.
+
+// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
+// StructScan will scan in the entire rows result, so if you do not want to
+// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
+// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
+func StructScan(rows rowsi, dest interface{}) error {
+ return scanAll(rows, dest, true)
+}
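+
+// A sketch of the package-level StructScan over a plain *sql.Rows; the Place
+// struct and "place" table are assumed as above:
+//
+//    rows, err := db.Query("SELECT country, telcode FROM place")
+//    if err != nil {
+//        return err
+//    }
+//    defer rows.Close()
+//    var places []Place
+//    err = sqlx.StructScan(rows, &places)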
+
+// reflect helpers
+
+func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
+ t = reflectx.Deref(t)
+ if t.Kind() != expected {
+ return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
+ }
+ return t, nil
+}
+
+// fieldsByTraversal fills a values interface with fields from the passed value
+// based on the index traversals in traversals. If ptrs is true, it stores
+// addresses instead of values.
+// We write this instead of using FieldsByName to save allocations and map lookups
+// when iterating over many rows. Empty traversals will get an interface pointer.
+// Because of the necessity of requesting ptrs or values, it's considered a bit too
+// specialized for inclusion in reflectx itself.
+func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
+ v = reflect.Indirect(v)
+ if v.Kind() != reflect.Struct {
+ return errors.New("argument not a struct")
+ }
+
+ for i, traversal := range traversals {
+ if len(traversal) == 0 {
+ values[i] = new(interface{})
+ continue
+ }
+ f := reflectx.FieldByIndexes(v, traversal)
+ if ptrs {
+ values[i] = f.Addr().Interface()
+ } else {
+ values[i] = f.Interface()
+ }
+ }
+ return nil
+}
+
+func missingFields(traversals [][]int) (field int, err error) {
+	for i, t := range traversals {
+ if len(t) == 0 {
+ return i, errors.New("missing field")
+ }
+ }
+ return 0, nil
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 48a6e98..84c528b 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -369,6 +369,18 @@
"revisionTime": "2016-08-03T19:07:31Z"
},
{
+ "checksumSHA1": "hmjY4MUlyj/kx/zw+R1iSu3FTlo=",
+ "path": "github.com/jmoiron/sqlx",
+ "revision": "cac998c4f0959c19c638c523e374fa8e4e0bcfe3",
+ "revisionTime": "2016-12-09T02:45:31Z"
+ },
+ {
+ "checksumSHA1": "NrhkMPKlj0N6LaD2JMb2KKnR03w=",
+ "path": "github.com/jmoiron/sqlx/reflectx",
+ "revision": "cac998c4f0959c19c638c523e374fa8e4e0bcfe3",
+ "revisionTime": "2016-12-09T02:45:31Z"
+ },
+ {
"checksumSHA1": "506eXGmFfB7mgzbMcsdT/UAXJgI=",
"path": "github.com/magiconair/properties",
"revision": "9c47895dc1ce54302908ab8a43385d1f5df2c11c",