-rw-r--r--  README.md             70
-rw-r--r--  database.go           82
-rw-r--r--  go.mod                 5
-rw-r--r--  main.go              114
-rw-r--r--  own.go               155
-rw-r--r--  sumdb/cache.go        59
-rw-r--r--  sumdb/client.go      671
-rw-r--r--  sumdb/client_test.go 460
-rw-r--r--  sumdb/test.go        128
9 files changed, 1658 insertions, 86 deletions
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..998003e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,70 @@
+# sdbv
+
+Resources:
+
+* https://research.swtch.com/tlog
+* https://sum.golang.org/
+* https://go.googlesource.com/proposal/+/master/design/25530-sumdb.md
+* https://github.com/crtsh/ct_monitor/blob/master/ct_monitor.go
+* https://tools.ietf.org/html/rfc6962#section-2.1
+
+sdbv is a Go checksum database verifier: it helps you prove that a checksum
+database is trustworthy.
+
+The goal is to cryptographically prove that the current checksum database is a
+superset of a previously observed database. This prevents the database from
+silently modifying checksums and simply recomputing its Merkle tree.
+
+To do this, we need to periodically check for the latest tree head and prove
+that the previous tree head is contained within the new tree.
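+
+As a rough sketch (not the code in this repository), that containment check can
+be expressed with golang.org/x/mod/sumdb/tlog, given two already-verified tree
+heads and a tlog.HashReader that can serve hashes for the newer tree (for
+example, a tlog.TileHashReader backed by the /tile endpoints described below).
+The helper name proveContained is made up for illustration:
+
+```go
+package main // sketch only; assumes golang.org/x/mod as in go.mod
+
+import "golang.org/x/mod/sumdb/tlog"
+
+// proveContained checks that the previously verified tree head (older)
+// is contained in the newly fetched one (newer).
+func proveContained(older, newer tlog.Tree, thr tlog.HashReader) error {
+	p, err := tlog.ProveTree(newer.N, older.N, thr)
+	if err != nil {
+		return err
+	}
+	// CheckTree verifies the proof against both tree heads; a failure
+	// means the log forked or served inconsistent data.
+	return tlog.CheckTree(p, newer.N, newer.Hash, older.N, older.Hash)
+}
+```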
+
+GET $GOSUMDB/lookup/M@V returns 1) the record number of the module version,
+2) its go.sum lines, and 3) a signed tree head
+
+GET $GOSUMDB/tile/H/L/K[.p/W] returns a log tile
+GET $GOSUMDB/tile/H/data/K[.p/W] returns record data
+GET $GOSUMDB/latest returns the latest signed tree head for a log
+
+
+$GOSUMDB/lookup/go.dog/breeds@v0.3.2
+
+9
+go.dog/breeds v0.3.2 <hash>
+go.dog/breeds v0.3.2/go.mod <hash>
+<STH>
+
+$GOSUMDB/tile/8/0/005
+$GOSUMDB/tile/8/1/000.p/59
+
+The Go checksum database will run at https://sum.golang.org/ and serve the
+following endpoints:
+
+ /latest will serve a signed tree size and hash for the latest log.
+
+ /lookup/M@V will serve the log record number for the entry about module M
+version V, followed by the data for the record (that is, the go.sum lines for
+module M version V) and a signed tree hash for a tree that contains the record.
+If the module version is not yet recorded in the log, the notary will try to
+fetch it before replying. Note that the data should never be used without first
+authenticating it against the signed tree hash and authenticating the signed
+tree hash against the client's timeline of signed tree hashes.
+
+ /tile/H/L/K[.p/W] will serve a log tile. The optional .p/W suffix indicates
+a partial log tile with only W hashes. Clients must fall back to fetching the
+full tile if a partial tile is not found. The record data for the leaf hashes in
+/tile/H/0/K[.p/W] are served as /tile/H/data/K[.p/W] (with a literal data path
+element).
+
+Clients are expected to use /lookup and /tile/H/L/... during normal operations,
+while auditors will want to use /latest and /tile/H/data/.... A special go
+command may also fetch /latest to force incorporation of that signed tree head
+into the local timeline.
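+
+For illustration, fetching and verifying the signed tree head from /latest can
+be done with golang.org/x/mod/sumdb/note and tlog. This is a minimal sketch
+(not the client in this repository) using the public sum.golang.org key listed
+at the bottom of this README; error handling is abbreviated:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+
+	"golang.org/x/mod/sumdb/note"
+	"golang.org/x/mod/sumdb/tlog"
+)
+
+func main() {
+	const vkey = "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8"
+
+	resp, err := http.Get("https://sum.golang.org/latest")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+	msg, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	verifier, err := note.NewVerifier(vkey)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// note.Open fails unless the note carries a valid signature
+	// from one of the supplied verifiers.
+	n, err := note.Open(msg, note.VerifierList(verifier))
+	if err != nil {
+		log.Fatal(err)
+	}
+	tree, err := tlog.ParseTree([]byte(n.Text))
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("verified tree head: size=%d hash=%v\n", tree.N, tree.Hash)
+}
+```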
+
+A module proxy can also proxy requests to the checksum database. The general
+proxy URL form is <proxyURL>/sumdb/<databaseURL>. If GOPROXY=https://proxy.site
+then the latest signed tree would be fetched using
+https://proxy.site/sumdb/sum.golang.org/latest. Including the full database URL
+allows a transition to a new database log, such as sum.golang.org/v2.
+
+Known key:
+sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8
diff --git a/database.go b/database.go
new file mode 100644
index 0000000..47a8918
--- /dev/null
+++ b/database.go
@@ -0,0 +1,82 @@
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "sync"
+ "time"
+
+ "git.sr.ht/~benburwell/gosumdbaudit/sumdb"
+)
+
+type database struct {
+ host string
+ key string
+ pollInterval time.Duration
+
+ hc http.Client
+
+ config map[string][]byte
+ configMu sync.RWMutex
+}
+
+func (d *database) ReadRemote(path string) ([]byte, error) {
+ log.Printf("read remote: %s", path)
+ resp, err := d.hc.Get("https://" + d.host + path)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ // ClientOps requires an error for any non-200 response.
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("GET %s: %s", path, resp.Status)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
+
+func (d *database) ReadConfig(file string) ([]byte, error) {
+ log.Printf("read config: %s", file)
+ if file == "key" {
+ return []byte(d.key), nil
+ }
+ d.configMu.RLock()
+ defer d.configMu.RUnlock()
+ // Returning (nil, nil) for a missing file signals an empty starting
+ // configuration (e.g. an empty signed-tree timeline).
+ // Example of seeding a known tree head, kept for reference:
+ // d.config["sum.golang.org/latest"] = []byte(`go.sum database tree
+ // 163038
+ // S1dhskM/kuUJUOCz3InBRhl0vFiHxr0INft+24ClisI=
+ //
+ // — sum.golang.org Az3gruAGD/ybzwcCUArmKpzAZNmEOu3Yahr9WIKA2SFAK3G2xzo39uHS70mylR3nsT9t3ZpVQW89RT6Tg1+1nIf7bgI=
+ // `)
+ c, ok := d.config[file]
+ if !ok {
+ return nil, nil
+ }
+ return c, nil
+}
+
+func (d *database) WriteConfig(file string, old, new []byte) error {
+ log.Printf("write config: %s", file)
+ d.configMu.Lock()
+ defer d.configMu.Unlock()
+ if d.config == nil {
+ d.config = make(map[string][]byte)
+ }
+ if val, ok := d.config[file]; ok && !bytes.Equal(val, old) {
+ return sumdb.ErrWriteConflict
+ }
+ d.config[file] = new
+ return nil
+}
+
+func (d *database) ReadCache(file string) ([]byte, error) {
+ return nil, fmt.Errorf("cache is not implemented")
+}
+
+func (d *database) WriteCache(file string, data []byte) {}
+
+func (d *database) Log(msg string) {
+ log.Print(msg)
+}
+
+func (d *database) SecurityError(msg string) {
+ log.Printf("!!! SECURITY ERROR !!!\n%s", msg)
+}
diff --git a/go.mod b/go.mod
index 8b85e55..6e09da5 100644
--- a/go.mod
+++ b/go.mod
@@ -2,4 +2,7 @@ module git.sr.ht/~benburwell/gosumdbaudit
go 1.13
-require golang.org/x/mod v0.1.0
+require (
+ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529
+ golang.org/x/mod v0.1.0
+)
diff --git a/main.go b/main.go
index d08cdbd..9a62ac3 100644
--- a/main.go
+++ b/main.go
@@ -1,96 +1,40 @@
package main
import (
- "bytes"
- "fmt"
- "io"
"log"
- "net/http"
- "strconv"
- "strings"
+ "time"
- "golang.org/x/mod/sumdb/note"
+ "git.sr.ht/~benburwell/gosumdbaudit/sumdb"
)
func main() {
- dbs := []*db{
- &db{host: "sum.golang.org", key: "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8"},
- //&db{host: "sum.golang.org", key: "sum.golang.org+033de0ae+BADBADBADBADBADBADBADBADBADBADBADBADBADBADBA"},
- }
- for _, d := range dbs {
- if err := audit(d); err != nil {
- log.Printf("AUDIT FAIL (%s): %s", d.host, err.Error())
+ dbs := []*database{
+ &database{
+ host: "sum.golang.org",
+
+ key: "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8",
+ // key: "sum.golang.org+033de0ae+BADBADBADBADBADBADBADBADBADBADBADBADBADBADBA",
+
+ pollInterval: 10 * time.Second,
+ },
+ }
+ for _, db := range dbs {
+ client := sumdb.NewClient(db)
+ // lines, err := client.Lookup("golang.org/x/text", "v0.3.0")
+ lines, err := client.Lookup("github.com/influxdata/influxdb", "v0.0.0-20190908081120-80e3efa37a3a")
+ if err != nil {
+ log.Printf("could not lookup: %v", err)
+ } else {
+ for _, line := range lines {
+ log.Printf("got: %s", line)
+ }
}
+ // log.Printf("config: %s", string(db.config["sum.golang.org/latest"]))
+ // if err := d.monitor(); err != nil {
+ // log.Printf("AUDIT FAIL (%s): %s", d.host, err.Error())
+ // }
+ // if err := audit(d); err != nil {
+ // log.Printf("AUDIT FAIL (%s): %s", d.host, err.Error())
+ // }
}
}
-
-func audit(d *db) error {
- log.Printf("starting audit of %s...", d.host)
- size, hash, err := d.getLatest()
- if err != nil {
- return err
- }
- log.Printf("db size %d", size)
- log.Printf("db hash %s", hash)
- return nil
-}
-
-type db struct {
- host string
- key string
-}
-
-// httpGet makes a GET request to the specified path of the database and
-// returns a byte slice of the response body.
-func (d *db) httpGet(path string) ([]byte, error) {
- client := &http.Client{}
- resp, err := client.Get("https://" + d.host + path)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- var body bytes.Buffer
- if _, err := io.Copy(&body, resp.Body); err != nil {
- return nil, fmt.Errorf("could not read response body: %w", err)
- }
- return body.Bytes(), nil
-}
-
-// verifyNote takes a signed byte slice, verifies the signature against the
-// db's public key. If successful, the note content is returned, otherwise, an
-// error.
-func (d *db) verifyNote(b []byte) (string, error) {
- verifier, err := note.NewVerifier(d.key)
- if err != nil {
- return "", err
- }
- verifiers := note.VerifierList(verifier)
- msg, err := note.Open(b, verifiers)
- if err != nil {
- return "", err
- }
- return msg.Text, nil
-}
-
-// getLatest fetches and verifies the latest signed tree head hash and database
-// size.
-func (d *db) getLatest() (int, string, error) {
- body, err := d.httpGet("/latest")
- if err != nil {
- return 0, "", fmt.Errorf("could not fetch latest: %w", err)
- }
- msg, err := d.verifyNote(body)
- if err != nil {
- return 0, "", fmt.Errorf("could not verify note: %w", err)
- }
- parts := strings.Split(msg, "\n")
- if len(parts) != 4 {
- return 0, "", fmt.Errorf("could not parse latest: expected %d lines but got %d", 4, len(parts))
- }
- size, err := strconv.Atoi(parts[1])
- if err != nil {
- return 0, "", fmt.Errorf("could not parse tree size: %w", err)
- }
- hash := parts[2]
- return size, hash, nil
-}
diff --git a/own.go b/own.go
new file mode 100644
index 0000000..ae3e457
--- /dev/null
+++ b/own.go
@@ -0,0 +1,155 @@
+package main
+
+// func audit(d *db) error {
+// log.Printf("starting audit of %s...", d.host)
+// size, hash, err := d.getLatest()
+// if err != nil {
+// return err
+// }
+// log.Printf("db size %d", size)
+// log.Printf("db hash %s", hash)
+// return nil
+// }
+
+// type db struct {
+// host string
+// key string
+// pollInterval time.Duration
+// }
+
+// // httpGet makes a GET request to the specified path of the database and
+// // returns a byte slice of the response body.
+// func (d *db) httpGet(path string) ([]byte, error) {
+// client := &http.Client{}
+// resp, err := client.Get("https://" + d.host + path)
+// if err != nil {
+// return nil, err
+// }
+// defer resp.Body.Close()
+// var body bytes.Buffer
+// if _, err := io.Copy(&body, resp.Body); err != nil {
+// return nil, fmt.Errorf("could not read response body: %w", err)
+// }
+// return body.Bytes(), nil
+// }
+
+// // verifyNote takes a signed byte slice, verifies the signature against the
+// // db's public key. If successful, the note content is returned, otherwise, an
+// // error.
+// func (d *db) verifyNote(b []byte) (string, error) {
+// verifier, err := note.NewVerifier(d.key)
+// if err != nil {
+// return "", err
+// }
+// verifiers := note.VerifierList(verifier)
+// msg, err := note.Open(b, verifiers)
+// if err != nil {
+// return "", err
+// }
+// return msg.Text, nil
+// }
+
+// // bootstrapMonitor fetches and verifies the current tree starting from the
+// // first log entry, and returns the current verified size and hash.
+// func (d *db) bootstrapMonitor() (int, string, error) {
+// log.Printf("bootstrapping monitor")
+// log.Printf("TODO: implement fully")
+// log.Printf("verified until size 163038")
+// return 163038, "S1dhskM/kuUJUOCz3InBRhl0vFiHxr0INft+24ClisI=", nil
+
+// // TODO: implement
+
+// // 1. Fetch the current STH (section 4.3)
+// // 2. Verify the STH signature
+// // size, hash, err := d.getLatest()
+// // if err != nil {
+// // return err
+// // }
+
+// // 3. Fetch all entries in the tree corresponding to the STH (section 4.6)
+
+// // 4. Confirm that the tree made from the fetched entries produces the same
+// // hash as that in the STH.
+// }
+
+// // monitor monitors the db to ensure it behaves correctly, using the algorithm
+// // for CT logs specified in RFC 6962 section 5.3.
+// func (d *db) monitor() error {
+// log.Printf("starting monitor")
+// size, hash, err := d.bootstrapMonitor()
+// if err != nil {
+// return err
+// }
+// log.Printf("successfully verified merkle tree proof until size %d and hash %s", size, hash)
+
+// // 5. Fetch the current STH (section 4.3). Repeat until the STH changes.
+// // 6. Verify the STH signature.
+// log.Printf("waiting for a tree size greater than %d", size)
+// newSize, newHash, err := d.awaitNewSTH(size)
+// if err != nil {
+// return err
+// }
+// log.Printf("got new STH with size %d and hash %s", newSize, newHash)
+
+// // 7. Fetch all the new entries in the tree corresponding to the STH (section
+// // 4.6). If they remain unavailable for an extended period, then this should
+// // be viewed as misbehavior on the part of the log.
+
+// // 8. Fetch a consistency proof for the new STH with the previous STH
+// // (section 4.4).
+
+// // 9. Verify the consistency proof.
+
+// // 10. Verify that the new entries generate the corresponding elements in the
+// // consistency proof.
+
+// // 11. Go to step 5.
+
+// return nil
+// }
+
+// // awaitNewSTH periodically checks and verifies the current STH. If the latest
+// // tree size differs from the previous size, the new verified size and hash are
+// // returned.
+// func (d *db) awaitNewSTH(prevSize int) (int, string, error) {
+// for {
+// log.Printf("sleeping...")
+// time.Sleep(d.pollInterval)
+// log.Printf("checking latest tree size")
+// size, hash, err := d.getLatest()
+// if err != nil {
+// return 0, "", err
+// }
+// if size < prevSize {
+// return 0, "", fmt.Errorf("misbehaving log: latest log contains %d entries but previously reported %d", size, prevSize)
+// }
+// if size != prevSize {
+// log.Printf("found a new STH (size=%d)", size)
+// return size, hash, nil
+// }
+// log.Printf("tree sizes match")
+// }
+// }
+
+// // getLatest fetches and verifies the latest signed tree head hash and database
+// // size.
+// func (d *db) getLatest() (int, string, error) {
+// body, err := d.httpGet("/latest")
+// if err != nil {
+// return 0, "", fmt.Errorf("could not fetch latest: %w", err)
+// }
+// msg, err := d.verifyNote(body)
+// if err != nil {
+// return 0, "", fmt.Errorf("could not verify note: %w", err)
+// }
+// parts := strings.Split(msg, "\n")
+// if len(parts) != 4 {
+// return 0, "", fmt.Errorf("could not parse latest: expected %d lines but got %d", 4, len(parts))
+// }
+// size, err := strconv.Atoi(parts[1])
+// if err != nil {
+// return 0, "", fmt.Errorf("could not parse tree size: %w", err)
+// }
+// hash := parts[2]
+// return size, hash, nil
+// }
diff --git a/sumdb/cache.go b/sumdb/cache.go
new file mode 100644
index 0000000..629e591
--- /dev/null
+++ b/sumdb/cache.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parallel cache.
+// This file is copied from cmd/go/internal/par.
+
+package sumdb
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// parCache runs an action once per key and caches the result.
+type parCache struct {
+ m sync.Map
+}
+
+type cacheEntry struct {
+ done uint32
+ mu sync.Mutex
+ result interface{}
+}
+
+// Do calls the function f if and only if Do is being called for the first time with this key.
+// No call to Do with a given key returns until the one call to f returns.
+// Do returns the value returned by the one call to f.
+func (c *parCache) Do(key interface{}, f func() interface{}) interface{} {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry))
+ }
+ e := entryIface.(*cacheEntry)
+ if atomic.LoadUint32(&e.done) == 0 {
+ e.mu.Lock()
+ if atomic.LoadUint32(&e.done) == 0 {
+ e.result = f()
+ atomic.StoreUint32(&e.done, 1)
+ }
+ e.mu.Unlock()
+ }
+ return e.result
+}
+
+// Get returns the cached result associated with key.
+// It returns nil if there is no such result.
+// If the result for key is being computed, Get does not wait for the computation to finish.
+func (c *parCache) Get(key interface{}) interface{} {
+ entryIface, ok := c.m.Load(key)
+ if !ok {
+ return nil
+ }
+ e := entryIface.(*cacheEntry)
+ if atomic.LoadUint32(&e.done) == 0 {
+ return nil
+ }
+ return e.result
+}
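+
+// Usage sketch (illustrative; not part of the copied cmd/go code):
+//
+//	var c parCache
+//	v := c.Do(key, func() interface{} {
+//		return expensiveLookup(key) // hypothetical function; runs once per key
+//	})
+//	_ = v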
diff --git a/sumdb/client.go b/sumdb/client.go
new file mode 100644
index 0000000..70dd56f
--- /dev/null
+++ b/sumdb/client.go
@@ -0,0 +1,671 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sumdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/mod/module"
+ "golang.org/x/mod/sumdb/note"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// A ClientOps provides the external operations
+// (file caching, HTTP fetches, and so on) needed by the Client.
+// The methods must be safe for concurrent use by multiple goroutines.
+type ClientOps interface {
+ // ReadRemote reads and returns the content served at the given path
+ // on the remote database server. The path begins with "/lookup" or "/tile/",
+ // and there is no need to parse the path in any way.
+ // It is the implementation's responsibility to turn that path into a full URL
+ // and make the HTTP request. ReadRemote should return an error for
+ // any non-200 HTTP response status.
+ ReadRemote(path string) ([]byte, error)
+
+ // ReadConfig reads and returns the content of the named configuration file.
+ // There are only a fixed set of configuration files.
+ //
+ // "key" returns a file containing the verifier key for the server.
+ //
+ // serverName + "/latest" returns a file containing the latest known
+ // signed tree from the server.
+ // To signal that the client wishes to start with an "empty" signed tree,
+ // ReadConfig can return a successful empty result (0 bytes of data).
+ ReadConfig(file string) ([]byte, error)
+
+ // WriteConfig updates the content of the named configuration file,
+ // changing it from the old []byte to the new []byte.
+ // If the old []byte does not match the stored configuration,
+ // WriteConfig must return ErrWriteConflict.
+ // Otherwise, WriteConfig should atomically replace old with new.
+ // The "key" configuration file is never written using WriteConfig.
+ WriteConfig(file string, old, new []byte) error
+
+ // ReadCache reads and returns the content of the named cache file.
+ // Any returned error will be treated as equivalent to the file not existing.
+ // There can be arbitrarily many cache files, such as:
+ // serverName/lookup/pkg@version
+ // serverName/tile/8/1/x123/456
+ ReadCache(file string) ([]byte, error)
+
+ // WriteCache writes the named cache file.
+ WriteCache(file string, data []byte)
+
+ // Log prints the given log message (such as with log.Print)
+ Log(msg string)
+
+ // SecurityError prints the given security error log message.
+ // The Client returns ErrSecurity from any operation that invokes SecurityError,
+ // but the return value is mainly for testing. In a real program,
+ // SecurityError should typically print the message and call log.Fatal or os.Exit.
+ SecurityError(msg string)
+}
+
+// ErrWriteConflict signals a write conflict during Client.WriteConfig.
+var ErrWriteConflict = errors.New("write conflict")
+
+// ErrSecurity is returned by Client operations that invoke Client.SecurityError.
+var ErrSecurity = errors.New("security error: misbehaving server")
+
+// A Client is a client connection to a checksum database.
+// All the methods are safe for simultaneous use by multiple goroutines.
+type Client struct {
+ ops ClientOps // access to operations in the external world
+
+ didLookup uint32
+
+ // one-time initialized data
+ initOnce sync.Once
+ initErr error // init error, if any
+ name string // name of accepted verifier
+ verifiers note.Verifiers // accepted verifiers (just one, but Verifiers for note.Open)
+ tileReader tileReader
+ tileHeight int
+ nosumdb string
+
+ record parCache // cache of record lookup, keyed by path@vers
+ tileCache parCache // cache of c.readTile, keyed by tile
+
+ latestMu sync.Mutex
+ latest tlog.Tree // latest known tree head
+ latestMsg []byte // encoded signed note for latest
+
+ tileSavedMu sync.Mutex
+ tileSaved map[tlog.Tile]bool // which tiles have been saved using c.ops.WriteCache already
+}
+
+// NewClient returns a new Client using the given ClientOps.
+func NewClient(ops ClientOps) *Client {
+ return &Client{
+ ops: ops,
+ }
+}
+
+// init initializes the client (if not already initialized)
+// and returns any initialization error.
+func (c *Client) init() error {
+ c.initOnce.Do(c.initWork)
+ return c.initErr
+}
+
+// initWork does the actual initialization work.
+func (c *Client) initWork() {
+ defer func() {
+ if c.initErr != nil {
+ c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr)
+ }
+ }()
+
+ c.tileReader.c = c
+ if c.tileHeight == 0 {
+ c.tileHeight = 8
+ }
+ c.tileSaved = make(map[tlog.Tile]bool)
+
+ vkey, err := c.ops.ReadConfig("key")
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ verifier, err := note.NewVerifier(strings.TrimSpace(string(vkey)))
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ c.verifiers = note.VerifierList(verifier)
+ c.name = verifier.Name()
+
+ data, err := c.ops.ReadConfig(c.name + "/latest")
+ if err != nil {
+ c.initErr = err
+ return
+ }
+ if err := c.mergeLatest(data); err != nil {
+ c.initErr = err
+ return
+ }
+}
+
+// SetTileHeight sets the tile height for the Client.
+// Any call to SetTileHeight must happen before the first call to Lookup.
+// If SetTileHeight is not called, the Client defaults to tile height 8.
+// SetTileHeight can be called at most once,
+// and if so it must be called before the first call to Lookup.
+func (c *Client) SetTileHeight(height int) {
+ if atomic.LoadUint32(&c.didLookup) != 0 {
+ panic("SetTileHeight used after Lookup")
+ }
+ if height <= 0 {
+ panic("invalid call to SetTileHeight")
+ }
+ if c.tileHeight != 0 {
+ panic("multiple calls to SetTileHeight")
+ }
+ c.tileHeight = height
+}
+
+// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
+// For any module path matching one of the patterns,
+// Lookup will return ErrGONOSUMDB.
+// SetGONOSUMDB can be called at most once,
+// and if so it must be called before the first call to Lookup.
+func (c *Client) SetGONOSUMDB(list string) {
+ if atomic.LoadUint32(&c.didLookup) != 0 {
+ panic("SetGONOSUMDB used after Lookup")
+ }
+ if c.nosumdb != "" {
+ panic("multiple calls to SetGONOSUMDB")
+ }
+ c.nosumdb = list
+}
+
+// ErrGONOSUMDB is returned by Lookup for paths that match
+// a pattern listed in the GONOSUMDB list (set by SetGONOSUMDB,
+// usually from the environment variable).
+var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")
+
+func (c *Client) skip(target string) bool {
+ return globsMatchPath(c.nosumdb, target)
+}
+
+// globsMatchPath reports whether any path prefix of target
+// matches one of the glob patterns (as defined by path.Match)
+// in the comma-separated globs list.
+// It ignores any empty or malformed patterns in the list.
+func globsMatchPath(globs, target string) bool {
+ for globs != "" {
+ // Extract next non-empty glob in comma-separated list.
+ var glob string
+ if i := strings.Index(globs, ","); i >= 0 {
+ glob, globs = globs[:i], globs[i+1:]
+ } else {
+ glob, globs = globs, ""
+ }
+ if glob == "" {
+ continue
+ }
+
+ // A glob with N+1 path elements (N slashes) needs to be matched
+ // against the first N+1 path elements of target,
+ // which end just before the N+1'th slash.
+ n := strings.Count(glob, "/")
+ prefix := target
+ // Walk target, counting slashes, truncating at the N+1'th slash.
+ for i := 0; i < len(target); i++ {
+ if target[i] == '/' {
+ if n == 0 {
+ prefix = target[:i]
+ break
+ }
+ n--
+ }
+ }
+ if n > 0 {
+ // Not enough prefix elements.
+ continue
+ }
+ matched, _ := path.Match(glob, prefix)
+ if matched {
+ return true
+ }
+ }
+ return false
+}
+
+// Lookup returns the go.sum lines for the given module path and version.
+// The version may end in a /go.mod suffix, in which case Lookup returns
+// the go.sum lines for the module's go.mod-only hash.
+func (c *Client) Lookup(path, vers string) (lines []string, err error) {
+ atomic.StoreUint32(&c.didLookup, 1)
+
+ if c.skip(path) {
+ return nil, ErrGONOSUMDB
+ }
+
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("%s@%s: %v", path, vers, err)
+ }
+ }()
+
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+
+ // Prepare encoded cache filename / URL.
+ epath, err := module.EscapePath(path)
+ if err != nil {
+ return nil, err
+ }
+ evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod"))
+ if err != nil {
+ return nil, err
+ }
+ remotePath := "/lookup/" + epath + "@" + evers
+ file := c.name + remotePath
+
+ // Fetch the data.
+ // The lookupCache avoids redundant ReadCache/GetURL operations
+ // (especially since go.sum lines tend to come in pairs for a given
+ // path and version) and also avoids having multiple of the same
+ // request in flight at once.
+ type cached struct {
+ data []byte
+ err error
+ }
+ result := c.record.Do(file, func() interface{} {
+ // Try the on-disk cache, or else get from web.
+ writeCache := false
+ data, err := c.ops.ReadCache(file)
+ if err != nil {
+ data, err = c.ops.ReadRemote(remotePath)
+ if err != nil {
+ return cached{nil, err}
+ }
+ writeCache = true
+ }
+
+ // Validate the record before using it for anything.
+ id, text, treeMsg, err := tlog.ParseRecord(data)
+ if err != nil {
+ return cached{nil, err}
+ }
+ if err := c.mergeLatest(treeMsg); err != nil {
+ return cached{nil, err}
+ }
+ if err := c.checkRecord(id, text); err != nil {
+ return cached{nil, err}
+ }
+
+ // Now that we've validated the record,
+ // save it to the on-disk cache (unless that's where it came from).
+ if writeCache {
+ c.ops.WriteCache(file, data)
+ }
+
+ return cached{data, nil}
+ }).(cached)
+ if result.err != nil {
+ return nil, result.err
+ }
+
+ // Extract the lines for the specific version we want
+ // (with or without /go.mod).
+ prefix := path + " " + vers + " "
+ var hashes []string
+ for _, line := range strings.Split(string(result.data), "\n") {
+ if strings.HasPrefix(line, prefix) {
+ hashes = append(hashes, line)
+ }
+ }
+ return hashes, nil
+}
+
+// mergeLatest merges the tree head in msg
+// with the Client's current latest tree head,
+// ensuring the result is a consistent timeline.
+// If the result is inconsistent, mergeLatest calls c.ops.SecurityError
+// with a detailed security error message and then
+// (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
+// If the Client's current latest tree head moves forward,
+// mergeLatest updates the underlying configuration file as well,
+// taking care to merge any independent updates to that configuration.
+func (c *Client) mergeLatest(msg []byte) error {
+ // Merge msg into our in-memory copy of the latest tree head.
+ when, err := c.mergeLatestMem(msg)
+ if err != nil {
+ return err
+ }
+ if when != msgFuture {
+ // msg matched our present or was in the past.
+ // No change to our present, so no update of config file.
+ return nil
+ }
+
+ // Flush our extended timeline back out to the configuration file.
+ // If the configuration file has been updated in the interim,
+ // we need to merge any updates made there as well.
+ // Note that writeConfig is an atomic compare-and-swap.
+ for {
+ msg, err := c.ops.ReadConfig(c.name + "/latest")
+ if err != nil {
+ return err
+ }
+ when, err := c.mergeLatestMem(msg)
+ if err != nil {
+ return err
+ }
+ if when != msgPast {
+ // msg matched our present or was from the future,
+ // and now our in-memory copy matches.
+ return nil
+ }
+
+ // msg (== config) is in the past, so we need to update it.
+ c.latestMu.Lock()
+ latestMsg := c.latestMsg
+ c.latestMu.Unlock()
+ if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
+ // Success or a non-write-conflict error.
+ return err
+ }
+ }
+}
+
+const (
+ msgPast = 1 + iota
+ msgNow
+ msgFuture
+)
+
+// mergeLatestMem is like mergeLatest but is only concerned with
+// updating the in-memory copy of the latest tree head (c.latest)
+// not the configuration file.
+// The when result explains when msg happened relative to our
+// previous idea of c.latest:
+// msgPast means msg was from before c.latest,
+// msgNow means msg was exactly c.latest, and
+// msgFuture means msg was from after c.latest, which has now been updated.
+func (c *Client) mergeLatestMem(msg []byte) (when int, err error) {
+ if len(msg) == 0 {
+ // Accept empty msg as the unsigned, empty timeline.
+ c.latestMu.Lock()
+ latest := c.latest
+ c.latestMu.Unlock()
+ if latest.N == 0 {
+ return msgNow, nil
+ }
+ return msgPast, nil
+ }
+
+ note, err := note.Open(msg, c.verifiers)
+ if err != nil {
+ return 0, fmt.Errorf("reading tree note: %v\nnote:\n%s", err, msg)
+ }
+ tree, err := tlog.ParseTree([]byte(note.Text))
+ if err != nil {
+ return 0, fmt.Errorf("reading tree: %v\ntree:\n%s", err, note.Text)
+ }
+
+ // Other lookups may be calling mergeLatest with other heads,
+ // so c.latest is changing underfoot. We don't want to hold the
+ // c.mu lock during tile fetches, so loop trying to update c.latest.
+ c.latestMu.Lock()
+ latest := c.latest
+ latestMsg := c.latestMsg
+ c.latestMu.Unlock()
+
+ for {
+ // If the tree head looks old, check that it is on our timeline.
+ if tree.N <= latest.N {
+ if err := c.checkTrees(tree, msg, latest, latestMsg); err != nil {
+ return 0, err
+ }
+ if tree.N < latest.N {
+ return msgPast, nil
+ }
+ return msgNow, nil
+ }
+
+ // The tree head looks new. Check that we are on its timeline and try to move our timeline forward.
+ if err := c.checkTrees(latest, latestMsg, tree, msg); err != nil {
+ return 0, err
+ }
+
+ // Install our msg if possible.
+ // Otherwise we will go around again.
+ c.latestMu.Lock()
+ installed := false
+ if c.latest == latest {
+ installed = true
+ c.latest = tree
+ c.latestMsg = msg
+ } else {
+ latest = c.latest
+ latestMsg = c.latestMsg
+ }
+ c.latestMu.Unlock()
+
+ if installed {
+ return msgFuture, nil
+ }
+ }
+}
+
+// checkTrees checks that older (from olderNote) is contained in newer (from newerNote).
+// If an error occurs, such as malformed data or a network problem, checkTrees returns that error.
+// If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed
+// message and calls log.Fatal.
+func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
+ thr := tlog.TileHashReader(newer, &c.tileReader)
+ h, err := tlog.TreeHash(older.N, thr)
+ if err != nil {
+ if older.N == newer.N {
+ return fmt.Errorf("checking tree#%d: %v", older.N, err)
+ }
+ return fmt.Errorf("checking tree#%d against tree#%d: %v", older.N, newer.N, err)
+ }
+ if h == older.Hash {
+ return nil
+ }
+
+ // Detected a fork in the tree timeline.
+ // Start by reporting the inconsistent signed tree notes.
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "SECURITY ERROR\n")
+ fmt.Fprintf(&buf, "go.sum database server misbehavior detected!\n\n")
+ indent := func(b []byte) []byte {
+ return bytes.Replace(b, []byte("\n"), []byte("\n\t"), -1)
+ }
+ fmt.Fprintf(&buf, "old database:\n\t%s\n", indent(olderNote))
+ fmt.Fprintf(&buf, "new database:\n\t%s\n", indent(newerNote))
+
+ // The notes alone are not enough to prove the inconsistency.
+ // We also need to show that the newer note's tree hash for older.N
+ // does not match older.Hash. The consumer of this report could
+ // of course consult the server to try to verify the inconsistency,
+ // but we are holding all the bits we need to prove it right now,
+ // so we might as well print them and make the report not depend
+ // on the continued availability of the misbehaving server.
+ // Preparing this data only reuses the tiled hashes needed for
+ // tlog.TreeHash(older.N, thr) above, so assuming thr is caching tiles,
+// there are no new accesses to the server here, and these operations cannot fail.
+ fmt.Fprintf(&buf, "proof of misbehavior:\n\t%v", h)
+ if p, err := tlog.ProveTree(newer.N, older.N, thr); err != nil {
+ fmt.Fprintf(&buf, "\tinternal error: %v\n", err)
+ } else if err := tlog.CheckTree(p, newer.N, newer.Hash, older.N, h); err != nil {
+ fmt.Fprintf(&buf, "\tinternal error: generated inconsistent proof\n")
+ } else {
+ for _, h := range p {
+ fmt.Fprintf(&buf, "\n\t%v", h)
+ }
+ }
+ c.ops.SecurityError(buf.String())
+ return ErrSecurity
+}
+
+// checkRecord checks that record #id's hash matches data.
+func (c *Client) checkRecord(id int64, data []byte) error {
+ c.latestMu.Lock()
+ latest := c.latest
+ c.latestMu.Unlock()
+
+ if id >= latest.N {
+ return fmt.Errorf("cannot validate record %d in tree of size %d", id, latest.N)
+ }
+ hashes, err := tlog.TileHashReader(latest, &c.tileReader).ReadHashes([]int64{tlog.StoredHashIndex(0, id)})
+ if err != nil {
+ return err
+ }
+ if hashes[0] == tlog.RecordHash(data) {
+ return nil
+ }
+ return fmt.Errorf("cannot authenticate record data in server response")
+}
+
+// tileReader is a *Client wrapper that implements tlog.TileReader.
+// The separate type avoids exposing the ReadTiles and SaveTiles
+// methods on Client itself.
+type tileReader struct {
+ c *Client
+}
+
+func (r *tileReader) Height() int {
+ return r.c.tileHeight
+}
+
+// ReadTiles reads and returns the requested tiles,
+// either from the on-disk cache or the server.
+func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) {
+ // Read all the tiles in parallel.
+ data := make([][]byte, len(tiles))
+ errs := make([]error, len(tiles))
+ var wg sync.WaitGroup
+ for i, tile := range tiles {
+ wg.Add(1)
+ go func(i int, tile tlog.Tile) {
+ defer wg.Done()
+ data[i], errs[i] = r.c.readTile(tile)
+ }(i, tile)
+ }
+ wg.Wait()
+
+ for _, err := range errs {
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return data, nil
+}
+
+// tileCacheKey returns the cache key for the tile.
+func (c *Client) tileCacheKey(tile tlog.Tile) string {
+ return c.name + "/" + tile.Path()
+}
+
+// tileRemotePath returns the remote path for the tile.
+func (c *Client) tileRemotePath(tile tlog.Tile) string {
+ return "/" + tile.Path()
+}
+
+// readTile reads a single tile, either from the on-disk cache or the server.
+func (c *Client) readTile(tile tlog.Tile) ([]byte, error) {
+ type cached struct {
+ data []byte
+ err error
+ }
+
+ result := c.tileCache.Do(tile, func() interface{} {
+ // Try the requested tile in on-disk cache.
+ data, err := c.ops.ReadCache(c.tileCacheKey(tile))
+ if err == nil {
+ c.markTileSaved(tile)
+ return cached{data, nil}
+ }
+
+ // Try the full tile in on-disk cache (if requested tile not already full).
+ // We only save authenticated tiles to the on-disk cache,
+ // so the recreated prefix is equally authenticated.
+ full := tile
+ full.W = 1 << uint(tile.H)
+ if tile != full {
+ data, err := c.ops.ReadCache(c.tileCacheKey(full))
+ if err == nil {
+ c.markTileSaved(tile) // don't save tile later; we already have full
+ return cached{data[:len(data)/full.W*tile.W], nil}
+ }
+ }
+
+ // Try requested tile from server.
+ data, err = c.ops.ReadRemote(c.tileRemotePath(tile))
+ if err == nil {
+ return cached{data, nil}
+ }
+
+ // Try full tile on server.
+ // If the partial tile does not exist, it should be because
+ // the tile has been completed and only the complete one
+ // is available.
+ if tile != full {
+ data, err := c.ops.ReadRemote(c.tileRemotePath(full))
+ if err == nil {
+ // Note: We could save the full tile in the on-disk cache here,
+ // but we don't know if it is valid yet, and we will only find out
+ // about the partial data, not the full data. So let SaveTiles
+ // save the partial tile, and we'll just refetch the full tile later
+ // once we can validate more (or all) of it.
+ return cached{data[:len(data)/full.W*tile.W], nil}
+ }
+ }
+
+ // Nothing worked.
+ // Return the error from the server fetch for the requested (not full) tile.
+ return cached{nil, err}
+ }).(cached)
+
+ return result.data, result.err
+}
+
+// markTileSaved records that tile is already present in the on-disk cache,
+// so that a future SaveTiles for that tile can be ignored.
+func (c *Client) markTileSaved(tile tlog.Tile) {
+ c.tileSavedMu.Lock()
+ c.tileSaved[tile] = true
+ c.tileSavedMu.Unlock()
+}
+
+// SaveTiles saves the now validated tiles.
+func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) {
+ c := r.c
+
+ // Determine which tiles need saving.
+ // (Tiles that came from the cache need not be saved back.)
+ save := make([]bool, len(tiles))
+ c.tileSavedMu.Lock()
+ for i, tile := range tiles {
+ if !c.tileSaved[tile] {
+ save[i] = true
+ c.tileSaved[tile] = true
+ }
+ }
+ c.tileSavedMu.Unlock()
+
+ for i, tile := range tiles {
+ if save[i] {
+ // If WriteCache fails here (out of disk space? i/o error?),
+ // c.tileSaved[tile] is still true and we will not try to write it again.
+ // Next time we run maybe we'll redownload it again and be
+ // more successful.
+ c.ops.WriteCache(c.name+"/"+tile.Path(), data[i])
+ }
+ }
+}
diff --git a/sumdb/client_test.go b/sumdb/client_test.go
new file mode 100644
index 0000000..0f3c481
--- /dev/null
+++ b/sumdb/client_test.go
@@ -0,0 +1,460 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sumdb
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync"
+ "testing"
+
+ "golang.org/x/mod/sumdb/note"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+const (
+ testName = "localhost.localdev/sumdb"
+ testVerifierKey = "localhost.localdev/sumdb+00000c67+AcTrnkbUA+TU4heY3hkjiSES/DSQniBqIeQ/YppAUtK6"
+ testSignerKey = "PRIVATE+KEY+localhost.localdev/sumdb+00000c67+AXu6+oaVaOYuQOFrf1V59JK1owcFlJcHwwXHDfDGxSPk"
+)
+
+func TestClientLookup(t *testing.T) {
+ tc := newTestClient(t)
+ tc.mustHaveLatest(1)
+
+ // Basic lookup.
+ tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
+ tc.mustHaveLatest(3)
+
+ // Everything should now be cached, both for the original package and its /go.mod.
+ tc.getOK = false
+ tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
+ tc.mustLookup("rsc.io/sampler", "v1.3.0/go.mod", "rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=")
+ tc.mustHaveLatest(3)
+ tc.getOK = true
+ tc.getTileOK = false // the cache has what we need
+
+ // Lookup with multiple returned lines.
+ tc.mustLookup("rsc.io/quote", "v1.5.2", "rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=\nrsc.io/quote v1.5.2 h2:xyzzy")
+ tc.mustHaveLatest(3)
+
+ // Lookup with need for !-encoding.
+ // rsc.io/Quote is the only record written after rsc.io/sampler,
+ // so it is the only one that should need more tiles.
+ tc.getTileOK = true
+ tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=")
+ tc.mustHaveLatest(4)
+}
+
+func TestClientBadTiles(t *testing.T) {
+ tc := newTestClient(t)
+
+ flipBits := func() {
+ for url, data := range tc.remote {
+ if strings.Contains(url, "/tile/") {
+ for i := range data {
+ data[i] ^= 0x80
+ }
+ }
+ }
+ }
+
+ // Bad tiles in initial download.
+ tc.mustHaveLatest(1)
+ flipBits()
+ _, err := tc.client.Lookup("rsc.io/sampler", "v1.3.0")
+ tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
+ flipBits()
+ tc.newClient()
+ tc.mustLookup("rsc.io/sampler", "v1.3.0", "rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=")
+
+ // Bad tiles after initial download.
+ flipBits()
+ _, err = tc.client.Lookup("rsc.io/Quote", "v1.5.2")
+ tc.mustError(err, "rsc.io/Quote@v1.5.2: checking tree#3 against tree#4: downloaded inconsistent tile")
+ flipBits()
+ tc.newClient()
+ tc.mustLookup("rsc.io/Quote", "v1.5.2", "rsc.io/Quote v1.5.2 h1:uppercase!=")
+
+ // Bad starting tree hash looks like bad tiles.
+ tc.newClient()
+ text := tlog.FormatTree(tlog.Tree{N: 1, Hash: tlog.Hash{}})
+ data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.config[testName+"/latest"] = data
+ _, err = tc.client.Lookup("rsc.io/sampler", "v1.3.0")
+ tc.mustError(err, "rsc.io/sampler@v1.3.0: initializing sumdb.Client: checking tree#1: downloaded inconsistent tile")
+}
+
+func TestClientFork(t *testing.T) {
+ tc := newTestClient(t)
+ tc2 := tc.fork()
+
+ tc.addRecord("rsc.io/pkg1@v1.5.2", `rsc.io/pkg1 v1.5.2 h1:hash!=
+`)
+ tc.addRecord("rsc.io/pkg1@v1.5.4", `rsc.io/pkg1 v1.5.4 h1:hash!=
+`)
+ tc.mustLookup("rsc.io/pkg1", "v1.5.2", "rsc.io/pkg1 v1.5.2 h1:hash!=")
+
+ tc2.addRecord("rsc.io/pkg1@v1.5.3", `rsc.io/pkg1 v1.5.3 h1:hash!=
+`)
+ tc2.addRecord("rsc.io/pkg1@v1.5.4", `rsc.io/pkg1 v1.5.4 h1:hash!=
+`)
+ tc2.mustLookup("rsc.io/pkg1", "v1.5.4", "rsc.io/pkg1 v1.5.4 h1:hash!=")
+
+ key := "/lookup/rsc.io/pkg1@v1.5.2"
+ tc2.remote[key] = tc.remote[key]
+ _, err := tc2.client.Lookup("rsc.io/pkg1", "v1.5.2")
+ tc2.mustError(err, ErrSecurity.Error())
+
+ /*
+ SECURITY ERROR
+ go.sum database server misbehavior detected!
+
+ old database:
+ go.sum database tree
+ 5
+ nWzN20+pwMt62p7jbv1/NlN95ePTlHijabv5zO/s36w=
+
+ — localhost.localdev/sumdb AAAMZ5/2FVAdMH58kmnz/0h299pwyskEbzDzoa2/YaPdhvLya4YWDFQQxu2TQb5GpwAH4NdWnTwuhILafisyf3CNbgg=
+
+ new database:
+ go.sum database tree
+ 6
+ wc4SkQt52o5W2nQ8To2ARs+mWuUJjss+sdleoiqxMmM=
+
+ — localhost.localdev/sumdb AAAMZ6oRNswlEZ6ZZhxrCvgl1MBy+nusq4JU+TG6Fe2NihWLqOzb+y2c2kzRLoCr4tvw9o36ucQEnhc20e4nA4Qc/wc=
+
+ proof of misbehavior:
+ T7i+H/8ER4nXOiw4Bj0koZOkGjkxoNvlI34GpvhHhQg=
+ Nsuejv72de9hYNM5bqFv8rv3gm3zJQwv/DT/WNbLDLA=
+ mOmqqZ1aI/lzS94oq/JSbj7pD8Rv9S+xDyi12BtVSHo=
+ /7Aw5jVSMM9sFjQhaMg+iiDYPMk6decH7QLOGrL9Lx0=
+ */
+
+ wants := []string{
+ "SECURITY ERROR",
+ "go.sum database server misbehavior detected!",
+ "old database:\n\tgo.sum database tree\n\t5\n",
+ "— localhost.localdev/sumdb AAAMZ5/2FVAd",
+ "new database:\n\tgo.sum database tree\n\t6\n",
+ "— localhost.localdev/sumdb AAAMZ6oRNswl",
+ "proof of misbehavior:\n\tT7i+H/8ER4nXOiw4Bj0k",
+ }
+ text := tc2.security.String()
+ for _, want := range wants {
+ if !strings.Contains(text, want) {
+ t.Fatalf("cannot find %q in security text:\n%s", want, text)
+ }
+ }
+}
+
+func TestClientGONOSUMDB(t *testing.T) {
+ tc := newTestClient(t)
+ tc.client.SetGONOSUMDB("p,*/q")
+ tc.client.Lookup("rsc.io/sampler", "v1.3.0") // initialize before we turn off network
+ tc.getOK = false
+
+ ok := []string{
+ "abc",
+ "a/p",
+ "pq",
+ "q",
+ "n/o/p/q",
+ }
+ skip := []string{
+ "p",
+ "p/x",
+ "x/q",
+ "x/q/z",
+ }
+
+ for _, path := range ok {
+ _, err := tc.client.Lookup(path, "v1.0.0")
+ if err == ErrGONOSUMDB {
+ t.Errorf("Lookup(%q): ErrGONOSUMDB, wanted failed actual lookup", path)
+ }
+ }
+ for _, path := range skip {
+ _, err := tc.client.Lookup(path, "v1.0.0")
+ if err != ErrGONOSUMDB {
+ t.Errorf("Lookup(%q): %v, wanted ErrGONOSUMDB", path, err)
+ }
+ }
+}
+
+// A testClient is a self-contained client-side testing environment.
+type testClient struct {
+ t *testing.T // active test
+ client *Client // client being tested
+ tileHeight int // tile height to use (default 2)
+ getOK bool // should tc.GetURL succeed?
+ getTileOK bool // should tc.GetURL of tiles succeed?
+ treeSize int64
+ hashes []tlog.Hash
+ remote map[string][]byte
+ signer note.Signer
+
+ // mu protects config, cache, log, security
+ // during concurrent use of the exported methods
+ // by the client itself (testClient is the Client's ClientOps,
+ // and the Client methods can both read and write these fields).
+ // Unexported methods invoked directly by the test
+ // (for example, addRecord) need not hold the mutex:
+ // for proper test execution those methods should only
+ // be called when the Client is idle and not using its ClientOps.
+ // Not holding the mutex in those methods ensures
+ // that if a mistake is made, go test -race will report it.
+ // (Holding the mutex would eliminate the race report but
+ // not the underlying problem.)
+ // Similarly, the getOK and getTileOK fields are not protected by the
+ // mutex, because the Client methods only read them.
+ mu sync.Mutex
+ config map[string][]byte
+ cache map[string][]byte
+ security bytes.Buffer
+}
+
+// newTestClient returns a new testClient that will call t.Fatal on error
+// and has a few records already available on the remote server.
+func newTestClient(t *testing.T) *testClient {
+ tc := &testClient{
+ t: t,
+ tileHeight: 2,
+ getOK: true,
+ getTileOK: true,
+ config: make(map[string][]byte),
+ cache: make(map[string][]byte),
+ remote: make(map[string][]byte),
+ }
+
+ tc.config["key"] = []byte(testVerifierKey + "\n")
+ var err error
+ tc.signer, err = note.NewSigner(testSignerKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tc.newClient()
+
+ tc.addRecord("rsc.io/quote@v1.5.2", `rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=
+rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
+rsc.io/quote v1.5.2 h2:xyzzy
+`)
+
+ tc.addRecord("golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c", `golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+`)
+ tc.addRecord("rsc.io/sampler@v1.3.0", `rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+`)
+ tc.config[testName+"/latest"] = tc.signTree(1)
+
+ tc.addRecord("rsc.io/!quote@v1.5.2", `rsc.io/Quote v1.5.2 h1:uppercase!=
+`)
+ return tc
+}
+
+// newClient resets the Client associated with tc.
+// This clears any in-memory cache from the Client
+// but not tc's on-disk cache.
+func (tc *testClient) newClient() {
+ tc.client = NewClient(tc)
+ tc.client.SetTileHeight(tc.tileHeight)
+}
+
+// mustLookup does a lookup for path@vers and checks that the lines that come back match want.
+func (tc *testClient) mustLookup(path, vers, want string) {
+ tc.t.Helper()
+ lines, err := tc.client.Lookup(path, vers)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ if strings.Join(lines, "\n") != want {
+ tc.t.Fatalf("Lookup(%q, %q):\n\t%s\nwant:\n\t%s", path, vers, strings.Join(lines, "\n\t"), strings.Replace(want, "\n", "\n\t", -1))
+ }
+}
+
+// mustHaveLatest checks that the on-disk configuration
+// for latest is a tree of size n.
+func (tc *testClient) mustHaveLatest(n int64) {
+ tc.t.Helper()
+
+ latest := tc.config[testName+"/latest"]
+ lines := strings.Split(string(latest), "\n")
+ if len(lines) < 2 || lines[1] != fmt.Sprint(n) {
+ tc.t.Fatalf("/latest should have tree %d, but has:\n%s", n, latest)
+ }
+}
+
+// mustError checks that err's error string contains the text.
+func (tc *testClient) mustError(err error, text string) {
+ tc.t.Helper()
+ if err == nil || !strings.Contains(err.Error(), text) {
+ tc.t.Fatalf("err = %v, want %q", err, text)
+ }
+}
+
+// fork returns a copy of tc.
+// Changes made to the new copy or to tc are not reflected in the other.
+func (tc *testClient) fork() *testClient {
+ tc2 := &testClient{
+ t: tc.t,
+ getOK: tc.getOK,
+ getTileOK: tc.getTileOK,
+ tileHeight: tc.tileHeight,
+ treeSize: tc.treeSize,
+ hashes: append([]tlog.Hash{}, tc.hashes...),
+ signer: tc.signer,
+ config: copyMap(tc.config),
+ cache: copyMap(tc.cache),
+ remote: copyMap(tc.remote),
+ }
+ tc2.newClient()
+ return tc2
+}
+
+func copyMap(m map[string][]byte) map[string][]byte {
+ m2 := make(map[string][]byte)
+ for k, v := range m {
+ m2[k] = v
+ }
+ return m2
+}
+
+// ReadHashes is tc's implementation of tlog.HashReader, for use with
+// tlog.TreeHash and so on.
+func (tc *testClient) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
+ var list []tlog.Hash
+ for _, id := range indexes {
+ list = append(list, tc.hashes[id])
+ }
+ return list, nil
+}
+
+// addRecord adds a log record using the given (!-encoded) key and data.
+func (tc *testClient) addRecord(key, data string) {
+ tc.t.Helper()
+
+ // Create record, add hashes to log tree.
+ id := tc.treeSize
+ tc.treeSize++
+ rec, err := tlog.FormatRecord(id, []byte(data))
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash([]byte(data)), tc)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.hashes = append(tc.hashes, hashes...)
+
+ // Create lookup result.
+ tc.remote["/lookup/"+key] = append(rec, tc.signTree(tc.treeSize)...)
+
+ // Create new tiles.
+ tiles := tlog.NewTiles(tc.tileHeight, id, tc.treeSize)
+ for _, tile := range tiles {
+ data, err := tlog.ReadTileData(tile, tc)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ tc.remote["/"+tile.Path()] = data
+ // TODO delete old partial tiles
+ }
+}
+
+// signTree returns the signed head for the tree of the given size.
+func (tc *testClient) signTree(size int64) []byte {
+ h, err := tlog.TreeHash(size, tc)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ text := tlog.FormatTree(tlog.Tree{N: size, Hash: h})
+ data, err := note.Sign(&note.Note{Text: string(text)}, tc.signer)
+ if err != nil {
+ tc.t.Fatal(err)
+ }
+ return data
+}
+
+// ReadRemote is for tc's implementation of Client.
+func (tc *testClient) ReadRemote(path string) ([]byte, error) {
+ // No mutex here because only the Client should be running
+ // and the Client cannot change tc.getOK or tc.getTileOK.
+ if !tc.getOK {
+ return nil, fmt.Errorf("disallowed remote read %s", path)
+ }
+ if strings.Contains(path, "/tile/") && !tc.getTileOK {
+ return nil, fmt.Errorf("disallowed remote tile read %s", path)
+ }
+
+ data, ok := tc.remote[path]
+ if !ok {
+ return nil, fmt.Errorf("no remote path %s", path)
+ }
+ return data, nil
+}
+
+// ReadConfig is for tc's implementation of Client.
+func (tc *testClient) ReadConfig(file string) ([]byte, error) {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ data, ok := tc.config[file]
+ if !ok {
+ return nil, fmt.Errorf("no config %s", file)
+ }
+ return data, nil
+}
+
+// WriteConfig is for tc's implementation of Client.
+func (tc *testClient) WriteConfig(file string, old, new []byte) error {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ data := tc.config[file]
+ if !bytes.Equal(old, data) {
+ return ErrWriteConflict
+ }
+ tc.config[file] = new
+ return nil
+}
+
+// ReadCache is for tc's implementation of Client.
+func (tc *testClient) ReadCache(file string) ([]byte, error) {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ data, ok := tc.cache[file]
+ if !ok {
+ return nil, fmt.Errorf("no cache %s", file)
+ }
+ return data, nil
+}
+
+// WriteCache is for tc's implementation of Client.
+func (tc *testClient) WriteCache(file string, data []byte) {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ tc.cache[file] = data
+}
+
+// Log is for tc's implementation of Client.
+func (tc *testClient) Log(msg string) {
+ tc.t.Log(msg)
+}
+
+// SecurityError is for tc's implementation of Client.
+func (tc *testClient) SecurityError(msg string) {
+ tc.mu.Lock()
+ defer tc.mu.Unlock()
+
+ fmt.Fprintf(&tc.security, "%s\n", strings.TrimRight(msg, "\n"))
+}
diff --git a/sumdb/test.go b/sumdb/test.go
new file mode 100644
index 0000000..534ca3e
--- /dev/null
+++ b/sumdb/test.go
@@ -0,0 +1,128 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sumdb
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "sync"
+
+ "golang.org/x/mod/sumdb/note"
+ "golang.org/x/mod/sumdb/tlog"
+)
+
+// NewTestServer constructs a new TestServer
+// that will sign its tree with the given signer key
+// (see golang.org/x/mod/sumdb/note)
+// and fetch new records as needed by calling gosum.
+func NewTestServer(signer string, gosum func(path, vers string) ([]byte, error)) *TestServer {
+ return &TestServer{signer: signer, gosum: gosum}
+}
+
+// A TestServer is an in-memory implementation of Server for testing.
+type TestServer struct {
+ signer string
+ gosum func(path, vers string) ([]byte, error)
+
+ mu sync.Mutex
+ hashes testHashes
+ records [][]byte
+ lookup map[string]int64
+}
+
+// testHashes implements tlog.HashReader, reading from a slice.
+type testHashes []tlog.Hash
+
+func (h testHashes) ReadHashes(indexes []int64) ([]tlog.Hash, error) {
+ var list []tlog.Hash
+ for _, id := range indexes {
+ list = append(list, h[id])
+ }
+ return list, nil
+}
+
+func (s *TestServer) Signed(ctx context.Context) ([]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ size := int64(len(s.records))
+ h, err := tlog.TreeHash(size, s.hashes)
+ if err != nil {
+ return nil, err
+ }
+ text := tlog.FormatTree(tlog.Tree{N: size, Hash: h})
+ signer, err := note.NewSigner(s.signer)
+ if err != nil {
+ return nil, err
+ }
+ return note.Sign(&note.Note{Text: string(text)}, signer)
+}
+
+func (s *TestServer) ReadRecords(ctx context.Context, id, n int64) ([][]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var list [][]byte
+ for i := int64(0); i < n; i++ {
+ if id+i >= int64(len(s.records)) {
+ return nil, fmt.Errorf("missing records")
+ }
+ list = append(list, s.records[id+i])
+ }
+ return list, nil
+}
+
+func (s *TestServer) Lookup(ctx context.Context, key string) (int64, error) {
+ s.mu.Lock()
+ id, ok := s.lookup[key]
+ s.mu.Unlock()
+ if ok {
+ return id, nil
+ }
+
+ // Look up module and compute go.sum lines.
+ i := strings.Index(key, "@")
+ if i < 0 {
+ return 0, fmt.Errorf("invalid lookup key %q", key)
+ }
+ path, vers := key[:i], key[i+1:]
+ data, err := s.gosum(path, vers)
+ if err != nil {
+ return 0, err
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // We ran the fetch without the lock.
+ // If another fetch happened and committed, use it instead.
+ id, ok = s.lookup[key]
+ if ok {
+ return id, nil
+ }
+
+ // Add record.
+ id = int64(len(s.records))
+ s.records = append(s.records, data)
+ if s.lookup == nil {
+ s.lookup = make(map[string]int64)
+ }
+ s.lookup[key] = id
+ hashes, err := tlog.StoredHashesForRecordHash(id, tlog.RecordHash([]byte(data)), s.hashes)
+ if err != nil {
+ panic(err)
+ }
+ s.hashes = append(s.hashes, hashes...)
+
+ return id, nil
+}
+
+func (s *TestServer) ReadTileData(ctx context.Context, t tlog.Tile) ([]byte, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return tlog.ReadTileData(t, s.hashes)
+}