diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 026e650..2b59c41 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/gojp/goreportcard", - "GoVersion": "devel +fd9fd4c Fri Feb 5 17:06:51 2016 +0000", + "GoVersion": "go1.6", "GodepVersion": "v58", "Packages": [ "./..." @@ -8,8 +8,8 @@ "Deps": [ { "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.1.0-66-gfc7313d", - "Rev": "fc7313d46ce2ee887e6d7d32acf648a30e379d8d" + "Comment": "v1.1.0-81-g0fd4c05", + "Rev": "0fd4c0547d204c7b1cad6db6f3adad5f2cf453e5" }, { "ImportPath": "github.com/dustin/go-humanize", diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d68..0000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go deleted file mode 100644 index a02c367..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket_test.go +++ /dev/null @@ -1,1827 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "log" - "math/rand" - "os" - "strconv" - "strings" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a bucket that gets a non-existent key returns nil. -func TestBucket_Get_NonExistent(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can read a value that is not flushed yet. -func TestBucket_Get_FromNode(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket retrieved via Get() returns a nil. -func TestBucket_Get_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - - if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write a key/value. -func TestBucket_Put(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("bar"), v) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can rewrite a key in the same transaction. 
-func TestBucket_Put_Repeat(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("baz")); err != nil { - t.Fatal(err) - } - - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("baz"), value) { - t.Fatalf("unexpected value: %v", value) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write a bunch of large values. -func TestBucket_Put_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - count, factor := 100, 200 - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 1; i < count; i++ { - if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - value := b.Get([]byte(strings.Repeat("0", i*factor))) - if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { - t.Fatalf("unexpected value: %v", value) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a database can perform multiple large appends safely. -func TestDB_Put_VeryLarge(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - n, batchN := 400000, 200000 - ksize, vsize := 8, 500 - - db := MustOpenDB() - defer db.MustClose() - - for i := 0; i < n; i += batchN { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for j := 0; j < batchN; j++ { - k, v := make([]byte, ksize), make([]byte, vsize) - binary.BigEndian.PutUint32(k, uint32(i+j)) - if err := b.Put(k, v); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - } -} - -// Ensure that a setting a value on a key with a bucket value returns an error. -func TestBucket_Put_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b0, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a setting a value while the transaction is closed returns an error. -func TestBucket_Put_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that setting a value on a read-only bucket returns an error. 
-func TestBucket_Put_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can delete an existing key. -func TestBucket_Delete(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a large set of keys will work correctly. -func TestBucket_Delete_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); v != nil { - t.Fatalf("unexpected value: %v, i=%d", v, i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Deleting a very large list of keys will cause the freelist to use overflow. -func TestBucket_Delete_FreelistOverflow(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := MustOpenDB() - defer db.MustClose() - - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - } - - // Delete all of them in one large transaction - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that accessing and updating nested buckets is ok across transactions. -func TestBucket_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - // Create a widgets/foo bucket. 
- _, err = b.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - // Create a widgets/bar key. - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Update widgets/bar. - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Cause a split. - if err := db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Insert into widgets/foo/baz. - if err := db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Verify. - if err := db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { - t.Fatalf("unexpected value: %v", v) - } - if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) { - t.Fatalf("unexpected value: %v", v) - } - for i := 0; i < 10000; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { - t.Fatalf("unexpected value: %v", v) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket using Delete() returns an error. -func TestBucket_Delete_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a key on a read-only bucket returns an error. -func TestBucket_Delete_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a deleting value while the transaction is closed returns an error. -func TestBucket_Delete_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that deleting a bucket causes nested buckets to be deleted. 
-func TestBucket_DeleteBucket_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. -func TestBucket_DeleteBucket_Nested2(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - widgets := tx.Bucket([]byte("widgets")) - if widgets == nil { - t.Fatal("expected widgets bucket") - } - - foo := widgets.Bucket([]byte("foo")) - if foo == nil { - t.Fatal("expected foo bucket") - } - - bar := foo.Bucket([]byte("bar")) - if bar == nil { - t.Fatal("expected bar bucket") - } - - if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("expected bucket to be deleted") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. -// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly. -func TestBucket_DeleteBucket_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a simple value retrieved via Bucket() returns a nil. 
-func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { - t.Fatal("expected nil bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket on an existing non-bucket key returns an error. -func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket on an existing non-bucket key returns an error. -func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can return an autoincrementing sequence. -func TestBucket_NextSequence(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - woojits, err := tx.CreateBucket([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - - // Make sure sequence increments. - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpecte sequence: %d", seq) - } - - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - - // Buckets should be separate. - if seq, err := woojits.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpected sequence: %d", 1) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket will persist an autoincrementing sequence even if its -// the only thing updated on the bucket. 
-// https://github.com/boltdb/bolt/issues/296 -func TestBucket_NextSequence_Persist(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that retrieving the next sequence on a read-only bucket returns an error. -func TestBucket_NextSequence_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - _, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that retrieving the next sequence for a bucket on a closed database return an error. -func TestBucket_NextSequence_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if _, err := b.NextSequence(); err != bolt.ErrTxClosed { - t.Fatal(err) - } -} - -// Ensure a user can loop over all key/value pairs in a bucket. -func TestBucket_ForEach(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0001")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0002")); err != nil { - t.Fatal(err) - } - - var index int - if err := b.ForEach(func(k, v []byte) error { - switch index { - case 0: - if !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - case 1: - if !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0001")) { - t.Fatalf("unexpected value: %v", v) - } - case 2: - if !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0000")) { - t.Fatalf("unexpected value: %v", v) - } - } - index++ - return nil - }); err != nil { - t.Fatal(err) - } - - if index != 3 { - t.Fatalf("unexpected index: %d", index) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a database can stop iteration early. 
-func TestBucket_ForEach_ShortCircuit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0000")); err != nil { - t.Fatal(err) - } - - var index int - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - index++ - if bytes.Equal(k, []byte("baz")) { - return errors.New("marker") - } - return nil - }); err == nil || err.Error() != "marker" { - t.Fatalf("unexpected error: %s", err) - } - if index != 2 { - t.Fatalf("unexpected index: %d", index) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that looping over a bucket on a closed database returns an error. -func TestBucket_ForEach_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that an error is returned when inserting with an empty key. -func TestBucket_Put_EmptyKey(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { - t.Fatalf("unexpected error: %s", err) - } - if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when inserting with a key that's too large. -func TestBucket_Put_KeyTooLarge(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when inserting a value that's too large. -func TestBucket_Put_ValueTooLarge(t *testing.T) { - // Skip this test on DroneCI because the machine is resource constrained. - if os.Getenv("DRONE") == "true" { - t.Skip("not enough RAM for test") - } - - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Add bucket with fewer keys but one big value. 
- bigKey := []byte("really-big-value") - for i := 0; i < 500; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("woojits")).Stats() - if stats.BranchPageN != 1 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 7 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 2 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 501 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 2 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } - - branchInuse := 16 // branch page header - branchInuse += 7 * 16 // branch elements - branchInuse += 7 * 3 // branch keys (6 3-byte keys) - if stats.BranchInuse != branchInuse { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } - - leafInuse := 7 * 16 // leaf page header - leafInuse += 501 * 16 // leaf elements - leafInuse += 500*3 + len(bigKey) // leaf keys - leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - if stats.LeafInuse != leafInuse { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - // Only check allocations for 4KB pages. - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 4096 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 36864 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 0 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 0 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket with random insertion utilizes fill percentage correctly. -func TestBucket_Stats_RandomFill(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } else if os.Getpagesize() != 4096 { - t.Skip("invalid page size for test") - } - - db := MustOpenDB() - defer db.MustClose() - - // Add a set of values in random order. It will be the same random - // order so we can maintain consistency between test runs. 
- var count int - rand := rand.New(rand.NewSource(42)) - for _, i := range rand.Perm(1000) { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - b.FillPercent = 0.9 - for _, j := range rand.Perm(100) { - index := (j * 10000) + i - if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { - t.Fatal(err) - } - count++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("woojits")).Stats() - if stats.KeyN != 100000 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } - - if stats.BranchPageN != 98 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.BranchInuse != 130984 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.BranchAlloc != 401408 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } - - if stats.LeafPageN != 3412 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.LeafInuse != 4742482 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } else if stats.LeafAlloc != 13975552 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats_Small(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. 
- b, err := tx.CreateBucket([]byte("whozawhats")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16+16+6 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 0 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. 
-func TestBucket_Stats_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { - t.Fatal(err) - } - } - - bar, err := b.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 10; i++ { - if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - - baz, err := bar.CreateBucket([]byte("baz")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 10; i++ { - if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("foo")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 2 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 122 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 3 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } - - foo := 16 // foo (pghdr) - foo += 101 * 16 // foo leaf elements - foo += 100*2 + 100*2 // foo leaf key/values - foo += 3 + 16 // foo -> bar key/value - - bar := 16 // bar (pghdr) - bar += 11 * 16 // bar leaf elements - bar += 10 + 10 // bar leaf key/values - bar += 3 + 16 // bar -> baz key/value - - baz := 16 // baz (inline) (pghdr) - baz += 10 * 16 // baz leaf elements - baz += 10 + 10 // baz leaf key/values - - if stats.LeafInuse != foo+bar+baz { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 8192 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 3 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != baz { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a large bucket can calculate stats. -func TestBucket_Stats_Large(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := MustOpenDB() - defer db.MustClose() - - var index int - for i := 0; i < 100; i++ { - // Add bucket with lots of keys. 
- if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 1000; i++ { - if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { - t.Fatal(err) - } - index++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("widgets")).Stats() - if stats.BranchPageN != 13 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 1196 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 100000 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 3 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 25257 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 2596916 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 53248 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 4898816 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 0 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 0 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write random keys and values across multiple transactions. -func TestBucket_Put_Single(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - index := 0 - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - m := make(map[string][]byte) - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { - panic("put error: " + err.Error()) - } - m[string(item.Key)] = item.Value - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify all key/values so far. - if err := db.View(func(tx *bolt.Tx) error { - i := 0 - for k, v := range m { - value := tx.Bucket([]byte("widgets")).Get([]byte(k)) - if !bytes.Equal(value, v) { - t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) - db.CopyTempFile() - t.FailNow() - } - i++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - index++ - return true - }, nil); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can insert multiple key/value pairs at once. -func TestBucket_Put_Multiple(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. 
- if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify all items exist. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - value := b.Get(item.Key) - if !bytes.Equal(item.Value, value) { - db.CopyTempFile() - t.Fatalf("exp=%x; got=%x", item.Value, value) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - return true - }, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. -func TestBucket_Delete_Quick(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Remove items one at a time and check consistency. - for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete(item.Key) - }); err != nil { - t.Fatal(err) - } - } - - // Anything before our deletion index should be nil. - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) - return nil - }); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - return true - }, qconfig()); err != nil { - t.Error(err) - } -} - -func ExampleBucket_Put() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a bucket. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - - // Set the value "bar" for the key "foo". - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Read value back in a different read-only transaction. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleBucket_Delete() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a bucket. 
- b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - - // Set the value "bar" for the key "foo". - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - - // Retrieve the key back from the database and verify it. - value := b.Get([]byte("foo")) - fmt.Printf("The value of 'foo' was: %s\n", value) - - return nil - }); err != nil { - log.Fatal(err) - } - - // Delete the key in a different write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - }); err != nil { - log.Fatal(err) - } - - // Retrieve the key again. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if value == nil { - fmt.Printf("The value of 'foo' is now: nil\n") - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' was: bar - // The value of 'foo' is now: nil -} - -func ExampleBucket_ForEach() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Insert data into a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - return err - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - return err - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - return err - } - - // Iterate over items in sorted key order. - if err := b.ForEach(func(k, v []byte) error { - fmt.Printf("A %s is %s.\n", k, v) - return nil - }); err != nil { - return err - } - - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go deleted file mode 100644 index b96e6f7..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go +++ /dev/null @@ -1,1532 +0,0 @@ -package main - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - "unsafe" - - "github.com/boltdb/bolt" -) - -var ( - // ErrUsage is returned when a usage message was printed and the process - // should simply exit with an error. - ErrUsage = errors.New("usage") - - // ErrUnknownCommand is returned when a CLI command is not specified. - ErrUnknownCommand = errors.New("unknown command") - - // ErrPathRequired is returned when the path to a Bolt database is not specified. - ErrPathRequired = errors.New("path required") - - // ErrFileNotFound is returned when a Bolt database does not exist. - ErrFileNotFound = errors.New("file not found") - - // ErrInvalidValue is returned when a benchmark reads an unexpected value. - ErrInvalidValue = errors.New("invalid value") - - // ErrCorrupt is returned when a checking a data file finds errors. - ErrCorrupt = errors.New("invalid value") - - // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. 
- ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") - - // ErrPageIDRequired is returned when a required page id is not specified. - ErrPageIDRequired = errors.New("page id required") - - // ErrPageNotFound is returned when specifying a page above the high water mark. - ErrPageNotFound = errors.New("page not found") - - // ErrPageFreed is returned when reading a page that has already been freed. - ErrPageFreed = errors.New("page freed") -) - -// PageHeaderSize represents the size of the bolt.page header. -const PageHeaderSize = 16 - -func main() { - m := NewMain() - if err := m.Run(os.Args[1:]...); err == ErrUsage { - os.Exit(2) - } else if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -// Main represents the main program execution. -type Main struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain returns a new instance of Main connect to the standard input/output. -func NewMain() *Main { - return &Main{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (m *Main) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - case "bench": - return newBenchCommand(m).Run(args[1:]...) - case "check": - return newCheckCommand(m).Run(args[1:]...) - case "dump": - return newDumpCommand(m).Run(args[1:]...) - case "info": - return newInfoCommand(m).Run(args[1:]...) - case "page": - return newPageCommand(m).Run(args[1:]...) - case "pages": - return newPagesCommand(m).Run(args[1:]...) - case "stats": - return newStatsCommand(m).Run(args[1:]...) - default: - return ErrUnknownCommand - } -} - -// Usage returns the help message. -func (m *Main) Usage() string { - return strings.TrimLeft(` -Bolt is a tool for inspecting bolt databases. - -Usage: - - bolt command [arguments] - -The commands are: - - bench run synthetic benchmark against bolt - check verifies integrity of bolt database - info print basic info - help print this screen - pages print list of pages with their types - stats iterate over all pages and generate usage stats - -Use "bolt [command] -h" for more information about a command. -`, "\n") -} - -// CheckCommand represents the "check" command execution. -type CheckCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewCheckCommand returns a CheckCommand. -func newCheckCommand(m *Main) *CheckCommand { - return &CheckCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *CheckCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Perform consistency check. 
- return db.View(func(tx *bolt.Tx) error { - var count int - ch := tx.Check() - loop: - for { - select { - case err, ok := <-ch: - if !ok { - break loop - } - fmt.Fprintln(cmd.Stdout, err) - count++ - } - } - - // Print summary of errors. - if count > 0 { - fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) - return ErrCorrupt - } - - // Notify user that database is valid. - fmt.Fprintln(cmd.Stdout, "OK") - return nil - }) -} - -// Usage returns the help message. -func (cmd *CheckCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt check PATH - -Check opens a database at PATH and runs an exhaustive check to verify that -all pages are accessible or are marked as freed. It also verifies that no -pages are double referenced. - -Verification errors will stream out as they are found and the process will -return after all pages have been checked. -`, "\n") -} - -// InfoCommand represents the "info" command execution. -type InfoCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewInfoCommand returns a InfoCommand. -func newInfoCommand(m *Main) *InfoCommand { - return &InfoCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *InfoCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open the database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Print basic database info. - info := db.Info() - fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) - - return nil -} - -// Usage returns the help message. -func (cmd *InfoCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt info PATH - -Info prints basic information about the Bolt database at PATH. -`, "\n") -} - -// DumpCommand represents the "dump" command execution. -type DumpCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newDumpCommand returns a DumpCommand. -func newDumpCommand(m *Main) *DumpCommand { - return &DumpCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *DumpCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database to retrieve page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. 
- if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Print page to stdout. - if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { - return err - } - } - - return nil -} - -// PrintPage prints a given page as hexidecimal. -func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *DumpCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt dump -page PAGEID PATH - -Dump prints a hexidecimal dump of a single page. -`, "\n") -} - -// PageCommand represents the "page" command execution. -type PageCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newPageCommand returns a PageCommand. -func newPageCommand(m *Main) *PageCommand { - return &PageCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Retrieve page info and page size. - p, buf, err := ReadPage(path, pageID) - if err != nil { - return err - } - - // Print basic page info. - fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) - fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) - - // Print type-specific data. 
-	switch p.Type() {
-	case "meta":
-		err = cmd.PrintMeta(cmd.Stdout, buf)
-	case "leaf":
-		err = cmd.PrintLeaf(cmd.Stdout, buf)
-	case "branch":
-		err = cmd.PrintBranch(cmd.Stdout, buf)
-	case "freelist":
-		err = cmd.PrintFreelist(cmd.Stdout, buf)
-	}
-	if err != nil {
-		return err
-	}
-	}
-
-	return nil
-}
-
-// PrintMeta prints the data from the meta page.
-func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
-	m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
-	fmt.Fprintf(w, "Version: %d\n", m.version)
-	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
-	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
-	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
-	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
-	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
-	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
-	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintLeaf prints the data for a leaf page.
-func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.leafPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		// Format value as string.
-		var v string
-		if (e.flags & uint32(bucketLeafFlag)) != 0 {
-			b := (*bucket)(unsafe.Pointer(&e.value()[0]))
-			v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
-		} else if isPrintable(string(e.value())) {
-			v = fmt.Sprintf("%q", string(e.value()))
-		} else {
-			v = fmt.Sprintf("%x", string(e.value()))
-		}
-
-		fmt.Fprintf(w, "%s: %s\n", k, v)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintBranch prints the data for a branch page.
-func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each key/value.
-	for i := uint16(0); i < p.count; i++ {
-		e := p.branchPageElement(i)
-
-		// Format key as string.
-		var k string
-		if isPrintable(string(e.key())) {
-			k = fmt.Sprintf("%q", string(e.key()))
-		} else {
-			k = fmt.Sprintf("%x", string(e.key()))
-		}
-
-		fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintFreelist prints the data for a freelist page.
-func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
-	p := (*page)(unsafe.Pointer(&buf[0]))
-
-	// Print number of items.
-	fmt.Fprintf(w, "Item Count: %d\n", p.count)
-	fmt.Fprintf(w, "\n")
-
-	// Print each page in the freelist.
-	ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
-	for i := uint16(0); i < p.count; i++ {
-		fmt.Fprintf(w, "%d\n", ids[i])
-	}
-	fmt.Fprintf(w, "\n")
-	return nil
-}
-
-// PrintPage prints a given page as hexidecimal.
-func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
-	const bytesPerLineN = 16
-
-	// Read page into buffer.
-	buf := make([]byte, pageSize)
-	addr := pageID * pageSize
-	if n, err := r.ReadAt(buf, int64(addr)); err != nil {
-		return err
-	} else if n != pageSize {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Write out to writer in 16-byte lines.
-	var prev []byte
-	var skipped bool
-	for offset := 0; offset < pageSize; offset += bytesPerLineN {
-		// Retrieve current 16-byte line. 
- line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *PageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page -page PATH pageid [pageid...] - -Page prints one or more pages in human readable format. -`, "\n") -} - -// PagesCommand represents the "pages" command execution. -type PagesCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewPagesCommand returns a PagesCommand. -func newPagesCommand(m *Main) *PagesCommand { - return &PagesCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PagesCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer func() { _ = db.Close() }() - - // Write header. - fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") - fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") - - return db.Update(func(tx *bolt.Tx) error { - var id int - for { - p, err := tx.Page(id) - if err != nil { - return &PageError{ID: id, Err: err} - } else if p == nil { - break - } - - // Only display count and overflow if this is a non-free page. - var count, overflow string - if p.Type != "free" { - count = strconv.Itoa(p.Count) - if p.OverflowCount > 0 { - overflow = strconv.Itoa(p.OverflowCount) - } - } - - // Print table row. - fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) - - // Move to the next non-overflow page. - id += 1 - if p.Type != "free" { - id += p.OverflowCount - } - } - return nil - }) -} - -// Usage returns the help message. -func (cmd *PagesCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt pages PATH - -Pages prints a table of pages with their type (meta, leaf, branch, freelist). -Leaf and branch pages will show a key count in the "items" column while the -freelist will show the number of free pages in the "items" column. - -The "overflow" column shows the number of blocks that the page spills over -into. Normally there is no overflow but large keys and values can cause -a single page to take up multiple blocks. -`, "\n") -} - -// StatsCommand represents the "stats" command execution. -type StatsCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewStatsCommand returns a StatsCommand. -func newStatsCommand(m *Main) *StatsCommand { - return &StatsCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. 
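PagesCommand.Run above walks the file one page id at a time with Tx.Page, which returns nil once the id passes the high-water mark. The same walk from application code, given an open *bolt.DB named db and a single goroutine (the CLI uses a writable transaction; a read-only view is sufficient for single-goroutine inspection):

    err := db.View(func(tx *bolt.Tx) error {
        for id := 0; ; id++ {
            p, err := tx.Page(id)
            if err != nil {
                return err
            } else if p == nil {
                return nil // past the high water mark
            }
            fmt.Printf("%-8d %-10s items=%d overflow=%d\n",
                p.ID, p.Type, p.Count, p.OverflowCount)

            // Skip the overflow blocks that belong to this entry.
            if p.Type != "free" {
                id += p.OverflowCount
            }
        }
    })
    if err != nil {
        log.Fatal(err)
    }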
-func (cmd *StatsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path, prefix := fs.Arg(0), fs.Arg(1) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - return db.View(func(tx *bolt.Tx) error { - var s bolt.BucketStats - var count int - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - if bytes.HasPrefix(name, []byte(prefix)) { - s.Add(b.Stats()) - count += 1 - } - return nil - }); err != nil { - return err - } - - fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) - - fmt.Fprintln(cmd.Stdout, "Page count statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) - fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) - - fmt.Fprintln(cmd.Stdout, "Tree statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) - fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) - - fmt.Fprintln(cmd.Stdout, "Page size utilization") - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) - var percentage int - if s.BranchAlloc != 0 { - percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) - percentage = 0 - if s.LeafAlloc != 0 { - percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) - - fmt.Fprintln(cmd.Stdout, "Bucket statistics") - fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = 0 - if s.BucketN != 0 { - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) - } - fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) - percentage = 0 - if s.LeafInuse != 0 { - percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) - - return nil - }) -} - -// Usage returns the help message. -func (cmd *StatsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt stats PATH - -Stats performs an extensive search of the database to track every page -reference. It starts at the current meta page and recursively iterates -through every accessible bucket. - -The following errors can be reported: - - already freed - The page is referenced more than once in the freelist. - - unreachable unfreed - The page is not referenced by a bucket or in the freelist. - - reachable freed - The page is referenced by a bucket but is also in the freelist. - - out of bounds - A page is referenced that is above the high water mark. 
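StatsCommand.Run above folds the bolt.BucketStats of every matching top-level bucket into one aggregate with BucketStats.Add. The same aggregation in application code, given an open *bolt.DB named db and the usual fmt/log/bolt imports:

    var agg bolt.BucketStats
    if err := db.View(func(tx *bolt.Tx) error {
        return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
            agg.Add(b.Stats()) // Add sums every counter field
            return nil
        })
    }); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("keys=%d depth=%d leaf in-use=%d of %d bytes\n",
        agg.KeyN, agg.Depth, agg.LeafInuse, agg.LeafAlloc)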
- - multiple references - A page is referenced by more than one other page. - - invalid type - The page type is not "meta", "leaf", "branch", or "freelist". - -No errors should occur in your database. However, if for some reason you -experience corruption, please submit a ticket to the Bolt project page: - - https://github.com/boltdb/bolt/issues -`, "\n") -} - -var benchBucketName = []byte("bench") - -// BenchCommand represents the "bench" command execution. -type BenchCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewBenchCommand returns a BenchCommand using the -func newBenchCommand(m *Main) *BenchCommand { - return &BenchCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the "bench" command. -func (cmd *BenchCommand) Run(args ...string) error { - // Parse CLI arguments. - options, err := cmd.ParseFlags(args) - if err != nil { - return err - } - - // Remove path if "-work" is not set. Otherwise keep path. - if options.Work { - fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) - } else { - defer os.Remove(options.Path) - } - - // Create database. - db, err := bolt.Open(options.Path, 0666, nil) - if err != nil { - return err - } - db.NoSync = options.NoSync - defer db.Close() - - // Write to the database. - var results BenchResults - if err := cmd.runWrites(db, options, &results); err != nil { - return fmt.Errorf("write: %v", err) - } - - // Read from the database. - if err := cmd.runReads(db, options, &results); err != nil { - return fmt.Errorf("bench: read: %s", err) - } - - // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) - fmt.Fprintln(os.Stderr, "") - return nil -} - -// ParseFlags parses the command line flags. -func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { - var options BenchOptions - - // Parse flagset. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") - fs.StringVar(&options.WriteMode, "write-mode", "seq", "") - fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.IntVar(&options.Iterations, "count", 1000, "") - fs.IntVar(&options.BatchSize, "batch-size", 0, "") - fs.IntVar(&options.KeySize, "key-size", 8, "") - fs.IntVar(&options.ValueSize, "value-size", 32, "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.StringVar(&options.BlockProfile, "blockprofile", "", "") - fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") - fs.BoolVar(&options.NoSync, "no-sync", false, "") - fs.BoolVar(&options.Work, "work", false, "") - fs.StringVar(&options.Path, "path", "", "") - fs.SetOutput(cmd.Stderr) - if err := fs.Parse(args); err != nil { - return nil, err - } - - // Set batch size to iteration size if not set. - // Require that batch size can be evenly divided by the iteration count. - if options.BatchSize == 0 { - options.BatchSize = options.Iterations - } else if options.Iterations%options.BatchSize != 0 { - return nil, ErrNonDivisibleBatchSize - } - - // Generate temp path if one is not passed in. 
- if options.Path == "" { - f, err := ioutil.TempFile("", "bolt-bench-") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - f.Close() - os.Remove(f.Name()) - options.Path = f.Name() - } - - return &options, nil -} - -// Writes to the database. -func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for writes. - if options.ProfileMode == "rw" || options.ProfileMode == "w" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.WriteMode { - case "seq": - err = cmd.runWritesSequential(db, options, results) - case "rnd": - err = cmd.runWritesRandom(db, options, results) - case "seq-nest": - err = cmd.runWritesSequentialNested(db, options, results) - case "rnd-nest": - err = cmd.runWritesRandomNested(db, options, results) - default: - return fmt.Errorf("invalid write mode: %s", options.WriteMode) - } - - // Save time to write. - results.WriteDuration = time.Since(t) - - // Stop profiling for writes only. - if options.ProfileMode == "w" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert key/value. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - top, err := tx.CreateBucketIfNotExists(benchBucketName) - if err != nil { - return err - } - top.FillPercent = options.FillPercent - - // Create bucket key. - name := make([]byte, options.KeySize) - binary.BigEndian.PutUint32(name, keySource()) - - // Create bucket. 
- b, err := top.CreateBucketIfNotExists(name) - if err != nil { - return err - } - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - var key = make([]byte, options.KeySize) - var value = make([]byte, options.ValueSize) - - // Generate key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert value into subbucket. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -// Reads from the database. -func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for reads. - if options.ProfileMode == "r" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.ReadMode { - case "seq": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsSequentialNested(db, options, results) - default: - err = cmd.runReadsSequential(db, options, results) - } - default: - return fmt.Errorf("invalid read mode: %s", options.ReadMode) - } - - // Save read time. - results.ReadDuration = time.Since(t) - - // Stop profiling for reads. - if options.ProfileMode == "rw" || options.ProfileMode == "r" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return errors.New("invalid value") - } - count++ - } - - if options.WriteMode == "seq" && count != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - var top = tx.Bucket(benchBucketName) - if err := top.ForEach(func(name, _ []byte) error { - c := top.Bucket(name).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue - } - count++ - } - return nil - }); err != nil { - return err - } - - if options.WriteMode == "seq-nest" && count != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -// File handlers for the various profiles. -var cpuprofile, memprofile, blockprofile *os.File - -// Starts all profiles set on the options. -func (cmd *BenchCommand) startProfiling(options *BenchOptions) { - var err error - - // Start CPU profiling. - if options.CPUProfile != "" { - cpuprofile, err = os.Create(options.CPUProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - pprof.StartCPUProfile(cpuprofile) - } - - // Start memory profiling. 
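The seq-nest and rnd-nest write modes above store each generated key inside a sub-bucket rather than directly in the top-level bench bucket. A minimal sketch of that nested-bucket write pattern, given an open *bolt.DB named db (the bucket names and key value are illustrative, and encoding/binary and log are assumed to be imported):

    err := db.Update(func(tx *bolt.Tx) error {
        top, err := tx.CreateBucketIfNotExists([]byte("bench"))
        if err != nil {
            return err
        }
        sub, err := top.CreateBucketIfNotExists([]byte("batch-0001"))
        if err != nil {
            return err
        }
        // Keys are written as big-endian integers so cursor order
        // matches numeric order, as in the bench code above.
        key := make([]byte, 8)
        binary.BigEndian.PutUint32(key, 42)
        return sub.Put(key, []byte("value"))
    })
    if err != nil {
        log.Fatal(err)
    }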
- if options.MemProfile != "" { - memprofile, err = os.Create(options.MemProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) - os.Exit(1) - } - runtime.MemProfileRate = 4096 - } - - // Start fatal profiling. - if options.BlockProfile != "" { - blockprofile, err = os.Create(options.BlockProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) - os.Exit(1) - } - runtime.SetBlockProfileRate(1) - } -} - -// Stops all profiles. -func (cmd *BenchCommand) stopProfiling() { - if cpuprofile != nil { - pprof.StopCPUProfile() - cpuprofile.Close() - cpuprofile = nil - } - - if memprofile != nil { - pprof.Lookup("heap").WriteTo(memprofile, 0) - memprofile.Close() - memprofile = nil - } - - if blockprofile != nil { - pprof.Lookup("block").WriteTo(blockprofile, 0) - blockprofile.Close() - blockprofile = nil - runtime.SetBlockProfileRate(0) - } -} - -// BenchOptions represents the set of options that can be passed to "bolt bench". -type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int - BatchSize int - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string -} - -// BenchResults represents the performance results of the benchmark. -type BenchResults struct { - WriteOps int - WriteDuration time.Duration - ReadOps int - ReadDuration time.Duration -} - -// Returns the duration for a single write operation. -func (r *BenchResults) WriteOpDuration() time.Duration { - if r.WriteOps == 0 { - return 0 - } - return r.WriteDuration / time.Duration(r.WriteOps) -} - -// Returns average number of write operations that can be performed per second. -func (r *BenchResults) WriteOpsPerSecond() int { - var op = r.WriteOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -// Returns the duration for a single read operation. -func (r *BenchResults) ReadOpDuration() time.Duration { - if r.ReadOps == 0 { - return 0 - } - return r.ReadDuration / time.Duration(r.ReadOps) -} - -// Returns average number of read operations that can be performed per second. -func (r *BenchResults) ReadOpsPerSecond() int { - var op = r.ReadOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -type PageError struct { - ID int - Err error -} - -func (e *PageError) Error() string { - return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) -} - -// isPrintable returns true if the string is valid unicode and contains only printable runes. -func isPrintable(s string) bool { - if !utf8.ValidString(s) { - return false - } - for _, ch := range s { - if !unicode.IsPrint(ch) { - return false - } - } - return true -} - -// ReadPage reads page info & full page data from a path. -// This is not transactionally safe. -func ReadPage(path string, pageID int) (*page, []byte, error) { - // Find page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return nil, nil, fmt.Errorf("read page size: %s", err) - } - - // Open database file. - f, err := os.Open(path) - if err != nil { - return nil, nil, err - } - defer f.Close() - - // Read one block into buffer. 
- buf := make([]byte, pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - - // Determine total number of blocks. - p := (*page)(unsafe.Pointer(&buf[0])) - overflowN := p.overflow - - // Re-read entire page (with overflow) into buffer. - buf = make([]byte, (int(overflowN)+1)*pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - p = (*page)(unsafe.Pointer(&buf[0])) - - return p, buf, nil -} - -// ReadPageSize reads page size a path. -// This is not transactionally safe. -func ReadPageSize(path string) (int, error) { - // Open database file. - f, err := os.Open(path) - if err != nil { - return 0, err - } - defer f.Close() - - // Read 4KB chunk. - buf := make([]byte, 4096) - if _, err := io.ReadFull(f, buf); err != nil { - return 0, err - } - - // Read page size from metadata. - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - return int(m.pageSize), nil -} - -// atois parses a slice of strings into integers. -func atois(strs []string) ([]int, error) { - var a []int - for _, str := range strs { - i, err := strconv.Atoi(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type bucket struct { - root pgid - sequence uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. 
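The copied page struct above is the fixed 16-byte header that precedes every page on disk. For a rough picture of those fields without unsafe, the header can also be decoded with encoding/binary, assuming the little-endian, 64-bit layout the struct has on amd64 (f is an open *os.File; os, fmt and encoding/binary imports are assumed):

    // readPageHeader decodes the 16-byte page header:
    // id uint64, flags uint16, count uint16, overflow uint32.
    func readPageHeader(f *os.File, pageID, pageSize int) error {
        buf := make([]byte, 16)
        if _, err := f.ReadAt(buf, int64(pageID)*int64(pageSize)); err != nil {
            return err
        }
        id := binary.LittleEndian.Uint64(buf[0:8])
        flags := binary.LittleEndian.Uint16(buf[8:10])
        count := binary.LittleEndian.Uint16(buf[10:12])
        overflow := binary.LittleEndian.Uint32(buf[12:16])
        fmt.Printf("id=%d flags=%#04x count=%d overflow=%d\n", id, flags, count, overflow)
        return nil
    }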
Copied from the "bolt" package. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go deleted file mode 100644 index c378b79..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package main_test - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/boltdb/bolt" - "github.com/boltdb/bolt/cmd/bolt" -) - -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := MustOpen(0666, nil) - db.DB.Close() - defer db.Close() - - // Run the info command. - m := NewMain() - if err := m.Run("info", db.Path); err != nil { - t.Fatal(err) - } -} - -// Ensure the "stats" command executes correctly with an empty database. -func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 0 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 0\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 0\n" + - "\tNumber of levels in B+tree: 0\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 0\n" + - "\tBytes actually used for leaf data: 0 (0%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 0\n" + - "\tTotal number on inlined buckets: 0 (0%)\n" + - "\tBytes used for inlined buckets: 0 (0%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Ensure the "stats" command can execute correctly. -func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.DB.Close() - - // Generate expected result. 
- exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - -// MustOpen creates a Bolt database in a temporary location. -func MustOpen(mode os.FileMode, options *bolt.Options) *DB { - // Create temporary path. - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go deleted file mode 100644 index 562d60f..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor_test.go +++ /dev/null @@ -1,817 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "log" - "os" - "reflect" - "sort" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a cursor can return a reference to the bucket that created it. -func TestCursor_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { - t.Fatal("cursor bucket mismatch") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can seek to the appropriate keys. 
-func TestCursor_Seek(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0001")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0002")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0003")); err != nil { - t.Fatal(err) - } - - if _, err := b.CreateBucket([]byte("bkt")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - - // Exact match should go to the key. - if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - - // Inexact match should go to the next key. - if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0003")) { - t.Fatalf("unexpected value: %v", v) - } - - // Low key should go to the first key. - if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - - // High key should return no key. - if k, v := c.Seek([]byte("zzz")); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - // Buckets should return their key but no value. - if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestCursor_Delete(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - const count = 1000 - - // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < count; i += 1 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(i)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } - } - if _, err := b.CreateBucket([]byte("sub")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - bound := make([]byte, 8) - binary.BigEndian.PutUint64(bound, uint64(count/2)) - for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } - } - - c.Seek([]byte("sub")) - if err := c.Delete(); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("widgets")).Stats() - if stats.KeyN != count/2+1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can seek to the appropriate keys when there are a -// large number of keys. This test also checks that seek will always move -// forward to the next key. 
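TestCursor_Seek above pins down the Seek contract: an exact match returns that key, an inexact match returns the next key in order, and seeking past the last key returns nil. That contract is what makes the usual prefix-scan idiom work; a short sketch, given an open read transaction tx and an existing bucket named "widgets" (the names are illustrative):

    prefix := []byte("ba")
    c := tx.Bucket([]byte("widgets")).Cursor()
    for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
        fmt.Printf("%s => %s\n", k, v)
    }

Sub-buckets show up in such a scan with a nil value, which is exactly what the test checks for the "bkt" key.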
-// -// Related: https://github.com/boltdb/bolt/pull/187 -func TestCursor_Seek_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var count = 10000 - - // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < count; i += 100 { - for j := i; j < i+100; j += 2 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(j)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - for i := 0; i < count; i++ { - seek := make([]byte, 8) - binary.BigEndian.PutUint64(seek, uint64(i)) - - k, _ := c.Seek(seek) - - // The last seek is beyond the end of the the range so - // it should return nil. - if i == count-1 { - if k != nil { - t.Fatal("expected nil key") - } - continue - } - - // Otherwise we should seek to the exact key or the next key. - num := binary.BigEndian.Uint64(k) - if i%2 == 0 { - if num != uint64(i) { - t.Fatalf("unexpected num: %d", num) - } - } else { - if num != uint64(i+1) { - t.Fatalf("unexpected num: %d", num) - } - } - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a cursor can iterate over an empty bucket without error. -func TestCursor_EmptyBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.First() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. -func TestCursor_EmptyBucketReverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can iterate over a single root with a couple elements. 
-func TestCursor_Iterate_Leaf(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - defer func() { _ = tx.Rollback() }() - - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.First() - if !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. -func TestCursor_LeafRootReverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - - if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can restart from the beginning. 
-func TestCursor_Restart(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } - - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a cursor can skip over empty pages that have been deleted. -func TestCursor_First_EmptyPages(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create 1000 keys in the "widgets" bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - // Delete half the keys and then try to iterate. - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 600; i++ { - if err := b.Delete(u64tob(uint64(i))); err != nil { - t.Fatal(err) - } - } - - c := b.Cursor() - var n int - for k, _ := c.First(); k != nil; k, _ = c.Next() { - n++ - } - if n != 400 { - t.Fatalf("unexpected key count: %d", n) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx can iterate over all elements in a bucket. -func TestCursor_QuickCheck(t *testing.T) { - f := func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Sort test data. - sort.Sort(items) - - // Iterate over all items and check consistency. - var index = 0 - tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } - - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } - index++ - } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can iterate over all elements in a bucket in reverse. -func TestCursor_QuickCheck_Reverse(t *testing.T) { - f := func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. 
- tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Sort test data. - sort.Sort(revtestdata(items)) - - // Iterate over all items and check consistency. - var index = 0 - tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } - index++ - } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a Tx cursor can iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { - t.Fatalf("unexpected names: %+v", names) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can reverse iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { - t.Fatalf("unexpected names: %+v", names) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func ExampleCursor() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - // Insert data into a bucket. 
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - log.Fatal(err) - } - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in sorted key order. This starts from the - // first key/value pair and updates the k/v variables to the - // next key/value on each iteration. - // - // The loop finishes at the end of the cursor when a nil key is returned. - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }); err != nil { - log.Fatal(err) - } - - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} - -func ExampleCursor_reverse() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - // Insert data into a bucket. - if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - log.Fatal(err) - } - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in reverse sorted key order. This starts - // from the last key/value pair and updates the k/v variables to - // the previous key/value on each iteration. - // - // The loop finishes at the beginning of the cursor when a nil key - // is returned. - for k, v := c.Last(); k != nil; k, v = c.Prev() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }); err != nil { - log.Fatal(err) - } - - // Close the database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A liger is awesome. - // A dog is fun. - // A cat is lame. -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go deleted file mode 100644 index 9fe13ec..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db_test.go +++ /dev/null @@ -1,1684 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "hash/fnv" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "runtime" - "sort" - "strings" - "sync" - "testing" - "time" - "unsafe" - - "github.com/boltdb/bolt" -) - -var statsFlag = flag.Bool("stats", false, "show performance stats") - -// version is the data file format version. -const version = 2 - -// magic is the marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// pageSize is the size of one page in the data file. -const pageSize = 4096 - -// pageHeaderSize is the size of a page header. -const pageHeaderSize = 16 - -// meta represents a simplified version of a database meta page for testing. -type meta struct { - magic uint32 - version uint32 - _ uint32 - _ uint32 - _ [16]byte - _ uint64 - _ uint64 - _ uint64 - checksum uint64 -} - -// Ensure that a database can be opened without error. 
-func TestOpen(t *testing.T) { - path := tempfile() - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db == nil { - t.Fatal("expected db") - } - - if s := db.Path(); s != path { - t.Fatalf("unexpected path: %s", s) - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening a database with a blank path returns an error. -func TestOpen_ErrPathRequired(t *testing.T) { - _, err := bolt.Open("", 0666, nil) - if err == nil { - t.Fatalf("expected error") - } -} - -// Ensure that opening a database with a bad path returns an error. -func TestOpen_ErrNotExists(t *testing.T) { - _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) - if err == nil { - t.Fatal("expected error") - } -} - -// Ensure that opening a file with wrong checksum returns ErrChecksum. -func TestOpen_ErrChecksum(t *testing.T) { - buf := make([]byte, pageSize) - meta := (*meta)(unsafe.Pointer(&buf[0])) - meta.magic = magic - meta.version = version - meta.checksum = 123 - - path := tempfile() - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - if _, err := f.WriteAt(buf, pageHeaderSize); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening a file that is not a Bolt database returns ErrInvalid. -func TestOpen_ErrInvalid(t *testing.T) { - path := tempfile() - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening a file created with a different version of Bolt returns -// ErrVersionMismatch. -func TestOpen_ErrVersionMismatch(t *testing.T) { - buf := make([]byte, pageSize) - meta := (*meta)(unsafe.Pointer(&buf[0])) - meta.magic = magic - meta.version = version + 100 - - path := tempfile() - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - if _, err := f.WriteAt(buf, pageHeaderSize); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening an already open database file will timeout. -func TestOpen_Timeout(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - path := tempfile() - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db0 == nil { - t.Fatal("expected database") - } - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond}) - if err != bolt.ErrTimeout { - t.Fatalf("unexpected timeout: %s", err) - } else if db1 != nil { - t.Fatal("unexpected database") - } else if time.Since(start) <= 100*time.Millisecond { - t.Fatal("expected to wait at least timeout duration") - } - - if err := db0.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening an already open database file will wait until its closed. 
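TestOpen_Timeout above shows that a second Open on a locked file either blocks until the holder closes it or fails with bolt.ErrTimeout once Options.Timeout elapses. A typical guard in application code (the path is a placeholder):

    db, err := bolt.Open("my.db", 0666, &bolt.Options{Timeout: 1 * time.Second})
    if err == bolt.ErrTimeout {
        log.Fatal("database is locked by another process")
    } else if err != nil {
        log.Fatal(err)
    }
    defer db.Close()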
-func TestOpen_Wait(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - path := tempfile() - - // Open a data file. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - - // Close it in just a bit. - time.AfterFunc(100*time.Millisecond, func() { _ = db0.Close() }) - - // Attempt to open the database again. - start := time.Now() - db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond}) - if err != nil { - t.Fatal(err) - } else if time.Since(start) <= 100*time.Millisecond { - t.Fatal("expected to wait at least timeout duration") - } - - if err := db1.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening a database does not increase its size. -// https://github.com/boltdb/bolt/issues/291 -func TestOpen_Size(t *testing.T) { - // Open a data file. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - pagesize := db.Info().PageSize - - // Insert until we get above the minimum 4MB size. - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for i := 0; i < 10000; i++ { - if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Close database and grab the size. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db0.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db0.Close(); err != nil { - t.Fatal(err) - } - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - // db size might increase by a few page sizes due to the new small update. - if sz < newSz-5*int64(pagesize) { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that opening a database beyond the max step size does not increase its size. -// https://github.com/boltdb/bolt/issues/303 -func TestOpen_Size_Large(t *testing.T) { - if testing.Short() { - t.Skip("short mode") - } - - // Open a data file. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - pagesize := db.Info().PageSize - - // Insert until we get above the minimum 4MB size. - var index uint64 - for i := 0; i < 10000; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for j := 0; j < 1000; j++ { - if err := b.Put(u64tob(index), make([]byte, 50)); err != nil { - t.Fatal(err) - } - index++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - // Close database and grab the size. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } else if sz < (1 << 30) { - t.Fatalf("expected larger initial size: %d", sz) - } - - // Reopen database, update, and check size again. 
- db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db0.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) - }); err != nil { - t.Fatal(err) - } - if err := db0.Close(); err != nil { - t.Fatal(err) - } - - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - // db size might increase by a few page sizes due to the new small update. - if sz < newSz-5*int64(pagesize) { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that a re-opened database is consistent. -func TestOpen_Check(t *testing.T) { - path := tempfile() - - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - db, err = bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that write errors to the meta file handler during initialization are returned. -func TestOpen_MetaInitWriteError(t *testing.T) { - t.Skip("pending") -} - -// Ensure that a database that is too small returns an error. -func TestOpen_FileTooSmall(t *testing.T) { - path := tempfile() - - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - // corrupt the database - if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { - t.Fatal(err) - } - - db, err = bolt.Open(path, 0666, nil) - if err == nil || err.Error() != "file size too small" { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that a database can be opened in read-only mode by multiple processes -// and that a database can not be opened in read-write mode and in read-only -// mode at the same time. -func TestOpen_ReadOnly(t *testing.T) { - if runtime.GOOS == "solaris" { - t.Skip("solaris fcntl locks don't support intra-process locking") - } - - bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`) - - path := tempfile() - - // Open in read-write mode. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db.IsReadOnly() { - t.Fatal("db should not be in read only mode") - } - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket(bucket) - if err != nil { - return err - } - if err := b.Put(key, value); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - // Open in read-only mode. - db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - if err != nil { - t.Fatal(err) - } - - // Opening in read-write mode should return an error. - if _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}); err == nil { - t.Fatal("expected error") - } - - // And again (in read-only mode). - db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) - if err != nil { - t.Fatal(err) - } - - // Verify both read-only databases are accessible. - for _, db := range []*bolt.DB{db0, db1} { - // Verify is is in read only mode indeed. - if !db.IsReadOnly() { - t.Fatal("expected read only mode") - } - - // Read-only databases should not allow updates. 
- if err := db.Update(func(*bolt.Tx) error { - panic(`should never get here`) - }); err != bolt.ErrDatabaseReadOnly { - t.Fatalf("unexpected error: %s", err) - } - - // Read-only databases should not allow beginning writable txns. - if _, err := db.Begin(true); err != bolt.ErrDatabaseReadOnly { - t.Fatalf("unexpected error: %s", err) - } - - // Verify the data. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) - if b == nil { - return fmt.Errorf("expected bucket `%s`", string(bucket)) - } - - got := string(b.Get(key)) - expected := string(value) - if got != expected { - return fmt.Errorf("expected `%s`, got `%s`", expected, got) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - if err := db0.Close(); err != nil { - t.Fatal(err) - } - if err := db1.Close(); err != nil { - t.Fatal(err) - } -} - -// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough -// to hold data from concurrent write transaction resolves the issue that -// read transaction blocks the write transaction and causes deadlock. -// This is a very hacky test since the mmap size is not exposed. -func TestDB_Open_InitialMmapSize(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - initMmapSize := 1 << 31 // 2GB - testWriteSize := 1 << 27 // 134MB - - db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) - if err != nil { - t.Fatal(err) - } - - // create a long-running read transaction - // that never gets closed while writing - rtx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - - // create a write transaction - wtx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := wtx.CreateBucket([]byte("test")) - if err != nil { - t.Fatal(err) - } - - // and commit a large write - err = b.Put([]byte("foo"), make([]byte, testWriteSize)) - if err != nil { - t.Fatal(err) - } - - done := make(chan struct{}) - - go func() { - if err := wtx.Commit(); err != nil { - t.Fatal(err) - } - done <- struct{}{} - }() - - select { - case <-time.After(5 * time.Second): - t.Errorf("unexpected that the reader blocks writer") - case <-done: - } - - if err := rtx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a database cannot open a transaction when it's not open. -func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { - var db bolt.DB - if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that a read-write transaction can be retrieved. -func TestDB_BeginRW(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } else if tx == nil { - t.Fatal("expected tx") - } - - if tx.DB() != db.DB { - t.Fatal("unexpected tx database") - } else if !tx.Writable() { - t.Fatal("expected writable tx") - } - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening a transaction while the DB is closed returns an error. -func TestDB_BeginRW_Closed(t *testing.T) { - var db bolt.DB - if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } -func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } - -// Ensure that a database cannot close while transactions are open. -func testDB_Close_PendingTx(t *testing.T, writable bool) { - db := MustOpenDB() - defer db.MustClose() - - // Start transaction. 
- tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - // Open update in separate goroutine. - done := make(chan struct{}) - go func() { - if err := db.Close(); err != nil { - t.Fatal(err) - } - close(done) - }() - - // Ensure database hasn't closed. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - t.Fatal("database closed too early") - default: - } - - // Commit transaction. - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Ensure database closed now. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - default: - t.Fatal("database did not close") - } -} - -// Ensure a database can provide a transactional block. -func TestDB_Update(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("expected nil value, got: %v", v) - } - if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a closed database returns an error while running a transaction block -func TestDB_Update_Closed(t *testing.T) { - var db bolt.DB - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_Update_ManualCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_Update_ManualRollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_View_ManualCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.View(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to rollback a managed transaction. 
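
The manual commit/rollback tests above and below establish that transactions handed to Update and View are managed: Bolt commits or rolls back for you, and calling Commit or Rollback inside the closure panics. Code that needs explicit control starts an unmanaged transaction with Begin. A minimal sketch, assuming the github.com/boltdb/bolt import and an already-open *bolt.DB:

func putWidget(db *bolt.DB) error {
	// Managed transaction: committed on a nil return, rolled back on error.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		return err
	}

	// Unmanaged transaction: the caller owns Commit/Rollback.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback() // returns ErrTxClosed after a successful Commit; ignored here

	if err := tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat")); err != nil {
		return err
	}
	return tx.Commit()
}
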
-func TestDB_View_ManualRollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.View(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a write transaction that panics does not hold open locks. -func TestDB_Update_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Panic during update but recover. - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: update", r) - } - }() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - panic("omg") - }); err != nil { - t.Fatal(err) - } - }() - - // Verify we can update again. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify that our change persisted. - if err := db.Update(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a database can return an error through a read-only transactional block. -func TestDB_View_Error(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.View(func(tx *bolt.Tx) error { - return errors.New("xxx") - }); err == nil || err.Error() != "xxx" { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure a read transaction that panics does not hold open locks. -func TestDB_View_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Panic during view transaction but recover. - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: view", r) - } - }() - - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - panic("omg") - }); err != nil { - t.Fatal(err) - } - }() - - // Verify that we can still use read transactions. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that DB stats can be returned. -func TestDB_Stats(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - stats := db.Stats() - if stats.TxStats.PageCount != 2 { - t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount) - } else if stats.FreePageN != 0 { - t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN) - } else if stats.PendingPageN != 2 { - t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN) - } -} - -// Ensure that database pages are in expected order and type. 
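
TestDB_Stats above reads Bolt's runtime counters, which are part of the exported API rather than test-only hooks. A rough sketch of reading the same fields from application code, assuming the log and bolt imports and an open *bolt.DB:

func logStats(db *bolt.DB) {
	stats := db.Stats()
	log.Printf("tx page allocations: %d", stats.TxStats.PageCount)
	log.Printf("free pages: %d, pending free pages: %d", stats.FreePageN, stats.PendingPageN)
}
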
-func TestDB_Consistency(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - if err := db.Update(func(tx *bolt.Tx) error { - if p, _ := tx.Page(0); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(1); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(2); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(3); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(4); p == nil { - t.Fatal("expected page") - } else if p.Type != "leaf" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(5); p == nil { - t.Fatal("expected page") - } else if p.Type != "freelist" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(6); p != nil { - t.Fatal("unexpected page") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that DB stats can be subtracted from one another. -func TestDBStats_Sub(t *testing.T) { - var a, b bolt.Stats - a.TxStats.PageCount = 3 - a.FreePageN = 4 - b.TxStats.PageCount = 10 - b.FreePageN = 14 - diff := b.Sub(&a) - if diff.TxStats.PageCount != 7 { - t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount) - } - - // free page stats are copied from the receiver and not subtracted - if diff.FreePageN != 14 { - t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) - } -} - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Iterate over multiple updates in separate goroutines. - n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestDB_Batch_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. 
- if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestDB_BatchTime(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func ExampleDB_Update() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Execute several commands within a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Read the value back from a separate read-only transaction. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleDB_View() { - // Open the database. 
- db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Insert data into a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("people")) - if err != nil { - return err - } - if err := b.Put([]byte("john"), []byte("doe")); err != nil { - return err - } - if err := b.Put([]byte("susy"), []byte("que")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Access data from within a read-only transactional block. - if err := db.View(func(tx *bolt.Tx) error { - v := tx.Bucket([]byte("people")).Get([]byte("john")) - fmt.Printf("John's last name is %s.\n", v) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // John's last name is doe. -} - -func ExampleDB_Begin_ReadOnly() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket using a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - log.Fatal(err) - } - - // Create several keys in a transaction. - tx, err := db.Begin(true) - if err != nil { - log.Fatal(err) - } - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("john"), []byte("blue")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("abby"), []byte("red")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { - log.Fatal(err) - } - if err := tx.Commit(); err != nil { - log.Fatal(err) - } - - // Iterate over the values in sorted key order. 
- tx, err = db.Begin(false) - if err != nil { - log.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("%s likes %s\n", k, v) - } - - if err := tx.Rollback(); err != nil { - log.Fatal(err) - } - - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // abby likes red - // john likes blue - // zephyr likes purple -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for major := 0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func validateBatchBench(b *testing.B, db *DB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - 
b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB -} - -// MustOpenDB returns a new, open DB at a temporary location. -func MustOpenDB() *DB { - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - panic(err) - } - return &DB{db} -} - -// Close closes the database and deletes the underlying file. -func (db *DB) Close() error { - // Log statistics. - if *statsFlag { - db.PrintStats() - } - - // Check database consistency after every test. - db.MustCheck() - - // Close database and remove file. - defer os.Remove(db.Path()) - return db.DB.Close() -} - -// MustClose closes the database and deletes the underlying file. Panic on error. -func (db *DB) MustClose() { - if err := db.Close(); err != nil { - panic(err) - } -} - -// PrintStats prints the database stats -func (db *DB) PrintStats() { - var stats = db.Stats() - fmt.Printf("[db] %-20s %-20s %-20s\n", - fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), - fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), - fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), - ) - fmt.Printf(" %-20s %-20s %-20s\n", - fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), - fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), - fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), - ) -} - -// MustCheck runs a consistency check on the database and panics if any errors are found. -func (db *DB) MustCheck() { - if err := db.Update(func(tx *bolt.Tx) error { - // Collect all the errors. - var errors []error - for err := range tx.Check() { - errors = append(errors, err) - if len(errors) > 10 { - break - } - } - - // If errors occurred, copy the DB and print the errors. - if len(errors) > 0 { - var path = tempfile() - if err := tx.CopyFile(path, 0600); err != nil { - panic(err) - } - - // Print errors. - fmt.Print("\n\n") - fmt.Printf("consistency check failed (%d errors)\n", len(errors)) - for _, err := range errors { - fmt.Println(err) - } - fmt.Println("") - fmt.Println("db saved to:") - fmt.Println(path) - fmt.Print("\n\n") - os.Exit(-1) - } - - return nil - }); err != nil && err != bolt.ErrDatabaseNotOpen { - panic(err) - } -} - -// CopyTempFile copies a database to a temporary file. -func (db *DB) CopyTempFile() { - path := tempfile() - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(path, 0600) - }); err != nil { - panic(err) - } - fmt.Println("db copied to: ", path) -} - -// tempfile returns a temporary file path. -func tempfile() string { - f, err := ioutil.TempFile("", "bolt-") - if err != nil { - panic(err) - } - if err := f.Close(); err != nil { - panic(err) - } - if err := os.Remove(f.Name()); err != nil { - panic(err) - } - return f.Name() -} - -// mustContainKeys checks that a bucket contains a given set of keys. 
-func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - if err := b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }); err != nil { - panic(err) - } - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - -func trunc(b []byte, length int) []byte { - if length < len(b) { - return b[:length] - } - return b -} - -func truncDuration(d time.Duration) string { - return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") -} - -func fileSize(path string) int64 { - fi, err := os.Stat(path) - if err != nil { - return 0 - } - return fi.Size() -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 4e9b3a8..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. 
-func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. -func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} - f.pending[100] = []pgid{28, 11} - f.pending[101] = []pgid{3} - p := (*page)(unsafe.Pointer(&buf[0])) - if err := f.write(p); err != nil { - t.Fatal(err) - } - - // Read the page back out. - f2 := newFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. - if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { - t.Fatalf("exp=%v; got=%v", exp, f2.ids) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} - f.release(1) - } -} - -func randomPgids(n int) []pgid { - rand.Seed(42) - pgids := make(pgids, n) - for i := range pgids { - pgids[i] = pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go deleted file mode 100644 index fa5d10f..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/node_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "testing" - "unsafe" -) - -// Ensure that a node can insert a key/value. 
-func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} - n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) - n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if n.inodes[2].flags != uint32(leafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) - } -} - -// Ensure that a node can deserialize from a leaf page. -func TestNode_read_LeafPage(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 - - // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 - - // Write data for the nodes at the end. - data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) - copy(data[:], []byte("barfooz")) - copy(data[7:], []byte("helloworldbye")) - - // Deserialize page into a leaf. - n := &node{} - n.read(page) - - // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can serialize into a leaf page. -func TestNode_write_LeafPage(t *testing.T) { - // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) - n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) - n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) - - // Write it to a page. - var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) - n.write(p) - - // Read the page back in. - n2 := &node{} - n2.read(p) - - // Check that the two pages are the same. - if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can split into appropriate subgroups. -func TestNode_split(t *testing.T) { - // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split between 2 & 3. - n.split(100) - - var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } -} - -// Ensure that a page with the minimum number of inodes just returns a single node. -func TestNode_split_MinKeys(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} - -// Ensure that a node that has keys that all fit on a page just returns one leaf. -func TestNode_split_SinglePage(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go deleted file mode 100644 index 59f4a30..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. -func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. 
-func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. - sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go deleted file mode 100644 index 4da5817..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/quick_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bolt_test - -import ( - "bytes" - "flag" - "fmt" - "math/rand" - "os" - "reflect" - "testing/quick" - "time" -) - -// testing/quick defaults to 5 iterations and a random seed. -// You can override these settings from the command line: -// -// -quick.count The number of iterations to perform. -// -quick.seed The seed to use for randomizing. -// -quick.maxitems The maximum number of items to insert into a DB. -// -quick.maxksize The maximum size of a key. -// -quick.maxvsize The maximum size of a value. 
-// - -var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int - -func init() { - flag.IntVar(&qcount, "quick.count", 5, "") - flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") - flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") - flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") - flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") - flag.Parse() - fmt.Fprintln(os.Stderr, "seed:", qseed) - fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) -} - -func qconfig() *quick.Config { - return &quick.Config{ - MaxCount: qcount, - Rand: rand.New(rand.NewSource(int64(qseed))), - } -} - -type testdata []testdataitem - -func (t testdata) Len() int { return len(t) } -func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } - -func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { - n := rand.Intn(qmaxitems-1) + 1 - items := make(testdata, n) - for i := 0; i < n; i++ { - item := &items[i] - item.Key = randByteSlice(rand, 1, qmaxksize) - item.Value = randByteSlice(rand, 0, qmaxvsize) - } - return reflect.ValueOf(items) -} - -type revtestdata []testdataitem - -func (t revtestdata) Len() int { return len(t) } -func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } - -type testdataitem struct { - Key []byte - Value []byte -} - -func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { - n := rand.Intn(maxSize-minSize) + minSize - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go deleted file mode 100644 index 3831016..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/simulation_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package bolt_test - -import ( - "bytes" - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } -func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } -func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } -func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } - -func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } -func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } -func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } -func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } - -func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } -func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } -func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } - -func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } - -// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. -func testSimulate(t *testing.T, threadCount, parallelism int) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - rand.Seed(int64(qseed)) - - // A list of operations that readers and writers can perform. 
- var readerHandlers = []simulateHandler{simulateGetHandler} - var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - - var versions = make(map[int]*QuickDB) - versions[1] = NewQuickDB() - - db := MustOpenDB() - defer db.MustClose() - - var mutex sync.Mutex - - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. - mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - } else { - defer func() { _ = tx.Rollback() }() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } - } - - // Wait until all threads are done. - wg.Wait() -} - -type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) - -// Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { - // Randomly retrieve an existing exist. - keys := qdb.Rand() - if len(keys) == 0 { - return - } - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) - } - - // Drill into nested buckets. - for _, key := range keys[1 : len(keys)-1] { - b = b.Bucket(key) - if b == nil { - panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) - } - } - - // Verify key/value on the final bucket. - expected := qdb.Get(keys) - actual := b.Get(keys[len(keys)-1]) - if !bytes.Equal(actual, expected) { - fmt.Println("=== EXPECTED ===") - fmt.Println(expected) - fmt.Println("=== ACTUAL ===") - fmt.Println(actual) - fmt.Println("=== END ===") - panic("value mismatch") - } -} - -// Inserts a key into the database. -func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { - var err error - keys, value := randKeys(), randValue() - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - b, err = tx.CreateBucket(keys[0]) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - - // Create nested buckets, if necessary. - for _, key := range keys[1 : len(keys)-1] { - child := b.Bucket(key) - if child != nil { - b = child - } else { - b, err = b.CreateBucket(key) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - } - - // Insert into database. - if err := b.Put(keys[len(keys)-1], value); err != nil { - panic("put: " + err.Error()) - } - - // Insert into in-memory database. 
- qdb.Put(keys, value) -} - -// QuickDB is an in-memory database that replicates the functionality of the -// Bolt DB type except that it is entirely in-memory. It is meant for testing -// that the Bolt database is consistent. -type QuickDB struct { - sync.RWMutex - m map[string]interface{} -} - -// NewQuickDB returns an instance of QuickDB. -func NewQuickDB() *QuickDB { - return &QuickDB{m: make(map[string]interface{})} -} - -// Get retrieves the value at a key path. -func (db *QuickDB) Get(keys [][]byte) []byte { - db.RLock() - defer db.RUnlock() - - m := db.m - for _, key := range keys[:len(keys)-1] { - value := m[string(key)] - if value == nil { - return nil - } - switch value := value.(type) { - case map[string]interface{}: - m = value - case []byte: - return nil - } - } - - // Only return if it's a simple value. - if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { - return value - } - return nil -} - -// Put inserts a value into a key path. -func (db *QuickDB) Put(keys [][]byte, value []byte) { - db.Lock() - defer db.Unlock() - - // Build buckets all the way down the key path. - m := db.m - for _, key := range keys[:len(keys)-1] { - if _, ok := m[string(key)].([]byte); ok { - return // Keypath intersects with a simple value. Do nothing. - } - - if m[string(key)] == nil { - m[string(key)] = make(map[string]interface{}) - } - m = m[string(key)].(map[string]interface{}) - } - - // Insert value into the last key. - m[string(keys[len(keys)-1])] = value -} - -// Rand returns a random key path that points to a simple value. -func (db *QuickDB) Rand() [][]byte { - db.RLock() - defer db.RUnlock() - if len(db.m) == 0 { - return nil - } - var keys [][]byte - db.rand(db.m, &keys) - return keys -} - -func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { - i, index := 0, rand.Intn(len(m)) - for k, v := range m { - if i == index { - *keys = append(*keys, []byte(k)) - if v, ok := v.(map[string]interface{}); ok { - db.rand(v, keys) - } - return - } - i++ - } - panic("quickdb rand: out-of-range") -} - -// Copy copies the entire database. -func (db *QuickDB) Copy() *QuickDB { - db.RLock() - defer db.RUnlock() - return &QuickDB{m: db.copy(db.m)} -} - -func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { - clone := make(map[string]interface{}, len(m)) - for k, v := range m { - switch v := v.(type) { - case map[string]interface{}: - clone[k] = db.copy(v) - default: - clone[k] = v - } - } - return clone -} - -func randKey() []byte { - var min, max = 1, 1024 - n := rand.Intn(max-min) + min - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -func randKeys() [][]byte { - var keys [][]byte - var count = rand.Intn(2) + 2 - for i := 0; i < count; i++ { - keys = append(keys, randKey()) - } - return keys -} - -func randValue() []byte { - n := rand.Intn(8192) - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go deleted file mode 100644 index 18ff166..0000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx_test.go +++ /dev/null @@ -1,716 +0,0 @@ -package bolt_test - -import ( - "bytes" - "errors" - "fmt" - "log" - "os" - "testing" - - "github.com/boltdb/bolt" -) - -// Ensure that committing a closed transaction returns an error. 
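
The tx_test.go cases that follow start with the closed-transaction errors; the behavior being asserted is simply that a transaction handle is one-shot. A minimal sketch, assuming the fmt and bolt imports and an open *bolt.DB:

func demoClosedTx(db *bolt.DB) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}
	// The handle is now closed; any further Commit or Rollback fails.
	if err := tx.Rollback(); err != bolt.ErrTxClosed {
		return fmt.Errorf("expected bolt.ErrTxClosed, got %v", err)
	}
	return nil
}
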
-func TestTx_Commit_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - if err := tx.Commit(); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that rolling back a closed transaction returns an error. -func TestTx_Rollback_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := tx.Rollback(); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that committing a read-only transaction returns an error. -func TestTx_Commit_ErrTxNotWritable(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != bolt.ErrTxNotWritable { - t.Fatal(err) - } -} - -// Ensure that a transaction can retrieve a cursor on the root bucket. -func TestTx_Cursor(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("woojits")); err != nil { - t.Fatal(err) - } - - c := tx.Cursor() - if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Next(); k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", k) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket with a read-only transaction returns an error. -func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.View(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("foo")) - if err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that a Tx can retrieve a bucket. -func TestTx_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx retrieving a non-existent key returns nil. 
-func TestTx_Get_NotFound(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b.Get([]byte("no_such_key")) != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be created and retrieved. -func TestTx_CreateBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be created if it doesn't already exist. -func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - // Create bucket. - if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - - // Create bucket again. - if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - - return nil - }); err != nil { - t.Fatal(err) - } - - // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure transaction returns an error if creating an unnamed bucket. -func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - - if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Create the same bucket again. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be deleted. 
-func TestTx_DeleteBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket and add a value. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Delete the bucket and make sure we can't get the value. - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("unexpected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected phantom value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket on a closed transaction returns an error. -func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that deleting a bucket with a read-only transaction returns an error. -func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that nothing happens when deleting a bucket that doesn't exist. -func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that no error is returned when a tx.ForEach function does not return -// an error. -func TestTx_ForEach_NoError(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return nil - }); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when a tx.ForEach function returns an error. 
-func TestTx_ForEach_WithError(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - marker := errors.New("marker") - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return marker - }); err != marker { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that Tx commit handlers are called after a transaction successfully commits. -func TestTx_OnCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var x int - if err := db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } else if x != 3 { - t.Fatalf("unexpected x: %d", x) - } -} - -// Ensure that Tx commit handlers are NOT called after a transaction rolls back. -func TestTx_OnCommit_Rollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var x int - if err := db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return errors.New("rollback this commit") - }); err == nil || err.Error() != "rollback this commit" { - t.Fatalf("unexpected error: %s", err) - } else if x != 0 { - t.Fatalf("unexpected x: %d", x) - } -} - -// Ensure that the database can be copied to a file path. -func TestTx_CopyFile(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - path := tempfile() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(path, 0600) - }); err != nil { - t.Fatal(err) - } - - db2, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } - - if err := db2.View(func(tx *bolt.Tx) error { - if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } - if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db2.Close(); err != nil { - t.Fatal(err) - } -} - -type failWriterError struct{} - -func (failWriterError) Error() string { - return "error injected for tests" -} - -type failWriter struct { - // fail after this many bytes - After int -} - -func (f *failWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > f.After { - n = f.After - err = failWriterError{} - } - f.After -= n - return n, err -} - -// Ensure that Copy handles write errors right. 
-func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.Copy(&failWriter{}) - }); err == nil || err.Error() != "meta copy: error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.Copy(&failWriter{3 * db.Info().PageSize}) - }); err == nil || err.Error() != "error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } -} - -func ExampleTx_Rollback() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - log.Fatal(err) - } - - // Set a value for a key. - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }); err != nil { - log.Fatal(err) - } - - // Update the key but rollback the transaction so it never saves. - tx, err := db.Begin(true) - if err != nil { - log.Fatal(err) - } - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("baz")); err != nil { - log.Fatal(err) - } - if err := tx.Rollback(); err != nil { - log.Fatal(err) - } - - // Ensure that our original value is still set. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' is still: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value for 'foo' is still: bar -} - -func ExampleTx_CopyFile() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket and a key. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Copy the database to another file. - toFile := tempfile() - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(toFile, 0666) - }); err != nil { - log.Fatal(err) - } - defer os.Remove(toFile) - - // Open the cloned database. - db2, err := bolt.Open(toFile, 0666, nil) - if err != nil { - log.Fatal(err) - } - - // Ensure that the key exists in the copy. 
- if err := db2.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' in the clone is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - if err := db2.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value for 'foo' in the clone is: bar -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go deleted file mode 100644 index 88eed45..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes_test.go +++ /dev/null @@ -1,219 +0,0 @@ -package humanize - -import ( - "math/big" - "testing" -) - -func TestBigByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. - {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBigBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } else { - if got.Uint64() != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } - } -} - -func TestBigByteErrors(t *testing.T) { - got, err := ParseBigBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBigBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } -} - -func bbyte(in uint64) string { - return BigBytes((&big.Int{}).SetUint64(in)) -} - -func bibyte(in uint64) string { - return BigIBytes((&big.Int{}).SetUint64(in)) -} - -func TestBigBytes(t *testing.T) { - testList{ - {"bytes(0)", bbyte(0), "0 B"}, - {"bytes(1)", bbyte(1), "1 B"}, - {"bytes(803)", bbyte(803), "803 B"}, - {"bytes(999)", bbyte(999), "999 B"}, - - {"bytes(1024)", bbyte(1024), "1.0 kB"}, - {"bytes(1MB - 1)", bbyte(MByte - Byte), "1000 kB"}, - - {"bytes(1MB)", bbyte(1024 * 1024), "1.0 MB"}, - {"bytes(1GB - 1K)", bbyte(GByte - KByte), "1000 MB"}, - - {"bytes(1GB)", bbyte(GByte), "1.0 GB"}, - {"bytes(1TB - 1M)", bbyte(TByte - MByte), "1000 GB"}, - - {"bytes(1TB)", bbyte(TByte), "1.0 TB"}, - {"bytes(1PB - 1T)", bbyte(PByte - TByte), "999 TB"}, - - {"bytes(1PB)", bbyte(PByte), "1.0 PB"}, - {"bytes(1PB - 1T)", bbyte(EByte - PByte), "999 PB"}, - - {"bytes(1EB)", bbyte(EByte), "1.0 EB"}, - // Overflows. 
- // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", bibyte(0), "0 B"}, - {"bytes(1)", bibyte(1), "1 B"}, - {"bytes(803)", bibyte(803), "803 B"}, - {"bytes(1023)", bibyte(1023), "1023 B"}, - - {"bytes(1024)", bibyte(1024), "1.0 KiB"}, - {"bytes(1MB - 1)", bibyte(MiByte - IByte), "1024 KiB"}, - - {"bytes(1MB)", bibyte(1024 * 1024), "1.0 MiB"}, - {"bytes(1GB - 1K)", bibyte(GiByte - KiByte), "1024 MiB"}, - - {"bytes(1GB)", bibyte(GiByte), "1.0 GiB"}, - {"bytes(1TB - 1M)", bibyte(TiByte - MiByte), "1024 GiB"}, - - {"bytes(1TB)", bibyte(TiByte), "1.0 TiB"}, - {"bytes(1PB - 1T)", bibyte(PiByte - TiByte), "1023 TiB"}, - - {"bytes(1PB)", bibyte(PiByte), "1.0 PiB"}, - {"bytes(1PB - 1T)", bibyte(EiByte - PiByte), "1023 PiB"}, - - {"bytes(1EiB)", bibyte(EiByte), "1.0 EiB"}, - // Overflows. - // {"bytes(1EB - 1P)", bibyte((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", bibyte(5.5 * GiByte), "5.5 GiB"}, - - {"bytes(5.5GB)", bbyte(5.5 * GByte), "5.5 GB"}, - }.validate(t) -} - -func TestVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("15347691069326346944512", 10) - s := BigBytes(b) - if s != "15 ZB" { - t.Errorf("Expected 15 ZB, got %v", s) - } - s = BigIBytes(b) - if s != "13 ZiB" { - t.Errorf("Expected 13 ZiB, got %v", s) - } - - b, _ = (&big.Int{}).SetString("15716035654990179271180288", 10) - s = BigBytes(b) - if s != "16 YB" { - t.Errorf("Expected 16 YB, got %v", s) - } - s = BigIBytes(b) - if s != "13 YiB" { - t.Errorf("Expected 13 YiB, got %v", s) - } -} - -func TestVeryVeryBigBytes(t *testing.T) { - b, _ := (&big.Int{}).SetString("16093220510709943573688614912", 10) - s := BigBytes(b) - if s != "16093 YB" { - t.Errorf("Expected 16093 YB, got %v", s) - } - s = BigIBytes(b) - if s != "13312 YiB" { - t.Errorf("Expected 13312 YiB, got %v", s) - } -} - -func TestParseVeryBig(t *testing.T) { - tests := []struct { - in string - out string - }{ - {"16 ZB", "16000000000000000000000"}, - {"16 ZiB", "18889465931478580854784"}, - {"16.5 ZB", "16500000000000000000000"}, - {"16.5 ZiB", "19479761741837286506496"}, - {"16 Z", "16000000000000000000000"}, - {"16 Zi", "18889465931478580854784"}, - {"16.5 Z", "16500000000000000000000"}, - {"16.5 Zi", "19479761741837286506496"}, - - {"16 YB", "16000000000000000000000000"}, - {"16 YiB", "19342813113834066795298816"}, - {"16.5 YB", "16500000000000000000000000"}, - {"16.5 YiB", "19947276023641381382651904"}, - {"16 Y", "16000000000000000000000000"}, - {"16 Yi", "19342813113834066795298816"}, - {"16.5 Y", "16500000000000000000000000"}, - {"16.5 Yi", "19947276023641381382651904"}, - } - - for _, test := range tests { - x, err := ParseBigBytes(test.in) - if err != nil { - t.Errorf("Error parsing %q: %v", test.in, err) - continue - } - - if x.String() != test.out { - t.Errorf("Expected %q for %q, got %v", test.out, test.in, x) - } - } -} - -func BenchmarkParseBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBigBytes("16.5 Z") - } -} - -func BenchmarkBigBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - bibyte(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go deleted file mode 100644 index 99cad92..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestByteParsing(t *testing.T) { - tests := []struct { - in string - exp uint64 - }{ - {"42", 42}, - {"42MB", 42000000}, - {"42MiB", 
44040192}, - {"42mb", 42000000}, - {"42mib", 44040192}, - {"42MIB", 44040192}, - {"42 MB", 42000000}, - {"42 MiB", 44040192}, - {"42 mb", 42000000}, - {"42 mib", 44040192}, - {"42 MIB", 44040192}, - {"42.5MB", 42500000}, - {"42.5MiB", 44564480}, - {"42.5 MB", 42500000}, - {"42.5 MiB", 44564480}, - // No need to say B - {"42M", 42000000}, - {"42Mi", 44040192}, - {"42m", 42000000}, - {"42mi", 44040192}, - {"42MI", 44040192}, - {"42 M", 42000000}, - {"42 Mi", 44040192}, - {"42 m", 42000000}, - {"42 mi", 44040192}, - {"42 MI", 44040192}, - {"42.5M", 42500000}, - {"42.5Mi", 44564480}, - {"42.5 M", 42500000}, - {"42.5 Mi", 44564480}, - // Large testing, breaks when too much larger than - // this. - {"12.5 EB", uint64(12.5 * float64(EByte))}, - {"12.5 E", uint64(12.5 * float64(EByte))}, - {"12.5 EiB", uint64(12.5 * float64(EiByte))}, - } - - for _, p := range tests { - got, err := ParseBytes(p.in) - if err != nil { - t.Errorf("Couldn't parse %v: %v", p.in, err) - } - if got != p.exp { - t.Errorf("Expected %v for %v, got %v", - p.exp, p.in, got) - } - } -} - -func TestByteErrors(t *testing.T) { - got, err := ParseBytes("84 JB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } - got, err = ParseBytes("") - if err == nil { - t.Errorf("Expected error parsing nothing") - } - got, err = ParseBytes("16 EiB") - if err == nil { - t.Errorf("Expected error, got %v", got) - } -} - -func TestBytes(t *testing.T) { - testList{ - {"bytes(0)", Bytes(0), "0 B"}, - {"bytes(1)", Bytes(1), "1 B"}, - {"bytes(803)", Bytes(803), "803 B"}, - {"bytes(999)", Bytes(999), "999 B"}, - - {"bytes(1024)", Bytes(1024), "1.0 kB"}, - {"bytes(9999)", Bytes(9999), "10 kB"}, - {"bytes(1MB - 1)", Bytes(MByte - Byte), "1000 kB"}, - - {"bytes(1MB)", Bytes(1024 * 1024), "1.0 MB"}, - {"bytes(1GB - 1K)", Bytes(GByte - KByte), "1000 MB"}, - - {"bytes(1GB)", Bytes(GByte), "1.0 GB"}, - {"bytes(1TB - 1M)", Bytes(TByte - MByte), "1000 GB"}, - {"bytes(10MB)", Bytes(9999 * 1000), "10 MB"}, - - {"bytes(1TB)", Bytes(TByte), "1.0 TB"}, - {"bytes(1PB - 1T)", Bytes(PByte - TByte), "999 TB"}, - - {"bytes(1PB)", Bytes(PByte), "1.0 PB"}, - {"bytes(1PB - 1T)", Bytes(EByte - PByte), "999 PB"}, - - {"bytes(1EB)", Bytes(EByte), "1.0 EB"}, - // Overflows. - // {"bytes(1EB - 1P)", Bytes((KByte*EByte)-PByte), "1023EB"}, - - {"bytes(0)", IBytes(0), "0 B"}, - {"bytes(1)", IBytes(1), "1 B"}, - {"bytes(803)", IBytes(803), "803 B"}, - {"bytes(1023)", IBytes(1023), "1023 B"}, - - {"bytes(1024)", IBytes(1024), "1.0 KiB"}, - {"bytes(1MB - 1)", IBytes(MiByte - IByte), "1024 KiB"}, - - {"bytes(1MB)", IBytes(1024 * 1024), "1.0 MiB"}, - {"bytes(1GB - 1K)", IBytes(GiByte - KiByte), "1024 MiB"}, - - {"bytes(1GB)", IBytes(GiByte), "1.0 GiB"}, - {"bytes(1TB - 1M)", IBytes(TiByte - MiByte), "1024 GiB"}, - - {"bytes(1TB)", IBytes(TiByte), "1.0 TiB"}, - {"bytes(1PB - 1T)", IBytes(PiByte - TiByte), "1023 TiB"}, - - {"bytes(1PB)", IBytes(PiByte), "1.0 PiB"}, - {"bytes(1PB - 1T)", IBytes(EiByte - PiByte), "1023 PiB"}, - - {"bytes(1EiB)", IBytes(EiByte), "1.0 EiB"}, - // Overflows. 
- // {"bytes(1EB - 1P)", IBytes((KIByte*EIByte)-PiByte), "1023EB"}, - - {"bytes(5.5GiB)", IBytes(5.5 * GiByte), "5.5 GiB"}, - - {"bytes(5.5GB)", Bytes(5.5 * GByte), "5.5 GB"}, - }.validate(t) -} - -func BenchmarkParseBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseBytes("16.5 GB") - } -} - -func BenchmarkBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - Bytes(16.5 * GByte) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go deleted file mode 100644 index 49040fb..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package humanize - -import ( - "math" - "math/big" - "testing" -) - -func TestCommas(t *testing.T) { - testList{ - {"0", Comma(0), "0"}, - {"10", Comma(10), "10"}, - {"100", Comma(100), "100"}, - {"1,000", Comma(1000), "1,000"}, - {"10,000", Comma(10000), "10,000"}, - {"100,000", Comma(100000), "100,000"}, - {"10,000,000", Comma(10000000), "10,000,000"}, - {"10,100,000", Comma(10100000), "10,100,000"}, - {"10,010,000", Comma(10010000), "10,010,000"}, - {"10,001,000", Comma(10001000), "10,001,000"}, - {"123,456,789", Comma(123456789), "123,456,789"}, - {"maxint", Comma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", Comma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", Comma(-123456789), "-123,456,789"}, - {"-10,100,000", Comma(-10100000), "-10,100,000"}, - {"-10,010,000", Comma(-10010000), "-10,010,000"}, - {"-10,001,000", Comma(-10001000), "-10,001,000"}, - {"-10,000,000", Comma(-10000000), "-10,000,000"}, - {"-100,000", Comma(-100000), "-100,000"}, - {"-10,000", Comma(-10000), "-10,000"}, - {"-1,000", Comma(-1000), "-1,000"}, - {"-100", Comma(-100), "-100"}, - {"-10", Comma(-10), "-10"}, - }.validate(t) -} - -func TestCommafs(t *testing.T) { - testList{ - {"0", Commaf(0), "0"}, - {"10.11", Commaf(10.11), "10.11"}, - {"100", Commaf(100), "100"}, - {"1,000", Commaf(1000), "1,000"}, - {"10,000", Commaf(10000), "10,000"}, - {"100,000", Commaf(100000), "100,000"}, - {"834,142.32", Commaf(834142.32), "834,142.32"}, - {"10,000,000", Commaf(10000000), "10,000,000"}, - {"10,100,000", Commaf(10100000), "10,100,000"}, - {"10,010,000", Commaf(10010000), "10,010,000"}, - {"10,001,000", Commaf(10001000), "10,001,000"}, - {"123,456,789", Commaf(123456789), "123,456,789"}, - {"maxf64", Commaf(math.MaxFloat64), "179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000"}, - {"minf64", Commaf(math.SmallestNonzeroFloat64), "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005"}, - {"-123,456,789", Commaf(-123456789), "-123,456,789"}, - {"-10,100,000", Commaf(-10100000), "-10,100,000"}, - {"-10,010,000", Commaf(-10010000), "-10,010,000"}, - {"-10,001,000", Commaf(-10001000), "-10,001,000"}, - {"-10,000,000", Commaf(-10000000), "-10,000,000"}, - {"-100,000", Commaf(-100000), 
"-100,000"}, - {"-10,000", Commaf(-10000), "-10,000"}, - {"-1,000", Commaf(-1000), "-1,000"}, - {"-100.11", Commaf(-100.11), "-100.11"}, - {"-10", Commaf(-10), "-10"}, - }.validate(t) -} - -func BenchmarkCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - Comma(1234567890) - } -} - -func BenchmarkCommaf(b *testing.B) { - for i := 0; i < b.N; i++ { - Commaf(1234567890.83584) - } -} - -func BenchmarkBigCommas(b *testing.B) { - for i := 0; i < b.N; i++ { - BigComma(big.NewInt(1234567890)) - } -} - -func bigComma(i int64) string { - return BigComma(big.NewInt(i)) -} - -func TestBigCommas(t *testing.T) { - testList{ - {"0", bigComma(0), "0"}, - {"10", bigComma(10), "10"}, - {"100", bigComma(100), "100"}, - {"1,000", bigComma(1000), "1,000"}, - {"10,000", bigComma(10000), "10,000"}, - {"100,000", bigComma(100000), "100,000"}, - {"10,000,000", bigComma(10000000), "10,000,000"}, - {"10,100,000", bigComma(10100000), "10,100,000"}, - {"10,010,000", bigComma(10010000), "10,010,000"}, - {"10,001,000", bigComma(10001000), "10,001,000"}, - {"123,456,789", bigComma(123456789), "123,456,789"}, - {"maxint", bigComma(9.223372e+18), "9,223,372,000,000,000,000"}, - {"minint", bigComma(-9.223372e+18), "-9,223,372,000,000,000,000"}, - {"-123,456,789", bigComma(-123456789), "-123,456,789"}, - {"-10,100,000", bigComma(-10100000), "-10,100,000"}, - {"-10,010,000", bigComma(-10010000), "-10,010,000"}, - {"-10,001,000", bigComma(-10001000), "-10,001,000"}, - {"-10,000,000", bigComma(-10000000), "-10,000,000"}, - {"-100,000", bigComma(-100000), "-100,000"}, - {"-10,000", bigComma(-10000), "-10,000"}, - {"-1,000", bigComma(-1000), "-1,000"}, - {"-100", bigComma(-100), "-100"}, - {"-10", bigComma(-10), "-10"}, - }.validate(t) -} - -func TestVeryBigCommas(t *testing.T) { - tests := []struct{ in, exp string }{ - { - "84889279597249724975972597249849757294578485", - "84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - { - "-84889279597249724975972597249849757294578485", - "-84,889,279,597,249,724,975,972,597,249,849,757,294,578,485", - }, - } - for _, test := range tests { - n, _ := (&big.Int{}).SetString(test.in, 10) - got := BigComma(n) - if test.exp != got { - t.Errorf("Expected %q, got %q", test.exp, got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go deleted file mode 100644 index fc7db15..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/common_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package humanize - -import ( - "testing" -) - -type testList []struct { - name, got, exp string -} - -func (tl testList) validate(t *testing.T) { - for _, test := range tl { - if test.got != test.exp { - t.Errorf("On %v, expected '%v', but got '%v'", - test.name, test.exp, test.got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go deleted file mode 100644 index 276d411..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package humanize - -import ( - "fmt" - "regexp" - "strconv" - "testing" -) - -func TestFtoa(t *testing.T) { - testList{ - {"200", Ftoa(200), "200"}, - {"2", Ftoa(2), "2"}, - {"2.2", Ftoa(2.2), "2.2"}, - {"2.02", Ftoa(2.02), "2.02"}, - {"200.02", Ftoa(200.02), "200.02"}, - }.validate(t) -} - -func BenchmarkFtoaRegexTrailing(b *testing.B) { - trailingZerosRegex := regexp.MustCompile(`\.?0+$`) - - b.ResetTimer() - 
for i := 0; i < b.N; i++ { - trailingZerosRegex.ReplaceAllString("2.00000", "") - trailingZerosRegex.ReplaceAllString("2.0000", "") - trailingZerosRegex.ReplaceAllString("2.000", "") - trailingZerosRegex.ReplaceAllString("2.00", "") - trailingZerosRegex.ReplaceAllString("2.0", "") - trailingZerosRegex.ReplaceAllString("2", "") - } -} - -func BenchmarkFtoaFunc(b *testing.B) { - for i := 0; i < b.N; i++ { - stripTrailingZeros("2.00000") - stripTrailingZeros("2.0000") - stripTrailingZeros("2.000") - stripTrailingZeros("2.00") - stripTrailingZeros("2.0") - stripTrailingZeros("2") - } -} - -func BenchmarkFmtF(b *testing.B) { - for i := 0; i < b.N; i++ { - _ = fmt.Sprintf("%f", 2.03584) - } -} - -func BenchmarkStrconvF(b *testing.B) { - for i := 0; i < b.N; i++ { - strconv.FormatFloat(2.03584, 'f', 6, 64) - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go deleted file mode 100644 index dd38a5b..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/number_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -type TestStruct struct { - name string - format string - num float64 - formatted string -} - -func TestFormatFloat(t *testing.T) { - tests := []TestStruct{ - {"default", "", 12345.6789, "12,345.68"}, - {"#", "#", 12345.6789, "12345.678900000"}, - {"#.", "#.", 12345.6789, "12346"}, - {"#,#", "#,#", 12345.6789, "12345,7"}, - {"#,##", "#,##", 12345.6789, "12345,68"}, - {"#,###", "#,###", 12345.6789, "12345,679"}, - {"#,###.", "#,###.", 12345.6789, "12,346"}, - {"#,###.##", "#,###.##", 12345.6789, "12,345.68"}, - {"#,###.###", "#,###.###", 12345.6789, "12,345.679"}, - {"#,###.####", "#,###.####", 12345.6789, "12,345.6789"}, - {"#.###,######", "#.###,######", 12345.6789, "12.345,678900"}, - {"#\u202f###,##", "#\u202f###,##", 12345.6789, "12 345,68"}, - - // special cases - {"NaN", "#", math.NaN(), "NaN"}, - {"+Inf", "#", math.Inf(1), "Infinity"}, - {"-Inf", "#", math.Inf(-1), "-Infinity"}, - {"signStr <= -0.000000001", "", -0.000000002, "-0.00"}, - {"signStr = 0", "", 0, "0.00"}, - {"Format directive must start with +", "+000", 12345.6789, "+12345.678900000"}, - } - - for _, test := range tests { - got := FormatFloat(test.format, test.num) - if got != test.formatted { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - test.name, test.format, test.num, got, test.formatted) - } - } - // Test a single integer - got := FormatInteger("#", 12345) - if got != "12345.000000000" { - t.Errorf("On %v (%v, %v), got %v, wanted %v", - "integerTest", "#", 12345, got, "12345.000000000") - } - // Test the things that could panic - panictests := []TestStruct{ - {"RenderFloat(): invalid positive sign directive", "-", 12345.6789, "12,345.68"}, - {"RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers", "0.01", 12345.6789, "12,345.68"}, - } - for _, test := range panictests { - didPanic := false - var message interface{} - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - } - }() - - // call the target function - _ = FormatFloat(test.format, test.num) - - }() - if didPanic != true { - t.Errorf("On %v, should have panic and did not.", - test.name) - } - } - -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go deleted file mode 100644 index 51d85ee..0000000 --- 
a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package humanize - -import ( - "testing" -) - -func TestOrdinals(t *testing.T) { - testList{ - {"0", Ordinal(0), "0th"}, - {"1", Ordinal(1), "1st"}, - {"2", Ordinal(2), "2nd"}, - {"3", Ordinal(3), "3rd"}, - {"4", Ordinal(4), "4th"}, - {"10", Ordinal(10), "10th"}, - {"11", Ordinal(11), "11th"}, - {"12", Ordinal(12), "12th"}, - {"13", Ordinal(13), "13th"}, - {"101", Ordinal(101), "101st"}, - {"102", Ordinal(102), "102nd"}, - {"103", Ordinal(103), "103rd"}, - }.validate(t) -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go deleted file mode 100644 index 4940378..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/si_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package humanize - -import ( - "math" - "testing" -) - -func TestSI(t *testing.T) { - tests := []struct { - name string - num float64 - formatted string - }{ - {"e-24", 1e-24, "1 yF"}, - {"e-21", 1e-21, "1 zF"}, - {"e-18", 1e-18, "1 aF"}, - {"e-15", 1e-15, "1 fF"}, - {"e-12", 1e-12, "1 pF"}, - {"e-12", 2.2345e-12, "2.2345 pF"}, - {"e-12", 2.23e-12, "2.23 pF"}, - {"e-11", 2.23e-11, "22.3 pF"}, - {"e-10", 2.2e-10, "220 pF"}, - {"e-9", 2.2e-9, "2.2 nF"}, - {"e-8", 2.2e-8, "22 nF"}, - {"e-7", 2.2e-7, "220 nF"}, - {"e-6", 2.2e-6, "2.2 µF"}, - {"e-6", 1e-6, "1 µF"}, - {"e-5", 2.2e-5, "22 µF"}, - {"e-4", 2.2e-4, "220 µF"}, - {"e-3", 2.2e-3, "2.2 mF"}, - {"e-2", 2.2e-2, "22 mF"}, - {"e-1", 2.2e-1, "220 mF"}, - {"e+0", 2.2e-0, "2.2 F"}, - {"e+0", 2.2, "2.2 F"}, - {"e+1", 2.2e+1, "22 F"}, - {"0", 0, "0 F"}, - {"e+1", 22, "22 F"}, - {"e+2", 2.2e+2, "220 F"}, - {"e+2", 220, "220 F"}, - {"e+3", 2.2e+3, "2.2 kF"}, - {"e+3", 2200, "2.2 kF"}, - {"e+4", 2.2e+4, "22 kF"}, - {"e+4", 22000, "22 kF"}, - {"e+5", 2.2e+5, "220 kF"}, - {"e+6", 2.2e+6, "2.2 MF"}, - {"e+6", 1e+6, "1 MF"}, - {"e+7", 2.2e+7, "22 MF"}, - {"e+8", 2.2e+8, "220 MF"}, - {"e+9", 2.2e+9, "2.2 GF"}, - {"e+10", 2.2e+10, "22 GF"}, - {"e+11", 2.2e+11, "220 GF"}, - {"e+12", 2.2e+12, "2.2 TF"}, - {"e+15", 2.2e+15, "2.2 PF"}, - {"e+18", 2.2e+18, "2.2 EF"}, - {"e+21", 2.2e+21, "2.2 ZF"}, - {"e+24", 2.2e+24, "2.2 YF"}, - - // special case - {"1F", 1000 * 1000, "1 MF"}, - {"1F", 1e6, "1 MF"}, - } - - for _, test := range tests { - got := SI(test.num, "F") - if got != test.formatted { - t.Errorf("On %v (%v), got %v, wanted %v", - test.name, test.num, got, test.formatted) - } - - gotf, gotu, err := ParseSI(test.formatted) - if err != nil { - t.Errorf("Error parsing %v (%v): %v", test.name, test.formatted, err) - continue - } - - if math.Abs(1-(gotf/test.num)) > 0.01 { - t.Errorf("On %v (%v), got %v, wanted %v (±%v)", - test.name, test.formatted, gotf, test.num, - math.Abs(1-(gotf/test.num))) - } - if gotu != "F" { - t.Errorf("On %v (%v), expected unit F, got %v", - test.name, test.formatted, gotu) - } - } - - // Parse error - gotf, gotu, err := ParseSI("x1.21JW") // 1.21 jigga whats - if err == nil { - t.Errorf("Expected error on x1.21JW, got %v %v", gotf, gotu) - } -} - -func BenchmarkParseSI(b *testing.B) { - for i := 0; i < b.N; i++ { - ParseSI("2.2346ZB") - } -} diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go b/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go deleted file mode 100644 index 528daa4..0000000 --- a/Godeps/_workspace/src/github.com/dustin/go-humanize/times_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package humanize - -import ( - "math" - 
"testing" - "time" -) - -func TestPast(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second ago", Time(time.Unix(now-1, 0)), "1 second ago"}, - {"12 seconds ago", Time(time.Unix(now-12, 0)), "12 seconds ago"}, - {"30 seconds ago", Time(time.Unix(now-30, 0)), "30 seconds ago"}, - {"45 seconds ago", Time(time.Unix(now-45, 0)), "45 seconds ago"}, - {"1 minute ago", Time(time.Unix(now-63, 0)), "1 minute ago"}, - {"15 minutes ago", Time(time.Unix(now-15*Minute, 0)), "15 minutes ago"}, - {"1 hour ago", Time(time.Unix(now-63*Minute, 0)), "1 hour ago"}, - {"2 hours ago", Time(time.Unix(now-2*Hour, 0)), "2 hours ago"}, - {"21 hours ago", Time(time.Unix(now-21*Hour, 0)), "21 hours ago"}, - {"1 day ago", Time(time.Unix(now-26*Hour, 0)), "1 day ago"}, - {"2 days ago", Time(time.Unix(now-49*Hour, 0)), "2 days ago"}, - {"3 days ago", Time(time.Unix(now-3*Day, 0)), "3 days ago"}, - {"1 week ago (1)", Time(time.Unix(now-7*Day, 0)), "1 week ago"}, - {"1 week ago (2)", Time(time.Unix(now-12*Day, 0)), "1 week ago"}, - {"2 weeks ago", Time(time.Unix(now-15*Day, 0)), "2 weeks ago"}, - {"1 month ago", Time(time.Unix(now-39*Day, 0)), "1 month ago"}, - {"3 months ago", Time(time.Unix(now-99*Day, 0)), "3 months ago"}, - {"1 year ago (1)", Time(time.Unix(now-365*Day, 0)), "1 year ago"}, - {"1 year ago (1)", Time(time.Unix(now-400*Day, 0)), "1 year ago"}, - {"2 years ago (1)", Time(time.Unix(now-548*Day, 0)), "2 years ago"}, - {"2 years ago (2)", Time(time.Unix(now-725*Day, 0)), "2 years ago"}, - {"2 years ago (3)", Time(time.Unix(now-800*Day, 0)), "2 years ago"}, - {"3 years ago", Time(time.Unix(now-3*Year, 0)), "3 years ago"}, - {"long ago", Time(time.Unix(now-LongTime, 0)), "a long while ago"}, - }.validate(t) -} - -func TestFuture(t *testing.T) { - now := time.Now().Unix() - testList{ - {"now", Time(time.Unix(now, 0)), "now"}, - {"1 second from now", Time(time.Unix(now+1, 0)), "1 second from now"}, - {"12 seconds from now", Time(time.Unix(now+12, 0)), "12 seconds from now"}, - {"30 seconds from now", Time(time.Unix(now+30, 0)), "30 seconds from now"}, - {"45 seconds from now", Time(time.Unix(now+45, 0)), "45 seconds from now"}, - {"15 minutes from now", Time(time.Unix(now+15*Minute, 0)), "15 minutes from now"}, - {"2 hours from now", Time(time.Unix(now+2*Hour, 0)), "2 hours from now"}, - {"21 hours from now", Time(time.Unix(now+21*Hour, 0)), "21 hours from now"}, - {"1 day from now", Time(time.Unix(now+26*Hour, 0)), "1 day from now"}, - {"2 days from now", Time(time.Unix(now+49*Hour, 0)), "2 days from now"}, - {"3 days from now", Time(time.Unix(now+3*Day, 0)), "3 days from now"}, - {"1 week from now (1)", Time(time.Unix(now+7*Day, 0)), "1 week from now"}, - {"1 week from now (2)", Time(time.Unix(now+12*Day, 0)), "1 week from now"}, - {"2 weeks from now", Time(time.Unix(now+15*Day, 0)), "2 weeks from now"}, - {"1 month from now", Time(time.Unix(now+30*Day, 0)), "1 month from now"}, - {"1 year from now", Time(time.Unix(now+365*Day, 0)), "1 year from now"}, - {"2 years from now", Time(time.Unix(now+2*Year, 0)), "2 years from now"}, - {"a while from now", Time(time.Unix(now+LongTime, 0)), "a long while from now"}, - }.validate(t) -} - -func TestRange(t *testing.T) { - start := time.Time{} - end := time.Unix(math.MaxInt64, math.MaxInt64) - x := RelTime(start, end, "ago", "from now") - if x != "a long while from now" { - t.Errorf("Expected a long while from now, got %q", x) - } -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore 
b/vendor/github.com/boltdb/bolt/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/.gitignore rename to vendor/github.com/boltdb/bolt/.gitignore diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/LICENSE rename to vendor/github.com/boltdb/bolt/LICENSE diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/Makefile rename to vendor/github.com/boltdb/bolt/Makefile diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md similarity index 98% rename from Godeps/_workspace/src/github.com/boltdb/bolt/README.md rename to vendor/github.com/boltdb/bolt/README.md index 82e8574..8a21c6e 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ b/vendor/github.com/boltdb/bolt/README.md @@ -835,6 +835,9 @@ Below is a list of public, open source projects that use Bolt: backed by boltdb. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. * [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service +* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. +* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/boltdb/bolt/appveyor.yml new file mode 100644 index 0000000..6e26e94 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/appveyor.yml @@ -0,0 +1,18 @@ +version: "{build}" + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\boltdb\bolt + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - go version + - go env + - go get -v -t ./... + +build_script: + - go test -v ./... 
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_386.go rename to vendor/github.com/boltdb/bolt/bolt_386.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_amd64.go rename to vendor/github.com/boltdb/bolt/bolt_amd64.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm.go rename to vendor/github.com/boltdb/bolt/bolt_arm.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go rename to vendor/github.com/boltdb/bolt/bolt_arm64.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go rename to vendor/github.com/boltdb/bolt/bolt_linux.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go rename to vendor/github.com/boltdb/bolt/bolt_openbsd.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 0000000..645ddc3 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 0000000..2dc6be0 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,9 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go rename to vendor/github.com/boltdb/bolt/bolt_ppc64le.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go rename to vendor/github.com/boltdb/bolt/bolt_s390x.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go similarity index 89% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go rename to vendor/github.com/boltdb/bolt/bolt_unix.go index 4b0723a..cad62dd 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -11,7 +11,7 @@ import ( ) // flock acquires an advisory lock on a file descriptor. 
-func flock(f *os.File, exclusive bool, timeout time.Duration) error { +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time for { // If we're beyond our timeout then return an error. @@ -27,7 +27,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB) + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) if err == nil { return nil } else if err != syscall.EWOULDBLOCK { @@ -40,8 +40,8 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return syscall.Flock(int(f.Fd()), syscall.LOCK_UN) +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) } // mmap memory maps a DB's data file. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go similarity index 87% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go rename to vendor/github.com/boltdb/bolt/bolt_unix_solaris.go index 1c4e48d..307bf2b 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -11,7 +11,7 @@ import ( ) // flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, exclusive bool, timeout time.Duration) error { +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time for { // If we're beyond our timeout then return an error. @@ -32,7 +32,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } else { lock.Type = syscall.F_RDLCK } - err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock) + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) if err == nil { return nil } else if err != syscall.EAGAIN { @@ -45,13 +45,13 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { +func funlock(db *DB) error { var lock syscall.Flock_t lock.Start = 0 lock.Len = 0 lock.Type = syscall.F_UNLCK lock.Whence = 0 - return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock) + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) } // mmap memory maps a DB's data file. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go similarity index 83% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go rename to vendor/github.com/boltdb/bolt/bolt_windows.go index 91c4968..d538e6a 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -16,6 +16,8 @@ var ( ) const ( + lockExt = ".lock" + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx flagLockExclusive = 2 flagLockFailImmediately = 1 @@ -46,7 +48,16 @@ func fdatasync(db *DB) error { } // flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, exclusive bool, timeout time.Duration) error { +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). 
+ f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + var t time.Time for { // If we're beyond our timeout then return an error. @@ -62,7 +73,7 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { flag |= flagLockExclusive } - err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) if err == nil { return nil } else if err != errLockViolation { @@ -75,8 +86,11 @@ func flock(f *os.File, exclusive bool, timeout time.Duration) error { } // funlock releases an advisory lock on a file descriptor. -func funlock(f *os.File) error { - return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{}) +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path+lockExt) + return err } // mmap memory maps a DB's data file. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go rename to vendor/github.com/boltdb/bolt/boltsync_unix.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go rename to vendor/github.com/boltdb/bolt/bucket.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go rename to vendor/github.com/boltdb/bolt/cursor.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go similarity index 98% rename from Godeps/_workspace/src/github.com/boltdb/bolt/db.go rename to vendor/github.com/boltdb/bolt/db.go index 0f1e1bc..501d36a 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/boltdb/bolt/db.go @@ -93,6 +93,7 @@ type DB struct { path string file *os.File + lockfile *os.File // windows only dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int @@ -177,7 +178,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db.file, !db.readOnly, options.Timeout); err != nil { + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { _ = db.close() return nil, err } @@ -379,6 +380,10 @@ func (db *DB) Close() error { } func (db *DB) close() error { + if !db.opened { + return nil + } + db.opened = false db.freelist = nil @@ -397,7 +402,7 @@ func (db *DB) close() error { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. - if err := funlock(db.file); err != nil { + if err := funlock(db); err != nil { log.Printf("bolt.Close(): funlock error: %s", err) } } @@ -824,8 +829,10 @@ func (db *DB) grow(sz int) error { // Truncate and fsync to ensure file size metadata is flushed. 
// https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } } if err := db.file.Sync(); err != nil { return fmt.Errorf("file sync error: %s", err) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/doc.go rename to vendor/github.com/boltdb/bolt/doc.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/errors.go rename to vendor/github.com/boltdb/bolt/errors.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/freelist.go rename to vendor/github.com/boltdb/bolt/freelist.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/node.go rename to vendor/github.com/boltdb/bolt/node.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go similarity index 100% rename from Godeps/_workspace/src/github.com/boltdb/bolt/page.go rename to vendor/github.com/boltdb/bolt/page.go diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go similarity index 95% rename from Godeps/_workspace/src/github.com/boltdb/bolt/tx.go rename to vendor/github.com/boltdb/bolt/tx.go index e74d2ca..299c073 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -297,12 +297,34 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } defer func() { _ = f.Close() }() - // Copy the meta pages. - tx.db.metalock.Lock() - n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) - tx.db.metalock.Unlock() + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) if err != nil { - return n, fmt.Errorf("meta copy: %s", err) + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) } // Copy data pages. 
diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore b/vendor/github.com/dustin/go-humanize/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/.gitignore rename to vendor/github.com/dustin/go-humanize/.gitignore diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/LICENSE rename to vendor/github.com/dustin/go-humanize/LICENSE diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/README.markdown rename to vendor/github.com/dustin/go-humanize/README.markdown diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/big.go rename to vendor/github.com/dustin/go-humanize/big.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/bigbytes.go rename to vendor/github.com/dustin/go-humanize/bigbytes.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/bytes.go rename to vendor/github.com/dustin/go-humanize/bytes.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/comma.go rename to vendor/github.com/dustin/go-humanize/comma.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/ftoa.go rename to vendor/github.com/dustin/go-humanize/ftoa.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/humanize.go rename to vendor/github.com/dustin/go-humanize/humanize.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/number.go rename to vendor/github.com/dustin/go-humanize/number.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/ordinals.go rename to vendor/github.com/dustin/go-humanize/ordinals.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/si.go rename to vendor/github.com/dustin/go-humanize/si.go diff --git a/Godeps/_workspace/src/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go similarity index 100% rename from Godeps/_workspace/src/github.com/dustin/go-humanize/times.go rename to 
vendor/github.com/dustin/go-humanize/times.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/LICENSE rename to vendor/golang.org/x/sys/LICENSE diff --git a/Godeps/_workspace/src/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/PATENTS rename to vendor/golang.org/x/sys/PATENTS diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/.gitignore b/vendor/golang.org/x/sys/unix/.gitignore similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/.gitignore rename to vendor/golang.org/x/sys/unix/.gitignore diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm.s b/vendor/golang.org/x/sys/unix/asm.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm.s rename to vendor/golang.org/x/sys/unix/asm.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/golang.org/x/sys/unix/asm_darwin_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_386.s rename to vendor/golang.org/x/sys/unix/asm_darwin_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_amd64.s rename to vendor/golang.org/x/sys/unix/asm_darwin_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_arm.s rename to vendor/golang.org/x/sys/unix/asm_darwin_arm.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_darwin_arm64.s rename to vendor/golang.org/x/sys/unix/asm_darwin_arm64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_dragonfly_386.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_dragonfly_386.s rename to vendor/golang.org/x/sys/unix/asm_dragonfly_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_dragonfly_amd64.s rename to vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/golang.org/x/sys/unix/asm_freebsd_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_386.s rename to vendor/golang.org/x/sys/unix/asm_freebsd_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_freebsd_arm.s rename to vendor/golang.org/x/sys/unix/asm_freebsd_arm.s diff --git 
a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_386.s rename to vendor/golang.org/x/sys/unix/asm_linux_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_amd64.s rename to vendor/golang.org/x/sys/unix/asm_linux_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_arm.s rename to vendor/golang.org/x/sys/unix/asm_linux_arm.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_arm64.s rename to vendor/golang.org/x/sys/unix/asm_linux_arm64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_mips64x.s rename to vendor/golang.org/x/sys/unix/asm_linux_mips64x.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_linux_ppc64x.s rename to vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/golang.org/x/sys/unix/asm_netbsd_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_386.s rename to vendor/golang.org/x/sys/unix/asm_netbsd_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_netbsd_arm.s rename to vendor/golang.org/x/sys/unix/asm_netbsd_arm.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/golang.org/x/sys/unix/asm_openbsd_386.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_openbsd_386.s rename to vendor/golang.org/x/sys/unix/asm_openbsd_386.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_openbsd_amd64.s rename to vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/asm_solaris_amd64.s rename to vendor/golang.org/x/sys/unix/asm_solaris_amd64.s diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/constants.go rename to 
vendor/golang.org/x/sys/unix/constants.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/env_unix.go rename to vendor/golang.org/x/sys/unix/env_unix.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/env_unset.go rename to vendor/golang.org/x/sys/unix/env_unset.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/flock.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/flock.go rename to vendor/golang.org/x/sys/unix/flock.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/flock_linux_32bit.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/flock_linux_32bit.go rename to vendor/golang.org/x/sys/unix/flock_linux_32bit.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/gccgo.go rename to vendor/golang.org/x/sys/unix/gccgo.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/gccgo_c.c rename to vendor/golang.org/x/sys/unix/gccgo_c.c diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/gccgo_linux_amd64.go rename to vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mkall.sh rename to vendor/golang.org/x/sys/unix/mkall.sh diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mkerrors.sh rename to vendor/golang.org/x/sys/unix/mkerrors.sh diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksyscall.pl b/vendor/golang.org/x/sys/unix/mksyscall.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksyscall.pl rename to vendor/golang.org/x/sys/unix/mksyscall.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksyscall_solaris.pl rename to vendor/golang.org/x/sys/unix/mksyscall_solaris.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysctl_openbsd.pl rename to vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_darwin.pl b/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_darwin.pl rename to vendor/golang.org/x/sys/unix/mksysnum_darwin.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_dragonfly.pl 
b/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_dragonfly.pl rename to vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_freebsd.pl rename to vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_linux.pl b/vendor/golang.org/x/sys/unix/mksysnum_linux.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_linux.pl rename to vendor/golang.org/x/sys/unix/mksysnum_linux.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_netbsd.pl rename to vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_openbsd.pl b/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/mksysnum_openbsd.pl rename to vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/race.go rename to vendor/golang.org/x/sys/unix/race.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/race0.go rename to vendor/golang.org/x/sys/unix/race0.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/sockcmsg_linux.go rename to vendor/golang.org/x/sys/unix/sockcmsg_linux.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/sockcmsg_unix.go rename to vendor/golang.org/x/sys/unix/sockcmsg_unix.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/str.go rename to vendor/golang.org/x/sys/unix/str.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall.go rename to vendor/golang.org/x/sys/unix/syscall.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_bsd.go rename to vendor/golang.org/x/sys/unix/syscall_bsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin.go rename to vendor/golang.org/x/sys/unix/syscall_darwin.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go similarity index 100% rename 
from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_386.go rename to vendor/golang.org/x/sys/unix/syscall_darwin_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_arm.go rename to vendor/golang.org/x/sys/unix/syscall_darwin_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_darwin_arm64.go rename to vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly.go rename to vendor/golang.org/x/sys/unix/syscall_dragonfly.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly_386.go rename to vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_dragonfly_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd.go rename to vendor/golang.org/x/sys/unix/syscall_freebsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_386.go rename to vendor/golang.org/x/sys/unix/syscall_freebsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_freebsd_arm.go rename to vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux.go rename to vendor/golang.org/x/sys/unix/syscall_linux.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go similarity index 100% rename from 
Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_386.go rename to vendor/golang.org/x/sys/unix/syscall_linux_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_arm.go rename to vendor/golang.org/x/sys/unix/syscall_linux_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_arm64.go rename to vendor/golang.org/x/sys/unix/syscall_linux_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_mips64x.go rename to vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_linux_ppc64x.go rename to vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd.go rename to vendor/golang.org/x/sys/unix/syscall_netbsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_386.go rename to vendor/golang.org/x/sys/unix/syscall_netbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_netbsd_arm.go rename to vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/golang.org/x/sys/unix/syscall_no_getwd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_no_getwd.go rename to vendor/golang.org/x/sys/unix/syscall_no_getwd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd.go rename to vendor/golang.org/x/sys/unix/syscall_openbsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go similarity index 100% rename from 
Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd_386.go rename to vendor/golang.org/x/sys/unix/syscall_openbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_openbsd_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_solaris.go rename to vendor/golang.org/x/sys/unix/syscall_solaris.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_solaris_amd64.go rename to vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/syscall_unix.go rename to vendor/golang.org/x/sys/unix/syscall_unix.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_darwin.go rename to vendor/golang.org/x/sys/unix/types_darwin.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_dragonfly.go rename to vendor/golang.org/x/sys/unix/types_dragonfly.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_freebsd.go rename to vendor/golang.org/x/sys/unix/types_freebsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_linux.go rename to vendor/golang.org/x/sys/unix/types_linux.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_netbsd.go rename to vendor/golang.org/x/sys/unix/types_netbsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_openbsd.go rename to vendor/golang.org/x/sys/unix/types_openbsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/types_solaris.go rename to vendor/golang.org/x/sys/unix/types_solaris.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_386.go rename to vendor/golang.org/x/sys/unix/zerrors_darwin_386.go diff --git 
a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_arm.go rename to vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_darwin_arm64.go rename to vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_dragonfly_386.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_dragonfly_386.go rename to vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_386.go rename to vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_freebsd_arm.go rename to vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_386.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_arm.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_arm64.go rename to 
vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_mips64.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_mips64le.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_ppc64.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_linux_ppc64le.go rename to vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_386.go rename to vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_netbsd_arm.go rename to vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_openbsd_386.go rename to vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_openbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zerrors_solaris_amd64.go rename to vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go similarity index 100% rename from 
Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_arm.go rename to vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_darwin_arm64.go rename to vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_dragonfly_386.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_dragonfly_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_freebsd_arm.go rename to vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_arm.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_arm64.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_mips64.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_mips64.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_mips64le.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_ppc64.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go rename to vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_netbsd_arm.go rename to vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_openbsd_386.go rename to vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsyscall_solaris_amd64.go rename to vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysctl_openbsd.go rename to vendor/golang.org/x/sys/unix/zsysctl_openbsd.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go diff --git 
a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_arm.go rename to vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_darwin_arm64.go rename to vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_dragonfly_386.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_dragonfly_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_freebsd_arm.go rename to vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_arm.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_arm64.go rename to 
vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_mips64.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_mips64le.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_ppc64.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go rename to vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_netbsd_arm.go rename to vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_openbsd_386.go rename to vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/zsysnum_solaris_amd64.go rename to vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_386.go rename to vendor/golang.org/x/sys/unix/ztypes_darwin_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go similarity index 100% rename from 
Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_arm.go rename to vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_darwin_arm64.go rename to vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_dragonfly_386.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_dragonfly_386.go rename to vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_386.go rename to vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_freebsd_arm.go rename to vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_386.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_arm.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_arm64.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go similarity index 100% rename from 
Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_mips64.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_mips64le.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_ppc64.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_linux_ppc64le.go rename to vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_386.go rename to vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_netbsd_arm.go rename to vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_openbsd_386.go rename to vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_openbsd_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/sys/unix/ztypes_solaris_amd64.go rename to vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go diff --git a/Godeps/_workspace/src/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/LICENSE rename to vendor/golang.org/x/tools/LICENSE diff --git a/Godeps/_workspace/src/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/PATENTS rename to vendor/golang.org/x/tools/PATENTS diff --git a/Godeps/_workspace/src/golang.org/x/tools/go/vcs/discovery.go b/vendor/golang.org/x/tools/go/vcs/discovery.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/go/vcs/discovery.go rename to vendor/golang.org/x/tools/go/vcs/discovery.go diff --git 
a/Godeps/_workspace/src/golang.org/x/tools/go/vcs/env.go b/vendor/golang.org/x/tools/go/vcs/env.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/go/vcs/env.go rename to vendor/golang.org/x/tools/go/vcs/env.go diff --git a/Godeps/_workspace/src/golang.org/x/tools/go/vcs/http.go b/vendor/golang.org/x/tools/go/vcs/http.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/go/vcs/http.go rename to vendor/golang.org/x/tools/go/vcs/http.go diff --git a/Godeps/_workspace/src/golang.org/x/tools/go/vcs/vcs.go b/vendor/golang.org/x/tools/go/vcs/vcs.go similarity index 100% rename from Godeps/_workspace/src/golang.org/x/tools/go/vcs/vcs.go rename to vendor/golang.org/x/tools/go/vcs/vcs.go