add Godeps, closes #10

shawnps
2015-02-07 07:32:38 -08:00
parent 3dd8e2d465
commit 86fb258cc0
67 changed files with 26444 additions and 0 deletions

15 Godeps/Godeps.json generated Normal file

@@ -0,0 +1,15 @@
{
"ImportPath": "github.com/gojp/goreportcard",
"GoVersion": "go1.4.1",
"Deps": [
{
"ImportPath": "gopkg.in/mgo.v2",
"Rev": "8c1ecfe7d8a0b5bc49a809f4c15955e514f77a80"
},
{
"ImportPath": "labix.org/v2/mgo/bson",
"Comment": "287",
"Rev": "gustavo@niemeyer.net-20140501184651-975yyw9ipld92pji"
}
]
}

5 Godeps/Readme generated Normal file

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

2 Godeps/_workspace/.gitignore generated vendored Normal file

@@ -0,0 +1,2 @@
/pkg
/bin

25 Godeps/_workspace/src/gopkg.in/mgo.v2/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
mgo - MongoDB driver for Go
Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

5 Godeps/_workspace/src/gopkg.in/mgo.v2/Makefile generated vendored Normal file

@@ -0,0 +1,5 @@
startdb:
@testdb/setup.sh start
stopdb:
@testdb/setup.sh stop

4 Godeps/_workspace/src/gopkg.in/mgo.v2/README.md generated vendored Normal file

@@ -0,0 +1,4 @@
The MongoDB driver for Go
-------------------------
Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.

467 Godeps/_workspace/src/gopkg.in/mgo.v2/auth.go generated vendored Normal file

@@ -0,0 +1,467 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"sync"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/internal/scram"
)
type authCmd struct {
Authenticate int
Nonce string
User string
Key string
}
type startSaslCmd struct {
StartSASL int `bson:"startSasl"`
}
type authResult struct {
ErrMsg string
Ok bool
}
type getNonceCmd struct {
GetNonce int
}
type getNonceResult struct {
Nonce string
Err string "$err"
Code int
}
type logoutCmd struct {
Logout int
}
type saslCmd struct {
Start int `bson:"saslStart,omitempty"`
Continue int `bson:"saslContinue,omitempty"`
ConversationId int `bson:"conversationId,omitempty"`
Mechanism string `bson:"mechanism,omitempty"`
Payload []byte
}
type saslResult struct {
Ok bool `bson:"ok"`
NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
Done bool
ConversationId int `bson:"conversationId"`
Payload []byte
ErrMsg string
}
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
func (socket *mongoSocket) getNonce() (nonce string, err error) {
socket.Lock()
for socket.cachedNonce == "" && socket.dead == nil {
debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
socket.gotNonce.Wait()
}
if socket.cachedNonce == "mongos" {
socket.Unlock()
return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
}
debugf("Socket %p to %s: got nonce", socket, socket.addr)
nonce, err = socket.cachedNonce, socket.dead
socket.cachedNonce = ""
socket.Unlock()
if err != nil {
nonce = ""
}
return
}
func (socket *mongoSocket) resetNonce() {
debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
op := &queryOp{}
op.query = &getNonceCmd{GetNonce: 1}
op.collection = "admin.$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
if err != nil {
socket.kill(errors.New("getNonce: "+err.Error()), true)
return
}
result := &getNonceResult{}
err = bson.Unmarshal(docData, &result)
if err != nil {
socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
return
}
debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
if result.Code == 13390 {
// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
result.Nonce = "mongos"
} else if result.Nonce == "" {
var msg string
if result.Err != "" {
msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
} else {
msg = "Got an empty nonce"
}
socket.kill(errors.New(msg), true)
return
}
socket.Lock()
if socket.cachedNonce != "" {
socket.Unlock()
panic("resetNonce: nonce already cached")
}
socket.cachedNonce = result.Nonce
socket.gotNonce.Signal()
socket.Unlock()
}
err := socket.Query(op)
if err != nil {
socket.kill(errors.New("resetNonce: "+err.Error()), true)
}
}
func (socket *mongoSocket) Login(cred Credential) error {
socket.Lock()
if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
cred.Mechanism = "SCRAM-SHA-1"
}
for _, sockCred := range socket.creds {
if sockCred == cred {
debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
socket.Unlock()
return nil
}
}
if socket.dropLogout(cred) {
debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
}
socket.Unlock()
debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
var err error
switch cred.Mechanism {
case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
err = socket.loginClassic(cred)
case "PLAIN":
err = socket.loginPlain(cred)
case "MONGODB-X509":
err = socket.loginX509(cred)
default:
// Try SASL for everything else, if it is available.
err = socket.loginSASL(cred)
}
if err != nil {
debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
} else {
debugf("Socket %p to %s: login successful", socket, socket.addr)
}
return err
}
func (socket *mongoSocket) loginClassic(cred Credential) error {
// Note that this only works properly because this function is
// synchronous, which means the nonce won't get reset while we're
// using it and any other login requests will block waiting for a
// new nonce provided in the defer call below.
nonce, err := socket.getNonce()
if err != nil {
return err
}
defer socket.resetNonce()
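// MONGODB-CR key derivation, as computed below:
// key = hex(md5(nonce + username + hex(md5(username + ":mongo:" + password))))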
psum := md5.New()
psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
ksum := md5.New()
ksum.Write([]byte(nonce + cred.Username))
ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
key := hex.EncodeToString(ksum.Sum(nil))
cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
type authX509Cmd struct {
Authenticate int
User string
Mechanism string
}
func (socket *mongoSocket) loginX509(cred Credential) error {
cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginPlain(cred Credential) error {
cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginSASL(cred Credential) error {
var sasl saslStepper
var err error
if cred.Mechanism == "SCRAM-SHA-1" {
// SCRAM is handled without external libraries.
sasl = saslNewScram(cred)
} else if len(cred.ServiceHost) > 0 {
sasl, err = saslNew(cred, cred.ServiceHost)
} else {
sasl, err = saslNew(cred, socket.Server().Addr)
}
if err != nil {
return err
}
defer sasl.Close()
// The goal of this logic is to carry a locked socket until the
// local SASL step confirms the auth is valid; the socket needs to be
// locked so that concurrent action doesn't leave the socket in an
// auth state that doesn't reflect the operations that took place.
// As a simple case, imagine inverting login=>logout to logout=>login.
//
// The logic below works because the lock func isn't called concurrently.
locked := false
lock := func(b bool) {
if locked != b {
locked = b
if b {
socket.Lock()
} else {
socket.Unlock()
}
}
}
lock(true)
defer lock(false)
start := 1
cmd := saslCmd{}
res := saslResult{}
for {
payload, done, err := sasl.Step(res.Payload)
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
lock(false)
cmd = saslCmd{
Start: start,
Continue: 1 - start,
ConversationId: res.ConversationId,
Mechanism: cred.Mechanism,
Payload: payload,
}
start = 0
err = socket.loginRun(cred.Source, &cmd, &res, func() error {
// See the comment on lock for why this is necessary.
lock(true)
if !res.Ok || res.NotOk {
return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
}
return nil
})
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
}
return nil
}
func saslNewScram(cred Credential) *saslScram {
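// The client is keyed with hex(md5(username + ":mongo:" + password)),
// i.e. the MONGODB-CR password digest, as computed below.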
credsum := md5.New()
credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
return &saslScram{cred: cred, client: client}
}
type saslScram struct {
cred Credential
client *scram.Client
}
func (s *saslScram) Close() {}
func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
more := s.client.Step(serverData)
return s.client.Out(), !more, s.client.Err()
}
func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
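// The mutex below doubles as a completion signal: it is locked up front and
// unlocked by the reply callback, so the final Lock blocks until the
// server's reply has been processed.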
var mutex sync.Mutex
var replyErr error
mutex.Lock()
op := queryOp{}
op.query = query
op.collection = db + ".$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
defer mutex.Unlock()
if err != nil {
replyErr = err
return
}
err = bson.Unmarshal(docData, result)
if err != nil {
replyErr = err
} else {
// Must handle this within the read loop for the socket, so
// that concurrent login requests are properly ordered.
replyErr = f()
}
}
err := socket.Query(&op)
if err != nil {
return err
}
mutex.Lock() // Wait.
return replyErr
}
func (socket *mongoSocket) Logout(db string) {
socket.Lock()
cred, found := socket.dropAuth(db)
if found {
debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
socket.logout = append(socket.logout, cred)
}
socket.Unlock()
}
func (socket *mongoSocket) LogoutAll() {
socket.Lock()
if l := len(socket.creds); l > 0 {
debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
socket.logout = append(socket.logout, socket.creds...)
socket.creds = socket.creds[0:0]
}
socket.Unlock()
}
func (socket *mongoSocket) flushLogout() (ops []interface{}) {
socket.Lock()
if l := len(socket.logout); l > 0 {
debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
for i := 0; i != l; i++ {
op := queryOp{}
op.query = &logoutCmd{1}
op.collection = socket.logout[i].Source + ".$cmd"
op.limit = -1
ops = append(ops, &op)
}
socket.logout = socket.logout[0:0]
}
socket.Unlock()
return
}
func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
for i, sockCred := range socket.creds {
if sockCred.Source == db {
copy(socket.creds[i:], socket.creds[i+1:])
socket.creds = socket.creds[:len(socket.creds)-1]
return sockCred, true
}
}
return cred, false
}
func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
for i, sockCred := range socket.logout {
if sockCred == cred {
copy(socket.logout[i:], socket.logout[i+1:])
socket.logout = socket.logout[:len(socket.logout)-1]
return true
}
}
return false
}

1181 Godeps/_workspace/src/gopkg.in/mgo.v2/auth_test.go generated vendored Normal file

File diff suppressed because it is too large

25 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

698 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go generated vendored Normal file

@@ -0,0 +1,698 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
GetBSON() (interface{}, error)
}
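// A minimal sketch of implementing Getter (illustrative only; the Celsius
// type and its Kelvin wire format are hypothetical):
//
//    type Celsius float64
//
//    func (c Celsius) GetBSON() (interface{}, error) {
//        return float64(c) + 273.15, nil // marshaled as a plain float64
//    }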
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// DocElem is an element of the bson.D document representation.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content of
// documents in general.
type RawD []RawDocElem
// See the RawD type.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB documents by default have one set in their "_id" field.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
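// A minimal sketch of guarding against the ObjectIdHex panic (illustrative
// only; parseObjectId is a hypothetical helper):
//
//    func parseObjectId(s string) (ObjectId, bool) {
//        if !IsObjectIdHex(s) {
//            return "", false
//        }
//        return ObjectIdHex(s), true
//    }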
// objectIdCounter is atomically incremented when generating a new ObjectId
// using the NewObjectId function. It is used as the counter part of an id.
var objectIdCounter uint32 = 0
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
// readMachineId generates the machine id and puts it into the machineId
// global variable. If the hostname cannot be read, it falls back to random
// bytes; it panics only if that fallback also fails.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
pid := os.Getpid()
b[7] = byte(pid >> 8)
b[8] = byte(pid)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method, it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data)))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err))
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("Invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
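// A minimal sketch tying the accessors above to the layout written by
// NewObjectId (illustrative only):
//
//    id := NewObjectId()
//    id.Time()    // bytes 0-3: seconds-precision creation time
//    id.Machine() // bytes 4-6: md5(hostname) prefix
//    id.Pid()     // bytes 7-8: process id
//    id.Counter() // bytes 9-11: atomically incremented counter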
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
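// A minimal sketch of the difference (illustrative only):
//
//    t := Now()      // already truncated to millisecond precision
//    u := time.Now() // may carry sub-millisecond precision that a
//                    // database round-trip would discard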
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
Namespace string
Id ObjectId
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
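// A minimal sketch of the tag flags above (illustrative only; the Account
// type is hypothetical):
//
//    type Account struct {
//        Name  string `bson:"name"`
//        Email string `bson:",omitempty"`
//        Hits  int64  `bson:"hits,minsize"`
//    }
//
//    data, err := Marshal(Account{Name: "ada", Hits: 7})
//    // Encodes {name: "ada", hits: 7}: Email is dropped as empty, and
//    // Hits fits in an int32 so minsize stores it as one.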
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
if raw, ok := out.(*Raw); ok {
raw.Kind = 3
raw.Data = in
return nil
}
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
fallthrough
case reflect.Map:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
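// A minimal sketch of the automatic conversions above (illustrative only;
// assumes data encodes a document with "n" and "flag" elements):
//
//    var out struct {
//        N    float64 // filled from a BSON int32/int64 as well as a double
//        Flag int     // filled as 1 or 0 from a BSON bool
//    }
//    err := Unmarshal(data, &out)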
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
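// A minimal sketch of partial decoding with Raw (illustrative only):
// decoding of a field is deferred until its kind has been inspected.
//
//    var doc struct {
//        Payload Raw
//    }
//    if err := Unmarshal(data, &doc); err == nil {
//        var s string
//        if doc.Payload.Unmarshal(&s) == nil {
//            // Payload held a string-compatible value.
//        }
//    }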
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
// XXX Drop this after a few releases.
if s := strings.Index(tag, "/"); s >= 0 {
recommend := tag[:s]
for _, c := range tag[s+1:] {
switch c {
case 'c':
recommend += ",omitempty"
case 's':
recommend += ",minsize"
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st)
panic(externalPanic(msg))
}
}
msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend)
panic(externalPanic(msg))
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}

1543 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go generated vendored Normal file

File diff suppressed because it is too large

820 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go generated vendored Normal file

@@ -0,0 +1,820 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyles = make(map[reflect.Type]int)
}
func setterStyle(outt reflect.Type) int {
setterMutex.RLock()
style := setterStyles[outt]
setterMutex.RUnlock()
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyles[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyles[outt] = setterAddr
} else {
setterStyles[outt] = setterNone
}
style = setterStyles[outt]
}
return style
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
style := setterStyle(outt)
if style == setterNone {
return nil
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice:
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned good value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == '\x03' {
// Delegate unmarshaling of documents.
outt := out.Type()
outk := out.Kind()
switch outk {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
return true
}
if setterStyle(outt) != setterNone {
d.readDocTo(out)
return true
}
if outk == reflect.Slice {
switch outt.Elem() {
case typeDocElem:
out.Set(d.readDocElems(outt))
case typeRawDocElem:
out.Set(d.readRawDocElems(outt))
}
return true
}
d.readDocTo(blackHole)
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
case 0x09: // Timestamp
// MongoDB handles timestamps as milliseconds.
i := d.readInt64()
if i == -62135596800000 {
in = time.Time{} // In UTC for convenience.
} else {
in = time.Unix(i/1e3, i%1e3*1e6)
}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0C: // DBPointer (deprecated)
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
case reflect.Int, reflect.Int64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatInt(inv.Int(), 10))
return true
}
case reflect.Float64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
return true
}
}
case reflect.Slice, reflect.Array:
// Remember, array (0x04) slices are built with the correct
// element type. If we are here, it must be a cross-BSON-kind
// conversion (e.g. kind 0x05 binary data unmarshalled onto a string).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("can't happen: no uint types in BSON (!?)")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
}
return false
}
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
if d.readByte() == 1 {
return true
}
return false
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
start := d.i
d.i += int(length)
if d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}

489 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go generated vendored Normal file

@@ -0,0 +1,489 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"encoding/json"
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
typeJSONNumber = reflect.TypeOf(json.Number(""))
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
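// itoa returns the decimal string for i. BSON arrays encode element
// positions as string keys, so small indices are served from the
// precomputed cache to avoid repeated allocations.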
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
if v.Type() == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := v.NumField() - 1; i >= 0; i-- {
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName('\x0A', name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName('\x07', name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName('\x0E', name)
e.addStr(s)
case typeJSONNumber:
n := v.Interface().(json.Number)
if i, err := n.Int64(); err == nil {
e.addElemName('\x12', name)
e.addInt64(i)
} else if f, err := n.Float64(); err == nil {
e.addElemName('\x01', name)
e.addFloat64(f)
} else {
panic("failed to convert json.Number to a number: " + s)
}
default:
e.addElemName('\x02', name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName('\x01', name)
e.addFloat64(v.Float())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName('\x10', name)
e.addInt32(int32(u))
} else {
e.addElemName('\x12', name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName('\x11', name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName('\x7F', name)
} else {
e.addElemName('\xFF', name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName('\x10', name)
e.addInt32(int32(i))
} else {
e.addElemName('\x12', name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName('\x08', name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName('\x03', name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName('\x03', name)
e.addDoc(v)
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName('\x05', name)
e.addBinary(s.Kind, s.Data)
case DBPointer:
e.addElemName('\x0C', name)
e.addStr(s.Namespace)
if len(s.Id) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s.Id)) + ")")
}
e.addBytes([]byte(s.Id)...)
case RegEx:
e.addElemName('\x0B', name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName('\x0D', name)
e.addStr(s.Code)
} else {
e.addElemName('\x0F', name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB stores datetimes as milliseconds since the Unix epoch;
// sub-millisecond precision is truncated.
e.addElemName('\x09', name)
e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6))
case url.URL:
e.addElemName('\x02', name)
e.addStr(s.String())
case undefined:
e.addElemName('\x06', name)
default:
e.addElemName('\x03', name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Obsolete subtype 0x02 wraps the data in a redundant inner length
// prefix; it is emitted here only for compatibility.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
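// reserveInt32 appends four placeholder bytes and returns their offset,
// so that a document's length prefix can be patched in via setInt32 once
// the full body has been written.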
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addFloat64(v float64) {
e.addInt64(int64(math.Float64bits(v)))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}

71
Godeps/_workspace/src/gopkg.in/mgo.v2/bulk.go generated vendored Normal file
View File

@@ -0,0 +1,71 @@
package mgo
// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// WARNING: This API is still experimental.
//
// Relevant documentation:
//
// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
c *Collection
ordered bool
inserts []interface{}
}
// BulkError holds an error returned from running a Bulk operation.
//
// TODO: This is private for the moment, until we understand exactly how
// to report these multi-errors in a useful and convenient way.
type bulkError struct {
err error
}
// BulkResult holds the results for a bulk operation.
type BulkResult struct {
// Be conservative while we understand exactly how to report these
// results in a useful and convenient way, and also how to emulate
// them with prior servers.
private bool
}
func (e *bulkError) Error() string {
return e.err.Error()
}
// Bulk returns a value to prepare the execution of a bulk operation.
//
// WARNING: This API is still experimental.
//
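// A minimal usage sketch (coll is assumed to be an existing *Collection):
//
//     bulk := coll.Bulk()
//     bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
//     _, err := bulk.Run()
//     if err != nil {
//         // inspect or report the failed operations
//     }
//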
func (c *Collection) Bulk() *Bulk {
return &Bulk{c: c, ordered: true}
}
// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if earlier ones have failed.
func (b *Bulk) Unordered() {
b.ordered = false
}
// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
b.inserts = append(b.inserts, docs...)
}
// Run runs all the operations queued up.
func (b *Bulk) Run() (*BulkResult, error) {
op := &insertOp{b.c.FullName, b.inserts, 0}
if !b.ordered {
op.flags = 1 // ContinueOnError
}
_, err := b.c.writeQuery(op)
if err != nil {
return nil, &bulkError{err}
}
return &BulkResult{}, nil
}

93
Godeps/_workspace/src/gopkg.in/mgo.v2/bulk_test.go generated vendored Normal file
View File

@@ -0,0 +1,93 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
)
func (s *S) TestBulkInsert(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"n": 1})
bulk.Insert(M{"n": 2}, M{"n": 3})
r, err := bulk.Run()
c.Assert(err, IsNil)
c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
type doc struct{ N int }
var res []doc
err = coll.Find(nil).Sort("n").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}
func (s *S) TestBulkInsertError(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
type doc struct {
N int `_id`
}
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}})
}
func (s *S) TestBulkInsertErrorUnordered(c *C) {
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
defer session.Close()
coll := session.DB("mydb").C("mycoll")
bulk := coll.Bulk()
bulk.Unordered()
bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
_, err = bulk.Run()
c.Assert(err, ErrorMatches, ".*duplicate key.*")
type doc struct {
N int `_id`
}
var res []doc
err = coll.Find(nil).Sort("_id").All(&res)
c.Assert(err, IsNil)
c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
}

621
Godeps/_workspace/src/gopkg.in/mgo.v2/cluster.go generated vendored Normal file
View File

@@ -0,0 +1,621 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"net"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.
type mongoCluster struct {
sync.RWMutex
serverSynced sync.Cond
userSeeds []string
dynaSeeds []string
servers mongoServers
masters mongoServers
references int
syncing bool
direct bool
failFast bool
syncCount uint
cachedIndex map[string]bool
sync chan bool
dial dialer
}
func newCluster(userSeeds []string, direct, failFast bool, dial dialer) *mongoCluster {
cluster := &mongoCluster{
userSeeds: userSeeds,
references: 1,
direct: direct,
failFast: failFast,
dial: dial,
}
cluster.serverSynced.L = cluster.RWMutex.RLocker()
cluster.sync = make(chan bool, 1)
stats.cluster(+1)
go cluster.syncServersLoop()
return cluster
}
// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
cluster.Lock()
cluster.references++
debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
cluster.Unlock()
}
// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
cluster.Lock()
if cluster.references == 0 {
panic("cluster.Release() with references == 0")
}
cluster.references--
debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
if cluster.references == 0 {
for _, server := range cluster.servers.Slice() {
server.Close()
}
// Wake up the sync loop so it can die.
cluster.syncServers()
stats.cluster(-1)
}
cluster.Unlock()
}
func (cluster *mongoCluster) LiveServers() (servers []string) {
cluster.RLock()
for _, serv := range cluster.servers.Slice() {
servers = append(servers, serv.Addr)
}
cluster.RUnlock()
return servers
}
func (cluster *mongoCluster) removeServer(server *mongoServer) {
cluster.Lock()
cluster.masters.Remove(server)
other := cluster.servers.Remove(server)
cluster.Unlock()
if other != nil {
other.Close()
log("Removed server ", server.Addr, " from cluster.")
}
server.Close()
}
type isMasterResult struct {
IsMaster bool
Secondary bool
Primary string
Hosts []string
Passives []string
Tags bson.D
Msg string
MaxWireVersion int `bson:"maxWireVersion"`
}
func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
// Monotonic mode lets it talk to a slave and still hold the socket.
session := newSession(Monotonic, cluster, 10*time.Second)
session.setSocket(socket)
err := session.Run("ismaster", result)
session.Close()
return err
}
type possibleTimeout interface {
Timeout() bool
}
var syncSocketTimeout = 5 * time.Second
func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
var syncTimeout time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
syncTimeout = syncSocketTimeout
globalMutex.Unlock()
} else {
syncTimeout = syncSocketTimeout
}
addr := server.Addr
log("SYNC Processing ", addr, "...")
// Retry a few times to avoid knocking a server down for a hiccup.
var result isMasterResult
var tryerr error
for retry := 0; ; retry++ {
if retry == 3 || retry == 1 && cluster.failFast {
return nil, nil, tryerr
}
if retry > 0 {
// Don't abuse the server needlessly if there's something actually wrong.
if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
// Give a chance for waiters to timeout as well.
cluster.serverSynced.Broadcast()
}
time.Sleep(syncShortDelay)
}
// It's not clear what would be a good timeout here. Is it
// better to wait longer or to retry?
socket, _, err := server.AcquireSocket(0, syncTimeout)
if err != nil {
tryerr = err
logf("SYNC Failed to get socket to %s: %v", addr, err)
continue
}
err = cluster.isMaster(socket, &result)
socket.Release()
if err != nil {
tryerr = err
logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
continue
}
debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
break
}
if result.IsMaster {
debugf("SYNC %s is a master.", addr)
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
stats.conn(+1, true)
} else if result.Secondary {
debugf("SYNC %s is a slave.", addr)
} else if cluster.direct {
logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
} else {
logf("SYNC %s is neither a master nor a slave.", addr)
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
return nil, nil, errors.New(addr + " is not a master nor slave")
}
info = &mongoServerInfo{
Master: result.IsMaster,
Mongos: result.Msg == "isdbgrid",
Tags: result.Tags,
MaxWireVersion: result.MaxWireVersion,
}
hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
if result.Primary != "" {
// First in the list to speed up master discovery.
hosts = append(hosts, result.Primary)
}
hosts = append(hosts, result.Hosts...)
hosts = append(hosts, result.Passives...)
debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
return info, hosts, nil
}
type syncKind bool
const (
completeSync syncKind = true
partialSync syncKind = false
)
func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
cluster.Lock()
current := cluster.servers.Search(server.ResolvedAddr)
if current == nil {
if syncKind == partialSync {
cluster.Unlock()
server.Close()
log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
return
}
cluster.servers.Add(server)
if info.Master {
cluster.masters.Add(server)
log("SYNC Adding ", server.Addr, " to cluster as a master.")
} else {
log("SYNC Adding ", server.Addr, " to cluster as a slave.")
}
} else {
if server != current {
panic("addServer attempting to add duplicated server")
}
if server.Info().Master != info.Master {
if info.Master {
log("SYNC Server ", server.Addr, " is now a master.")
cluster.masters.Add(server)
} else {
log("SYNC Server ", server.Addr, " is now a slave.")
cluster.masters.Remove(server)
}
}
}
server.SetInfo(info)
debugf("SYNC Broadcasting availability of server %s", server.Addr)
cluster.serverSynced.Broadcast()
cluster.Unlock()
}
func (cluster *mongoCluster) getKnownAddrs() []string {
cluster.RLock()
max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
seen := make(map[string]bool, max)
known := make([]string, 0, max)
add := func(addr string) {
if _, found := seen[addr]; !found {
seen[addr] = true
known = append(known, addr)
}
}
for _, addr := range cluster.userSeeds {
add(addr)
}
for _, addr := range cluster.dynaSeeds {
add(addr)
}
for _, serv := range cluster.servers.Slice() {
add(serv.Addr)
}
cluster.RUnlock()
return known
}
// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
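// The send is non-blocking: if a request is already pending in the
// channel buffer, an extra one would be redundant and is dropped.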
func (cluster *mongoCluster) syncServers() {
select {
case cluster.sync <- true:
default:
}
}
// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond
// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
for {
debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.references++ // Keep alive while syncing.
direct := cluster.direct
cluster.Unlock()
cluster.syncServersIteration(direct)
// We just synchronized, so consume any outstanding requests.
select {
case <-cluster.sync:
default:
}
cluster.Release()
// Hold off before allowing another sync. No point in
// burning CPU looking for down servers.
if !cluster.failFast {
time.Sleep(syncShortDelay)
}
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.syncCount++
// Poke all waiters so they have a chance to timeout or
// restart syncing if they wish to.
cluster.serverSynced.Broadcast()
// Check if we have to restart immediately either way.
restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
cluster.Unlock()
if restart {
log("SYNC No masters found. Will synchronize again.")
time.Sleep(syncShortDelay)
continue
}
debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
// Hold off until somebody explicitly requests a synchronization
// or it's time to check for a cluster topology change again.
select {
case <-cluster.sync:
case <-time.After(syncServersDelay):
}
}
debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}
func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
cluster.RLock()
server := cluster.servers.Search(tcpaddr.String())
cluster.RUnlock()
if server != nil {
return server
}
return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}
func resolveAddr(addr string) (*net.TCPAddr, error) {
// This hack allows having a timeout on resolution.
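// Dialing UDP resolves the name without sending any packets, and
// DialTimeout bounds how long that resolution may take.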
conn, err := net.DialTimeout("udp", addr, 10*time.Second)
if err != nil {
log("SYNC Failed to resolve server address: ", addr)
return nil, errors.New("failed to resolve server address: " + addr)
}
tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
conn.Close()
if tcpaddr.String() != addr {
debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
}
return tcpaddr, nil
}
type pendingAdd struct {
server *mongoServer
info *mongoServerInfo
}
func (cluster *mongoCluster) syncServersIteration(direct bool) {
log("SYNC Starting full topology synchronization...")
var wg sync.WaitGroup
var m sync.Mutex
notYetAdded := make(map[string]pendingAdd)
addIfFound := make(map[string]bool)
seen := make(map[string]bool)
syncKind := partialSync
var spawnSync func(addr string, byMaster bool)
spawnSync = func(addr string, byMaster bool) {
wg.Add(1)
go func() {
defer wg.Done()
tcpaddr, err := resolveAddr(addr)
if err != nil {
log("SYNC Failed to start sync of ", addr, ": ", err.Error())
return
}
resolvedAddr := tcpaddr.String()
m.Lock()
if byMaster {
if pending, ok := notYetAdded[resolvedAddr]; ok {
delete(notYetAdded, resolvedAddr)
m.Unlock()
cluster.addServer(pending.server, pending.info, completeSync)
return
}
addIfFound[resolvedAddr] = true
}
if seen[resolvedAddr] {
m.Unlock()
return
}
seen[resolvedAddr] = true
m.Unlock()
server := cluster.server(addr, tcpaddr)
info, hosts, err := cluster.syncServer(server)
if err != nil {
cluster.removeServer(server)
return
}
m.Lock()
add := direct || info.Master || addIfFound[resolvedAddr]
if add {
syncKind = completeSync
} else {
notYetAdded[resolvedAddr] = pendingAdd{server, info}
}
m.Unlock()
if add {
cluster.addServer(server, info, completeSync)
}
if !direct {
for _, addr := range hosts {
spawnSync(addr, info.Master)
}
}
}()
}
knownAddrs := cluster.getKnownAddrs()
for _, addr := range knownAddrs {
spawnSync(addr, false)
}
wg.Wait()
if syncKind == completeSync {
logf("SYNC Synchronization was complete (got data from primary).")
for _, pending := range notYetAdded {
cluster.removeServer(pending.server)
}
} else {
logf("SYNC Synchronization was partial (cannot talk to primary).")
for _, pending := range notYetAdded {
cluster.addServer(pending.server, pending.info, partialSync)
}
}
cluster.Lock()
ml := cluster.masters.Len()
logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml)
// Update dynamic seeds, but only if we have any good servers. Otherwise,
// leave them alone for better chances of a successful sync in the future.
if syncKind == completeSync {
dynaSeeds := make([]string, cluster.servers.Len())
for i, server := range cluster.servers.Slice() {
dynaSeeds[i] = server.Addr
}
cluster.dynaSeeds = dynaSeeds
debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
}
cluster.Unlock()
}
// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
var started time.Time
var syncCount uint
warnedLimit := false
for {
cluster.RLock()
for {
ml := cluster.masters.Len()
sl := cluster.servers.Len()
debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml)
if ml > 0 || slaveOk && sl > 0 {
break
}
if started.IsZero() {
// Initialize after fast path above.
started = time.Now()
syncCount = cluster.syncCount
} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
cluster.RUnlock()
return nil, errors.New("no reachable servers")
}
log("Waiting for servers to synchronize...")
cluster.syncServers()
// Remember: this will release and reacquire the lock.
cluster.serverSynced.Wait()
}
var server *mongoServer
if slaveOk {
server = cluster.servers.BestFit(serverTags)
} else {
server = cluster.masters.BestFit(nil)
}
cluster.RUnlock()
if server == nil {
// Must have failed the requested tags. Sleep to avoid spinning.
time.Sleep(100 * time.Millisecond)
continue
}
s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
if err == errPoolLimit {
if !warnedLimit {
warnedLimit = true
log("WARNING: Per-server connection limit reached.")
}
time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
cluster.removeServer(server)
cluster.syncServers()
continue
}
if abended && !slaveOk {
var result isMasterResult
err := cluster.isMaster(s, &result)
if err != nil || !result.IsMaster {
logf("Cannot confirm server %s as master (%v)", server.Addr, err)
s.Release()
cluster.syncServers()
time.Sleep(100 * time.Millisecond)
continue
}
}
return s, nil
}
panic("unreached")
}
func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
cluster.Lock()
if cluster.cachedIndex == nil {
cluster.cachedIndex = make(map[string]bool)
}
if exists {
cluster.cachedIndex[cacheKey] = true
} else {
delete(cluster.cachedIndex, cacheKey)
}
cluster.Unlock()
}
func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
cluster.RLock()
if cluster.cachedIndex != nil {
result = cluster.cachedIndex[cacheKey]
}
cluster.RUnlock()
return
}
func (cluster *mongoCluster) ResetIndexCache() {
cluster.Lock()
cluster.cachedIndex = make(map[string]bool)
cluster.Unlock()
}

1596
Godeps/_workspace/src/gopkg.in/mgo.v2/cluster_test.go generated vendored Normal file

File diff suppressed because it is too large

31
Godeps/_workspace/src/gopkg.in/mgo.v2/doc.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
// http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
// session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
// c := session.DB(database).C(collection)
// err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection cache, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its lifetime, so its resources may be put back in the pool or
// collected, depending on the case.
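//
// A typical pattern is to copy the session per unit of work (a sketch;
// database, collection, query, and result are placeholders):
//
//     s := session.Copy()
//     defer s.Close()
//     err := s.DB(database).C(collection).Find(query).One(&result)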
//
// For more details, see the documentation for the types and methods.
//
package mgo

33
Godeps/_workspace/src/gopkg.in/mgo.v2/export_test.go generated vendored Normal file
View File

@@ -0,0 +1,33 @@
package mgo
import (
"time"
)
func HackPingDelay(newDelay time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldDelay := pingDelay
restore = func() {
globalMutex.Lock()
pingDelay = oldDelay
globalMutex.Unlock()
}
pingDelay = newDelay
return
}
func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
globalMutex.Lock()
defer globalMutex.Unlock()
oldTimeout := syncSocketTimeout
restore = func() {
globalMutex.Lock()
syncSocketTimeout = oldTimeout
globalMutex.Unlock()
}
syncSocketTimeout = newTimeout
return
}
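// Both hack helpers above follow the same save-and-restore pattern; a test
// typically applies one as (sketch):
//
//     restore := HackSyncSocketTimeout(100 * time.Millisecond)
//     defer restore()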

754
Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs.go generated vendored Normal file
View File

@@ -0,0 +1,754 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"encoding/hex"
"errors"
"hash"
"io"
"os"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
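// GridFS holds the files and chunks collections that back a GridFS store,
// and provides the entry points for creating, opening, finding, and
// removing the files kept in it.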
type GridFS struct {
Files *Collection
Chunks *Collection
}
type gfsFileMode int
const (
gfsClosed gfsFileMode = 0
gfsReading gfsFileMode = 1
gfsWriting gfsFileMode = 2
)
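// GridFile represents an open GridFS file. It implements io.Reader,
// io.Writer, and io.Seeker over the file's chunked contents via the
// Read, Write, and Seek methods below.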
type GridFile struct {
m sync.Mutex
c sync.Cond
gfs *GridFS
mode gfsFileMode
err error
chunk int
offset int64
wpending int
wbuf []byte
wsum hash.Hash
rbuf []byte
rcache *gfsCachedChunk
doc gfsFile
}
type gfsFile struct {
Id interface{} "_id"
ChunkSize int "chunkSize"
UploadDate time.Time "uploadDate"
Length int64 ",minsize"
MD5 string
Filename string ",omitempty"
ContentType string "contentType,omitempty"
Metadata *bson.Raw ",omitempty"
}
type gfsChunk struct {
Id interface{} "_id"
FilesId interface{} "files_id"
N int
Data []byte
}
type gfsCachedChunk struct {
wait sync.Mutex
n int
data []byte
err error
}
func newGridFS(db *Database, prefix string) *GridFS {
return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}
func (gfs *GridFS) newFile() *GridFile {
file := &GridFile{gfs: gfs}
file.c.L = &file.m
//runtime.SetFinalizer(file, finalizeFile)
return file
}
func finalizeFile(file *GridFile) {
file.Close()
}
// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// n, err := file.Write([]byte("Hello world!")
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used to
// help on the file creation. For example:
//
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// messages, err := os.Open("/var/log/messages")
// check(err)
// defer messages.Close()
// _, err = io.Copy(file, messages)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
file = gfs.newFile()
file.mode = gfsWriting
file.wsum = md5.New()
file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
return
}
// OpenId returns the file with the provided id, for reading.
// If the file isn't found, err will be set to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// func check(err error) {
// if err != nil {
// panic(err)
// }
// }
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// Open returns the most recently uploaded file with the provided
// name, for reading. If the file isn't found, err will be set
// to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used to
// deal with it. As an example, the following snippet will dump the whole
// file into the standard output:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// OpenNext opens the next file from iter for reading, sets *file to it,
// and returns true on the success case. If no more documents are available
// on iter or an error occurred, *file is set to nil and the result is false.
// Errors will be available via iter.Err().
//
// The iter parameter must be an iterator on the GridFS files collection.
// Using the GridFS.Find method is an easy way to obtain such an iterator,
// but any iterator on the collection will work.
//
// If the provided *file is non-nil, OpenNext will close it before attempting
// to iterate to the next element. This means that in a loop one only
// has to worry about closing files when breaking out of the loop early
// (break, return, or panic).
//
// For example:
//
// gfs := db.GridFS("fs")
// query := gfs.Find(nil).Sort("filename")
// iter := query.Iter()
// var f *mgo.GridFile
// for gfs.OpenNext(iter, &f) {
// fmt.Printf("Filename: %s\n", f.Name())
// }
// if iter.Close() != nil {
// panic(iter.Close())
// }
//
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
if *file != nil {
// Ignoring the error here shouldn't be a big deal
// as we're reading the file and the loop iteration
// for this file is finished.
_ = (*file).Close()
}
var doc gfsFile
if !iter.Next(&doc) {
*file = nil
return false
}
f := gfs.newFile()
f.mode = gfsReading
f.doc = doc
*file = f
return true
}
// Find runs query on GridFS's files collection and returns
// the resulting Query.
//
// This logic:
//
// gfs := db.GridFS("fs")
// iter := gfs.Find(nil).Iter()
//
// Is equivalent to:
//
// files := db.C("fs" + ".files")
// iter := files.Find(nil).Iter()
//
func (gfs *GridFS) Find(query interface{}) *Query {
return gfs.Files.Find(query)
}
// RemoveId deletes the file with the provided id from the GridFS.
func (gfs *GridFS) RemoveId(id interface{}) error {
err := gfs.Files.Remove(bson.M{"_id": id})
if err != nil {
return err
}
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
return err
}
type gfsDocId struct {
Id interface{} "_id"
}
// Remove deletes all files with the provided name from the GridFS.
func (gfs *GridFS) Remove(name string) (err error) {
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
var doc gfsDocId
for iter.Next(&doc) {
if e := gfs.RemoveId(doc.Id); e != nil {
err = e
}
}
if err == nil {
err = iter.Close()
}
return err
}
func (file *GridFile) assertMode(mode gfsFileMode) {
switch file.mode {
case mode:
return
case gfsWriting:
panic("GridFile is open for writing")
case gfsReading:
panic("GridFile is open for reading")
case gfsClosed:
panic("GridFile is closed")
default:
panic("internal error: missing GridFile mode")
}
}
// SetChunkSize sets the size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 255kb (255 * 1024
// bytes), as set by Create.
//
// It is a runtime error to call this function once the file has started
// being written to.
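//
// For example, to store a freshly created file in 1MB chunks (sketch):
//
//     file.SetChunkSize(1024 * 1024)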
func (file *GridFile) SetChunkSize(bytes int) {
file.assertMode(gfsWriting)
debugf("GridFile %p: setting chunk size to %d", file, bytes)
file.m.Lock()
file.doc.ChunkSize = bytes
file.m.Unlock()
}
// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
return file.doc.Id
}
// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Id = id
file.m.Unlock()
}
// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
return file.doc.Filename
}
// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Filename = name
file.m.Unlock()
}
// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
return file.doc.ContentType
}
// SetContentType changes the optional file content type. An empty string may be
// used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.ContentType = ctype
file.m.Unlock()
}
// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
// result := struct{ INode int }{}
// err = file.GetMeta(&result)
// if err != nil {
// panic(err)
// }
// fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
file.m.Lock()
if file.doc.Metadata != nil {
err = bson.Unmarshal(file.doc.Metadata.Data, result)
}
file.m.Unlock()
return
}
// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
// file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
file.assertMode(gfsWriting)
data, err := bson.Marshal(metadata)
file.m.Lock()
if err != nil && file.err == nil {
file.err = err
} else {
file.doc.Metadata = &bson.Raw{Data: data}
}
file.m.Unlock()
}
// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
file.m.Lock()
bytes = file.doc.Length
file.m.Unlock()
return
}
// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
return file.doc.MD5
}
// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
return file.doc.UploadDate
}
// SetUploadDate changes the file upload time.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetUploadDate(t time.Time) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.UploadDate = t
file.m.Unlock()
}
// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
file.m.Lock()
defer file.m.Unlock()
if file.mode == gfsWriting {
if len(file.wbuf) > 0 && file.err == nil {
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
file.completeWrite()
} else if file.mode == gfsReading && file.rcache != nil {
file.rcache.wait.Lock()
file.rcache = nil
}
file.mode = gfsClosed
debugf("GridFile %p: closed", file)
return file.err
}
func (file *GridFile) completeWrite() {
for file.wpending > 0 {
debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
file.c.Wait()
}
if file.err != nil {
file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
return
}
hexsum := hex.EncodeToString(file.wsum.Sum(nil))
if file.doc.UploadDate.IsZero() {
file.doc.UploadDate = bson.Now()
}
file.doc.MD5 = hexsum
file.err = file.gfs.Files.Insert(file.doc)
file.gfs.Chunks.EnsureIndexKey("files_id", "n")
}
// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
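//
// A sketch of typical usage when an upload must be discarded:
//
//     file.Abort()
//     err := file.Close() // reports the aborted write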
func (file *GridFile) Abort() {
if file.mode != gfsWriting {
panic("file.Abort must be called on file opened for writing")
}
file.err = errors.New("write aborted")
}
// Write writes the provided data to the file and returns the
// number of bytes written and an error in case something
// went wrong.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
file.assertMode(gfsWriting)
file.m.Lock()
debugf("GridFile %p: writing %d bytes", file, len(data))
defer file.m.Unlock()
if file.err != nil {
return 0, file.err
}
n = len(data)
file.doc.Length += int64(n)
chunkSize := file.doc.ChunkSize
if len(file.wbuf)+len(data) < chunkSize {
file.wbuf = append(file.wbuf, data...)
return
}
// First, flush file.wbuf complementing with data.
if len(file.wbuf) > 0 {
missing := chunkSize - len(file.wbuf)
if missing > len(data) {
missing = len(data)
}
file.wbuf = append(file.wbuf, data[:missing]...)
data = data[missing:]
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
// Then, flush all chunks from data without copying.
for len(data) > chunkSize {
size := chunkSize
if size > len(data) {
size = len(data)
}
file.insertChunk(data[:size])
data = data[size:]
}
// And append the rest for a future call.
file.wbuf = append(file.wbuf, data...)
return n, file.err
}
func (file *GridFile) insertChunk(data []byte) {
n := file.chunk
file.chunk++
debugf("GridFile %p: adding to checksum: %q", file, string(data))
file.wsum.Write(data)
for file.doc.ChunkSize*file.wpending >= 1024*1024 {
// Hold on: at least a megabyte of chunk inserts is still pending.
file.c.Wait()
if file.err != nil {
return
}
}
file.wpending++
debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
// We may not own the memory of data, so rather than
// simply copying it, we'll marshal the document ahead of time.
data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
if err != nil {
file.err = err
return
}
go func() {
err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
file.m.Lock()
file.wpending--
if err != nil && file.err == nil {
file.err = err
}
file.c.Broadcast()
file.m.Unlock()
}()
}
// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
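//
// For example, the file size may be found without reading any data
// (a sketch relying on the seek-to-end shortcut below):
//
//     size, err := file.Seek(0, os.SEEK_END)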
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
file.m.Lock()
debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
defer file.m.Unlock()
switch whence {
case os.SEEK_SET:
case os.SEEK_CUR:
offset += file.offset
case os.SEEK_END:
offset += file.doc.Length
default:
panic("unsupported whence value")
}
if offset > file.doc.Length {
return file.offset, errors.New("seek past end of file")
}
if offset == file.doc.Length {
// If we're seeking to the end of the file,
// no need to read anything. This enables
// a client to find the size of the file using only the
// io.ReadSeeker interface with low overhead.
file.offset = offset
return file.offset, nil
}
chunk := int(offset / int64(file.doc.ChunkSize))
if chunk+1 == file.chunk && offset >= file.offset {
file.rbuf = file.rbuf[int(offset-file.offset):]
file.offset = offset
return file.offset, nil
}
file.offset = offset
file.chunk = chunk
file.rbuf = nil
file.rbuf, err = file.getChunk()
if err == nil {
file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
}
return file.offset, err
}
// Read reads into b the next available data from the file and
// returns the number of bytes read and an error in case
// something went wrong. At the end of the file, n will
// be zero and err will be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
file.assertMode(gfsReading)
file.m.Lock()
debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
defer file.m.Unlock()
if file.offset == file.doc.Length {
return 0, io.EOF
}
for err == nil {
i := copy(b, file.rbuf)
n += i
file.offset += int64(i)
file.rbuf = file.rbuf[i:]
if i == len(b) || file.offset == file.doc.Length {
break
}
b = b[i:]
file.rbuf, err = file.getChunk()
}
return n, err
}
func (file *GridFile) getChunk() (data []byte, err error) {
cache := file.rcache
file.rcache = nil
if cache != nil && cache.n == file.chunk {
debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
cache.wait.Lock()
data, err = cache.data, cache.err
} else {
debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
var doc gfsChunk
err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
data = doc.Data
}
file.chunk++
if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
// Read the next one in background.
cache = &gfsCachedChunk{n: file.chunk}
cache.wait.Lock()
debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
// Clone the session to avoid having it closed in between.
chunks := file.gfs.Chunks
session := chunks.Database.Session.Clone()
go func(id interface{}, n int) {
defer session.Close()
chunks = chunks.With(session)
var doc gfsChunk
cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
cache.data = doc.Data
cache.wait.Unlock()
}(file.doc.Id, file.chunk)
file.rcache = cache
}
debugf("Returning err: %#v", err)
return
}

680
Godeps/_workspace/src/gopkg.in/mgo.v2/gridfs_test.go generated vendored Normal file
View File

@@ -0,0 +1,680 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"io"
"os"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
func (s *S) TestGridFSCreate(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
before := bson.Now()
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
err = file.Close()
c.Assert(err, IsNil)
after := bson.Now()
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
ud, ok := result["uploadDate"].(time.Time)
c.Assert(ok, Equals, true)
c.Assert(ud.After(before) && ud.Before(after), Equals, true)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 9,
"chunkSize": 255 * 1024,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
}
c.Assert(result, DeepEquals, expected)
// Check the chunk.
result = M{}
err = db.C("fs.chunks").Find(nil).One(result)
c.Assert(err, IsNil)
chunkId, ok := result["_id"].(bson.ObjectId)
c.Assert(ok, Equals, true)
c.Assert(chunkId.Valid(), Equals, true)
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": 0,
"data": []byte("some data"),
}
c.Assert(result, DeepEquals, expected)
// Check that an index was created.
indexes, err := db.C("fs.chunks").Indexes()
c.Assert(err, IsNil)
c.Assert(len(indexes), Equals, 2)
c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
}
func (s *S) TestGridFSFileDetails(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
n, err := file.Write([]byte("some"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(file.Size(), Equals, int64(4))
n, err = file.Write([]byte(" data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 5)
c.Assert(file.Size(), Equals, int64(9))
id, _ := file.Id().(bson.ObjectId)
c.Assert(id.Valid(), Equals, true)
c.Assert(file.Name(), Equals, "myfile1.txt")
c.Assert(file.ContentType(), Equals, "")
var info interface{}
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, IsNil)
file.SetId("myid")
file.SetName("myfile2.txt")
file.SetContentType("text/plain")
file.SetMeta(M{"any": "thing"})
c.Assert(file.Id(), Equals, "myid")
c.Assert(file.Name(), Equals, "myfile2.txt")
c.Assert(file.ContentType(), Equals, "text/plain")
err = file.GetMeta(&info)
c.Assert(err, IsNil)
c.Assert(info, DeepEquals, bson.M{"any": "thing"})
err = file.Close()
c.Assert(err, IsNil)
c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
ud := file.UploadDate()
now := time.Now()
c.Assert(ud.Before(now), Equals, true)
c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "myid",
"length": 9,
"chunkSize": 255 * 1024,
"uploadDate": "<timestamp>",
"md5": "1e50210a0202497fb79bc38b6ade6c34",
"filename": "myfile2.txt",
"contentType": "text/plain",
"metadata": M{"any": "thing"},
}
c.Assert(result, DeepEquals, expected)
}
func (s *S) TestGridFSSetUploadDate(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
file.SetUploadDate(t)
err = file.Close()
c.Assert(err, IsNil)
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
ud := result["uploadDate"].(time.Time)
if !ud.Equal(t) {
c.Fatalf("want upload date %s, got %s", t, ud)
}
}
func (s *S) TestGridFSCreateWithChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
// Smaller than the chunk size.
n, err := file.Write([]byte("abc"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Boundary in the middle.
n, err = file.Write([]byte("defg"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
// Boundary at the end.
n, err = file.Write([]byte("hij"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
// Larger than the chunk size, with 3 chunks.
n, err = file.Write([]byte("klmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
err = file.Close()
c.Assert(err, IsNil)
// Check the file information.
result := M{}
err = db.C("fs.files").Find(nil).One(result)
c.Assert(err, IsNil)
fileId, _ := result["_id"].(bson.ObjectId)
c.Assert(fileId.Valid(), Equals, true)
result["_id"] = "<id>"
result["uploadDate"] = "<timestamp>"
expected := M{
"_id": "<id>",
"length": 22,
"chunkSize": 5,
"uploadDate": "<timestamp>",
"md5": "44a66044834cbe55040089cabfc102d5",
}
c.Assert(result, DeepEquals, expected)
// Check the chunks.
iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
for i := 0; ; i++ {
result = M{}
if !iter.Next(result) {
if i != 5 {
c.Fatalf("Expected 5 chunks, got %d", i)
}
break
}
c.Assert(iter.Close(), IsNil)
result["_id"] = "<id>"
expected = M{
"_id": "<id>",
"files_id": fileId,
"n": i,
"data": []byte(dataChunks[i]),
}
c.Assert(result, DeepEquals, expected)
}
}
func (s *S) TestGridFSAbort(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
file.SetChunkSize(5)
n, err := file.Write([]byte("some data"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 9)
var count int
for i := 0; i < 10; i++ {
count, err = db.C("fs.chunks").Count()
if count > 0 || err != nil {
break
}
}
c.Assert(err, IsNil)
c.Assert(count, Equals, 1)
file.Abort()
err = file.Close()
c.Assert(err, ErrorMatches, "write aborted")
count, err = db.C("fs.chunks").Count()
c.Assert(err, IsNil)
c.Assert(count, Equals, 0)
}
func (s *S) TestGridFSOpenNotFound(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.OpenId("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
file, err = gfs.Open("non-existent")
c.Assert(err == mgo.ErrNotFound, Equals, true)
c.Assert(file, IsNil)
}
func (s *S) TestGridFSReadAll(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
n, err = file.Read(b)
c.Assert(n, Equals, 22)
c.Assert(err, IsNil)
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSReadChunking(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
b := make([]byte, 30)
// Smaller than the chunk size.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("abc"))
// Boundary in the middle.
n, err = file.Read(b[:4])
c.Assert(err, IsNil)
c.Assert(n, Equals, 4)
c.Assert(b[:4], DeepEquals, []byte("defg"))
// Boundary at the end.
n, err = file.Read(b[:3])
c.Assert(err, IsNil)
c.Assert(n, Equals, 3)
c.Assert(b[:3], DeepEquals, []byte("hij"))
// Larger than the chunk size, with 3 chunks.
n, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(n, Equals, 12)
c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
n, err = file.Read(b)
c.Assert(n, Equals, 0)
c.Assert(err == io.EOF, Equals, true)
err = file.Close()
c.Assert(err, IsNil)
}
func (s *S) TestGridFSOpen(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
}
func (s *S) TestGridFSSeek(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("")
c.Assert(err, IsNil)
id := file.Id()
file.SetChunkSize(5)
n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
c.Assert(err, IsNil)
c.Assert(n, Equals, 22)
err = file.Close()
c.Assert(err, IsNil)
b := make([]byte, 5)
file, err = gfs.OpenId(id)
c.Assert(err, IsNil)
o, err := file.Seek(3, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(3))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("defgh"))
o, err = file.Seek(5, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(13))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("nopqr"))
o, err = file.Seek(0, os.SEEK_END)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(22))
n, err = file.Read(b)
c.Assert(err, Equals, io.EOF)
c.Assert(n, Equals, 0)
o, err = file.Seek(-10, os.SEEK_END)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(12))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("mnopq"))
o, err = file.Seek(8, os.SEEK_SET)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(8))
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("ijklm"))
// Trivial seek forward within same chunk. Already
// got the data, shouldn't touch the database.
sent := mgo.GetStats().SentOps
o, err = file.Seek(1, os.SEEK_CUR)
c.Assert(err, IsNil)
c.Assert(o, Equals, int64(14))
c.Assert(mgo.GetStats().SentOps, Equals, sent)
_, err = file.Read(b)
c.Assert(err, IsNil)
c.Assert(b, DeepEquals, []byte("opqrs"))
// Try seeking past end of file.
file.Seek(3, os.SEEK_SET)
o, err = file.Seek(23, os.SEEK_SET)
c.Assert(err, ErrorMatches, "seek past end of file")
c.Assert(o, Equals, int64(3))
}
func (s *S) TestGridFSRemoveId(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
id := file.Id()
file.Close()
err = gfs.RemoveId(id)
c.Assert(err, IsNil)
file, err = gfs.Open("myfile.txt")
c.Assert(err, IsNil)
defer file.Close()
var b [1]byte
_, err = file.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSRemove(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
err = gfs.Remove("myfile.txt")
c.Assert(err, IsNil)
_, err = gfs.Open("myfile.txt")
c.Assert(err == mgo.ErrNotFound, Equals, true)
n, err := db.C("fs.chunks").Find(nil).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
func (s *S) TestGridFSOpenNext(c *C) {
session, err := mgo.Dial("localhost:40011")
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("mydb")
gfs := db.GridFS("fs")
file, err := gfs.Create("myfile1.txt")
c.Assert(err, IsNil)
file.Write([]byte{'1'})
file.Close()
file, err = gfs.Create("myfile2.txt")
c.Assert(err, IsNil)
file.Write([]byte{'2'})
file.Close()
var f *mgo.GridFile
var b [1]byte
iter := gfs.Find(nil).Sort("-filename").Iter()
ok := gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile2.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "2")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
_, err = f.Read(b[:])
c.Assert(err, IsNil)
c.Assert(string(b[:]), Equals, "1")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
// Do it again with a more restrictive query to make sure
// it's actually taken into account.
iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, true)
c.Check(f.Name(), Equals, "myfile1.txt")
ok = gfs.OpenNext(iter, &f)
c.Assert(ok, Equals, false)
c.Assert(iter.Close(), IsNil)
c.Assert(f, IsNil)
}
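
These tests double as usage documentation for the GridFS layer. A minimal end-to-end sketch of the same calls outside the test harness (server address, database, and file name are placeholders):

package main

import (
	"fmt"
	"io/ioutil"

	"gopkg.in/mgo.v2"
)

func main() {
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()
	gfs := session.DB("mydb").GridFS("fs")

	// Write: data is split into fs.chunks documents as it arrives, and
	// Close finalizes the fs.files document (length, md5, uploadDate).
	file, err := gfs.Create("hello.txt")
	if err != nil {
		panic(err)
	}
	if _, err = file.Write([]byte("hello, gridfs")); err != nil {
		panic(err)
	}
	if err = file.Close(); err != nil {
		panic(err)
	}

	// Read: Open returns the most recently uploaded file with that
	// name, as TestGridFSOpen above verifies.
	file, err = gfs.Open("hello.txt")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	data, err := ioutil.ReadAll(file)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", data)
}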

266
Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram.go generated vendored Normal file

@@ -0,0 +1,266 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
//
// http://tools.ietf.org/html/rfc5802
//
package scram
import (
"bytes"
"crypto/hmac"
"crypto/rand"
"encoding/base64"
"fmt"
"hash"
"strconv"
"strings"
)
// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
//
// A Client may be used within a SASL conversation with logic resembling:
//
// var in []byte
// var client = scram.NewClient(sha1.New, user, pass)
// for client.Step(in) {
// out := client.Out()
// // send out to server
// in = serverOut
// }
// if client.Err() != nil {
// // auth failed
// }
//
type Client struct {
newHash func() hash.Hash
user string
pass string
step int
out bytes.Buffer
err error
clientNonce []byte
serverNonce []byte
saltedPass []byte
authMsg bytes.Buffer
}
// NewClient returns a new SCRAM-* client with the provided hash algorithm.
//
// For SCRAM-SHA-1, for example, use:
//
// client := scram.NewClient(sha1.New, user, pass)
//
func NewClient(newHash func() hash.Hash, user, pass string) *Client {
c := &Client{
newHash: newHash,
user: user,
pass: pass,
}
c.out.Grow(256)
c.authMsg.Grow(256)
return c
}
// Out returns the data to be sent to the server in the current step.
func (c *Client) Out() []byte {
if c.out.Len() == 0 {
return nil
}
return c.out.Bytes()
}
// Err returns the error that occurred, or nil if there were no errors.
func (c *Client) Err() error {
return c.err
}
// SetNonce sets the client nonce to the provided value.
// If not set, the nonce is generated automatically out of crypto/rand on the first step.
func (c *Client) SetNonce(nonce []byte) {
c.clientNonce = nonce
}
var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
// Step processes the incoming data from the server and makes the
// next round of data for the server available via Client.Out.
// Step returns false if there are no errors and more data is
// still expected.
func (c *Client) Step(in []byte) bool {
c.out.Reset()
if c.step > 2 || c.err != nil {
return false
}
c.step++
switch c.step {
case 1:
c.err = c.step1(in)
case 2:
c.err = c.step2(in)
case 3:
c.err = c.step3(in)
}
return c.step > 2 || c.err != nil
}
func (c *Client) step1(in []byte) error {
if len(c.clientNonce) == 0 {
const nonceLen = 6
buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
if _, err := rand.Read(buf[:nonceLen]); err != nil {
return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
}
c.clientNonce = buf[nonceLen:]
b64.Encode(c.clientNonce, buf[:nonceLen])
}
c.authMsg.WriteString("n=")
escaper.WriteString(&c.authMsg, c.user)
c.authMsg.WriteString(",r=")
c.authMsg.Write(c.clientNonce)
c.out.WriteString("n,,")
c.out.Write(c.authMsg.Bytes())
return nil
}
var b64 = base64.StdEncoding
func (c *Client) step2(in []byte) error {
c.authMsg.WriteByte(',')
c.authMsg.Write(in)
fields := bytes.Split(in, []byte(","))
if len(fields) != 3 {
return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in)
}
if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0])
}
if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1])
}
if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
}
c.serverNonce = fields[0][2:]
if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
}
salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
n, err := b64.Decode(salt, fields[1][2:])
if err != nil {
return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1])
}
salt = salt[:n]
iterCount, err := strconv.Atoi(string(fields[2][2:]))
if err != nil {
return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
}
c.saltPassword(salt, iterCount)
c.authMsg.WriteString(",c=biws,r=")
c.authMsg.Write(c.serverNonce)
c.out.WriteString("c=biws,r=")
c.out.Write(c.serverNonce)
c.out.WriteString(",p=")
c.out.Write(c.clientProof())
return nil
}
func (c *Client) step3(in []byte) error {
var isv, ise bool
var fields = bytes.Split(in, []byte(","))
if len(fields) == 1 {
isv = bytes.HasPrefix(fields[0], []byte("v="))
ise = bytes.HasPrefix(fields[0], []byte("e="))
}
if ise {
return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:])
} else if !isv {
return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in)
}
if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:])
}
return nil
}
func (c *Client) saltPassword(salt []byte, iterCount int) {
mac := hmac.New(c.newHash, []byte(c.pass))
mac.Write(salt)
mac.Write([]byte{0, 0, 0, 1})
ui := mac.Sum(nil)
hi := make([]byte, len(ui))
copy(hi, ui)
for i := 1; i < iterCount; i++ {
mac.Reset()
mac.Write(ui)
mac.Sum(ui[:0])
for j, b := range ui {
hi[j] ^= b
}
}
c.saltedPass = hi
}
func (c *Client) clientProof() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Client Key"))
clientKey := mac.Sum(nil)
hash := c.newHash()
hash.Write(clientKey)
storedKey := hash.Sum(nil)
mac = hmac.New(c.newHash, storedKey)
mac.Write(c.authMsg.Bytes())
clientProof := mac.Sum(nil)
for i, b := range clientKey {
clientProof[i] ^= b
}
clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
b64.Encode(clientProof64, clientProof)
return clientProof64
}
func (c *Client) serverSignature() []byte {
mac := hmac.New(c.newHash, c.saltedPass)
mac.Write([]byte("Server Key"))
serverKey := mac.Sum(nil)
mac = hmac.New(c.newHash, serverKey)
mac.Write(c.authMsg.Bytes())
serverSignature := mac.Sum(nil)
encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
b64.Encode(encoded, serverSignature)
return encoded
}
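
An aside on saltPassword above: it is Hi() from RFC 5802, which is just PBKDF2 with HMAC-H as the PRF and a derived key exactly one hash output long. A self-contained cross-check sketch against golang.org/x/crypto/pbkdf2 (hypothetical package name; the extra dependency is pulled in purely for illustration):

package scramcheck

import (
	"hash"

	"golang.org/x/crypto/pbkdf2"
)

// hi mirrors Client.saltPassword: for a sha1.New client,
// hi(sha1.New, pass, salt, iters) should equal c.saltedPass after step2.
func hi(newHash func() hash.Hash, pass string, salt []byte, iterCount int) []byte {
	return pbkdf2.Key([]byte(pass), salt, iterCount, newHash().Size(), newHash)
}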

67
Godeps/_workspace/src/gopkg.in/mgo.v2/internal/scram/scram_test.go generated vendored Normal file

@@ -0,0 +1,67 @@
package scram_test
import (
"crypto/sha1"
"testing"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2/internal/scram"
"strings"
)
var _ = Suite(&S{})
func Test(t *testing.T) { TestingT(t) }
type S struct{}
var tests = [][]string{{
"U: user pencil",
"N: fyko+d2lbbFgONRv9qkxdawL",
"C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
"S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096",
"C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=",
"S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=",
}, {
"U: root fe8c89e308ec08763df36333cbf5d3a2",
"N: OTcxNDk5NjM2MzE5",
"C: n,,n=root,r=OTcxNDk5NjM2MzE5",
"S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000",
"C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=",
"S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=",
}}
func (s *S) TestExamples(c *C) {
for _, steps := range tests {
if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") {
c.Fatalf("Invalid test: %#v", steps)
}
auth := strings.Fields(steps[0][3:])
client := scram.NewClient(sha1.New, auth[0], auth[1])
first, done := true, false
c.Logf("-----")
c.Logf("%s", steps[0])
for _, step := range steps[1:] {
c.Logf("%s", step)
switch step[:3] {
case "N: ":
client.SetNonce([]byte(step[3:]))
case "C: ":
if first {
first = false
done = client.Step(nil)
}
c.Assert(done, Equals, false)
c.Assert(client.Err(), IsNil)
c.Assert(string(client.Out()), Equals, step[3:])
case "S: ":
first = false
done = client.Step([]byte(step[3:]))
default:
panic("invalid test line: " + step)
}
}
c.Assert(done, Equals, true)
c.Assert(client.Err(), IsNil)
}
}
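
The first vector above is the worked example from RFC 5802 itself, and walking it by hand shows the client surface in use. A sketch in Example-test form (same imports as this file, plus "fmt"; the expected outputs are the C: lines of the vector):

func ExampleClient() {
	client := scram.NewClient(sha1.New, "user", "pencil")
	client.SetNonce([]byte("fyko+d2lbbFgONRv9qkxdawL"))

	client.Step(nil) // produce the client-first message
	fmt.Println(string(client.Out()))

	client.Step([]byte("r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096"))
	fmt.Println(string(client.Out()))

	client.Step([]byte("v=rmF9pqV8S7suAoZWja4dJRkFsKQ=")) // verifies the server signature
	fmt.Println(client.Err())

	// Output:
	// n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL
	// c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=
	// <nil>
}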

133
Godeps/_workspace/src/gopkg.in/mgo.v2/log.go generated vendored Normal file

@@ -0,0 +1,133 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"fmt"
"sync"
)
// ---------------------------------------------------------------------------
// Logging integration.
// Avoid importing the log type information unnecessarily. There's a small cost
// associated with using an interface rather than the type. Depending on how
// often the logger is plugged in, it would be worth using the type instead.
type log_Logger interface {
Output(calldepth int, s string) error
}
var (
globalLogger log_Logger
globalDebug bool
globalMutex sync.Mutex
)
// RACE WARNING: There are known data races when logging, which are manually
// silenced when the race detector is in use. These data races won't be
// observed in typical use, because logging is supposed to be set up once when
// the application starts. Since raceDetector is a constant, the compiler
// should elide the locks altogether in actual use.
// Specify the *log.Logger object where log messages should be sent to.
func SetLogger(logger log_Logger) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalLogger = logger
}
// Enable the delivery of debug messages to the logger. Only meaningful
// if a logger is also set.
func SetDebug(debug bool) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalDebug = debug
}
func log(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func logln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func logf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}
func debug(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func debugln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func debugf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}
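
Since *log.Logger from the standard library already provides Output(calldepth int, s string) error, it satisfies the log_Logger interface above directly; a minimal wiring sketch:

package main

import (
	"log"
	"os"

	"gopkg.in/mgo.v2"
)

func main() {
	mgo.SetLogger(log.New(os.Stderr, "mgo: ", log.LstdFlags))
	mgo.SetDebug(true) // debug output only flows once a logger is set
}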

91
Godeps/_workspace/src/gopkg.in/mgo.v2/queue.go generated vendored Normal file

@@ -0,0 +1,91 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
type queue struct {
elems []interface{}
nelems, popi, pushi int
}
func (q *queue) Len() int {
return q.nelems
}
func (q *queue) Push(elem interface{}) {
//debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
if q.nelems == len(q.elems) {
q.expand()
}
q.elems[q.pushi] = elem
q.nelems++
q.pushi = (q.pushi + 1) % len(q.elems)
//debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
}
func (q *queue) Pop() (elem interface{}) {
//debugf("Popping(pushi=%d popi=%d cap=%d)\n",
// q.pushi, q.popi, len(q.elems))
if q.nelems == 0 {
return nil
}
elem = q.elems[q.popi]
q.elems[q.popi] = nil // Help GC.
q.nelems--
q.popi = (q.popi + 1) % len(q.elems)
//debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
return elem
}
func (q *queue) expand() {
curcap := len(q.elems)
var newcap int
if curcap == 0 {
newcap = 8
} else if curcap < 1024 {
newcap = curcap * 2
} else {
newcap = curcap + (curcap / 4)
}
elems := make([]interface{}, newcap)
if q.popi == 0 {
copy(elems, q.elems)
q.pushi = curcap
} else {
newpopi := newcap - (curcap - q.popi)
copy(elems, q.elems[:q.popi])
copy(elems[newpopi:], q.elems[q.popi:])
q.popi = newpopi
}
for i := range q.elems {
q.elems[i] = nil // Help GC.
}
q.elems = elems
}
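
The delicate case in expand() is a wrapped ring (popi != 0), where the front and back halves must be spliced back together in FIFO order. A focused test-style sketch of that path (a hypothetical file in package mgo, importing "testing"):

func TestQueueWrapExpand(t *testing.T) {
	var q queue
	for i := 0; i < 8; i++ { // fill the initial 8-slot ring
		q.Push(i)
	}
	for i := 0; i < 4; i++ { // advance popi to 4
		q.Pop()
	}
	for i := 8; i < 12; i++ { // pushi wraps into the freed front slots
		q.Push(i)
	}
	q.Push(12) // ring full again: expand() runs with popi != 0
	for want := 4; want <= 12; want++ {
		if got := q.Pop(); got != want {
			t.Fatalf("Pop() = %v, want %d", got, want)
		}
	}
}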

101
Godeps/_workspace/src/gopkg.in/mgo.v2/queue_test.go generated vendored Normal file

@@ -0,0 +1,101 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
. "gopkg.in/check.v1"
)
type QS struct{}
var _ = Suite(&QS{})
func (s *QS) TestSequentialGrowth(c *C) {
q := queue{}
n := 2048
for i := 0; i != n; i++ {
q.Push(i)
}
for i := 0; i != n; i++ {
c.Assert(q.Pop(), Equals, i)
}
}
var queueTestLists = [][]int{
// {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
{0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11},
// {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
{0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11},
// {0, 1, 2, 3, 4, 5, 6, 7, 8}
{0, 1, 2, 3, 4, 5, 6, 7, 8,
-1, -1, -1, -1, -1, -1, -1, -1, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8},
}
func (s *QS) TestQueueTestLists(c *C) {
test := []int{}
testi := 0
reset := func() {
test = test[0:0]
testi = 0
}
push := func(i int) {
test = append(test, i)
}
pop := func() (i int) {
if testi == len(test) {
return -1
}
i = test[testi]
testi++
return
}
for _, list := range queueTestLists {
reset()
q := queue{}
for _, n := range list {
if n == -1 {
c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list))
} else {
q.Push(n)
push(n)
}
}
for n := pop(); n != -1; n = pop() {
c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list))
}
c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list))
}
}

5
Godeps/_workspace/src/gopkg.in/mgo.v2/raceoff.go generated vendored Normal file

@@ -0,0 +1,5 @@
// +build !race
package mgo
const raceDetector = false

5
Godeps/_workspace/src/gopkg.in/mgo.v2/raceon.go generated vendored Normal file

@@ -0,0 +1,5 @@
// +build race
package mgo
const raceDetector = true

77
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.c generated vendored Normal file

@@ -0,0 +1,77 @@
// +build !windows
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sasl/sasl.h>
static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
{
if (!result) {
return SASL_BADPARAM;
}
switch (id) {
case SASL_CB_USER:
*result = (char *)context;
break;
case SASL_CB_AUTHNAME:
*result = (char *)context;
break;
case SASL_CB_LANGUAGE:
*result = NULL;
break;
default:
return SASL_BADPARAM;
}
if (len) {
*len = *result ? strlen(*result) : 0;
}
return SASL_OK;
}
typedef int (*callback)(void);
static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
{
if (!conn || !result || id != SASL_CB_PASS) {
return SASL_BADPARAM;
}
*result = (sasl_secret_t *)context;
return SASL_OK;
}
sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
{
sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
int n = 0;
size_t len = strlen(password);
sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
if (!secret) {
free(cb);
return NULL;
}
strcpy((char *)secret->data, password);
secret->len = len;
cb[n].id = SASL_CB_PASS;
cb[n].proc = (callback)&mgo_sasl_secret;
cb[n].context = secret;
n++;
cb[n].id = SASL_CB_USER;
cb[n].proc = (callback)&mgo_sasl_simple;
cb[n].context = (char*)username;
n++;
cb[n].id = SASL_CB_AUTHNAME;
cb[n].proc = (callback)&mgo_sasl_simple;
cb[n].context = (char*)username;
n++;
cb[n].id = SASL_CB_LIST_END;
cb[n].proc = NULL;
cb[n].context = NULL;
return cb;
}

138
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl.go generated vendored Normal file

@@ -0,0 +1,138 @@
// Package sasl is an implementation detail of the mgo package.
//
// This package is not meant to be used by itself.
//
// +build !windows
package sasl
// #cgo LDFLAGS: -lsasl2
//
// struct sasl_conn {};
//
// #include <stdlib.h>
// #include <sasl/sasl.h>
//
// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
//
import "C"
import (
"fmt"
"strings"
"sync"
"unsafe"
)
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
type saslSession struct {
conn *C.sasl_conn_t
step int
mech string
cstrings []*C.char
callbacks *C.sasl_callback_t
}
var initError error
var initOnce sync.Once
func initSASL() {
rc := C.sasl_client_init(nil)
if rc != C.SASL_OK {
initError = saslError(rc, nil, "cannot initialize SASL library")
}
}
func New(username, password, mechanism, service, host string) (saslStepper, error) {
initOnce.Do(initSASL)
if initError != nil {
return nil, initError
}
ss := &saslSession{mech: mechanism}
if service == "" {
service = "mongodb"
}
if i := strings.Index(host, ":"); i >= 0 {
host = host[:i]
}
ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
if rc != C.SASL_OK {
ss.Close()
return nil, saslError(rc, nil, "cannot create new SASL client")
}
return ss, nil
}
func (ss *saslSession) cstr(s string) *C.char {
cstr := C.CString(s)
ss.cstrings = append(ss.cstrings, cstr)
return cstr
}
func (ss *saslSession) Close() {
for _, cstr := range ss.cstrings {
C.free(unsafe.Pointer(cstr))
}
ss.cstrings = nil
if ss.callbacks != nil {
C.free(unsafe.Pointer(ss.callbacks))
}
// The documentation of SASL dispose makes it clear that this should only
// be done when the connection is done, not when the authentication phase
// is done, because an encryption layer may have been negotiated.
// Even then, we'll do this for now, because it's simpler and prevents
// keeping track of this state for every socket. If it breaks, we'll fix it.
C.sasl_dispose(&ss.conn)
}
func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
ss.step++
if ss.step > 10 {
return nil, false, fmt.Errorf("too many SASL steps without authentication")
}
var cclientData *C.char
var cclientDataLen C.uint
var rc C.int
if ss.step == 1 {
var mechanism *C.char // ignored - must match cred
rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
} else {
var cserverData *C.char
var cserverDataLen C.uint
if len(serverData) > 0 {
cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
cserverDataLen = C.uint(len(serverData))
}
rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
}
if cclientData != nil && cclientDataLen > 0 {
clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
}
if rc == C.SASL_OK {
return clientData, true, nil
}
if rc == C.SASL_CONTINUE {
return clientData, false, nil
}
return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
}
func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
var detail string
if conn == nil {
detail = C.GoString(C.sasl_errstring(rc, nil, nil))
} else {
detail = C.GoString(C.sasl_errdetail(conn))
}
return fmt.Errorf("%s: %s", msg, detail)
}
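
Both this cgo implementation and the SSPI one below satisfy saslStepper, so the calling side is mechanism-agnostic. A hedged sketch of the driving loop (the send transport is hypothetical; in mgo it corresponds to the saslStart/saslContinue database commands):

func runSASL(ss saslStepper, send func(clientData []byte) (serverData []byte, serverDone bool, err error)) error {
	defer ss.Close()
	var serverData []byte
	for {
		clientData, done, err := ss.Step(serverData)
		if err != nil {
			return err
		}
		var serverDone bool
		serverData, serverDone, err = send(clientData)
		if err != nil {
			return err
		}
		if done && serverDone {
			return nil
		}
	}
}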

118
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.c generated vendored Normal file

@@ -0,0 +1,118 @@
#include "sasl_windows.h"
static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
{
SEC_WINNT_AUTH_IDENTITY auth_identity;
SECURITY_INTEGER ignored;
auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
auth_identity.User = (LPSTR) username;
auth_identity.UserLength = strlen(username);
auth_identity.Password = (LPSTR) password;
auth_identity.PasswordLength = strlen(password);
auth_identity.Domain = (LPSTR) domain;
auth_identity.DomainLength = strlen(domain);
return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
}
int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *target)
{
SecBufferDesc inbuf;
SecBuffer in_bufs[1];
SecBufferDesc outbuf;
SecBuffer out_bufs[1];
if (has_context > 0) {
// If we already have a context, we now have data to send.
// Put this data in an inbuf.
inbuf.ulVersion = SECBUFFER_VERSION;
inbuf.cBuffers = 1;
inbuf.pBuffers = in_bufs;
in_bufs[0].pvBuffer = *buffer;
in_bufs[0].cbBuffer = *buffer_length;
in_bufs[0].BufferType = SECBUFFER_TOKEN;
}
outbuf.ulVersion = SECBUFFER_VERSION;
outbuf.cBuffers = 1;
outbuf.pBuffers = out_bufs;
out_bufs[0].pvBuffer = NULL;
out_bufs[0].cbBuffer = 0;
out_bufs[0].BufferType = SECBUFFER_TOKEN;
ULONG context_attr = 0;
int ret = call_sspi_initialize_security_context(cred_handle,
has_context > 0 ? context : NULL,
(LPSTR) target,
ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
0,
SECURITY_NETWORK_DREP,
has_context > 0 ? &inbuf : NULL,
0,
context,
&outbuf,
&context_attr,
NULL);
*buffer = malloc(out_bufs[0].cbBuffer);
*buffer_length = out_bufs[0].cbBuffer;
memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length);
return ret;
}
int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
{
SecPkgContext_Sizes sizes;
SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);
if (status != SEC_E_OK) {
return status;
}
size_t user_plus_realm_length = strlen(user_plus_realm);
int msgSize = 4 + user_plus_realm_length;
char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
msg[sizes.cbSecurityTrailer + 0] = 1;
msg[sizes.cbSecurityTrailer + 1] = 0;
msg[sizes.cbSecurityTrailer + 2] = 0;
msg[sizes.cbSecurityTrailer + 3] = 0;
memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);
SecBuffer wrapBufs[3];
SecBufferDesc wrapBufDesc;
wrapBufDesc.cBuffers = 3;
wrapBufDesc.pBuffers = wrapBufs;
wrapBufDesc.ulVersion = SECBUFFER_VERSION;
wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
wrapBufs[0].BufferType = SECBUFFER_TOKEN;
wrapBufs[0].pvBuffer = msg;
wrapBufs[1].cbBuffer = msgSize;
wrapBufs[1].BufferType = SECBUFFER_DATA;
wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
wrapBufs[2].cbBuffer = sizes.cbBlockSize;
wrapBufs[2].BufferType = SECBUFFER_PADDING;
wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;
status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
if (status != SEC_E_OK) {
free(msg);
return status;
}
*buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
*buffer = malloc(*buffer_length);
memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);
free(msg);
return SEC_E_OK;
}

140
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.go generated vendored Normal file

@@ -0,0 +1,140 @@
package sasl
// #include "sasl_windows.h"
import "C"
import (
"fmt"
"strings"
"sync"
"unsafe"
)
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
type saslSession struct {
// Credentials
mech string
service string
host string
userPlusRealm string
target string
domain string
// Internal state
authComplete bool
errored bool
step int
// C internal state
credHandle C.CredHandle
context C.CtxtHandle
hasContext C.int
// Keep track of pointers we need to explicitly free
stringsToFree []*C.char
}
var initError error
var initOnce sync.Once
func initSSPI() {
rc := C.load_secur32_dll()
if rc != 0 {
initError = fmt.Errorf("Error loading libraries: %v", rc)
}
}
func New(username, password, mechanism, service, host string) (saslStepper, error) {
initOnce.Do(initSSPI)
ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
if service == "" {
service = "mongodb"
}
if i := strings.Index(host, ":"); i >= 0 {
host = host[:i]
}
ss.service = service
ss.host = host
usernameComponents := strings.Split(username, "@")
if len(usernameComponents) < 2 {
return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
}
user := usernameComponents[0]
ss.domain = usernameComponents[1]
ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)
var status C.SECURITY_STATUS
// Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
if len(password) > 0 {
status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
} else {
status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
}
if status != C.SEC_E_OK {
ss.errored = true
return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
}
return ss, nil
}
func (ss *saslSession) cstr(s string) *C.char {
cstr := C.CString(s)
ss.stringsToFree = append(ss.stringsToFree, cstr)
return cstr
}
func (ss *saslSession) Close() {
for _, cstr := range ss.stringsToFree {
C.free(unsafe.Pointer(cstr))
}
}
func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
ss.step++
if ss.step > 10 {
return nil, false, fmt.Errorf("too many SSPI steps without authentication")
}
var buffer C.PVOID
var bufferLength C.ULONG
if len(serverData) > 0 {
buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
bufferLength = C.ULONG(len(serverData))
}
var status C.int
if ss.authComplete {
// Step 3: last bit of magic to use the correct server credentials
status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm))
} else {
// Step 1 + Step 2: set up security context with the server and TGT
status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target))
}
if buffer != C.PVOID(nil) {
defer C.free(unsafe.Pointer(buffer))
}
if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
ss.errored = true
return nil, false, ss.handleSSPIErrorCode(status)
}
clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength))
if status == C.SEC_E_OK {
ss.authComplete = true
return clientData, true, nil
} else {
ss.hasContext = 1
return clientData, false, nil
}
}
func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
switch {
case code == C.SEC_E_TARGET_UNKNOWN:
return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
}
return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
}

7
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sasl_windows.h generated vendored Normal file

@@ -0,0 +1,7 @@
#include <windows.h>
#include "sspi_windows.h"
SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target);
int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);

96
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.c generated vendored Normal file

@@ -0,0 +1,96 @@
// Code adapted from the NodeJS kerberos library:
//
// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
//
// Under the terms of the Apache License, Version 2.0:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
#include <stdlib.h>
#include "sspi_windows.h"
static HINSTANCE sspi_secur32_dll = NULL;
int load_secur32_dll()
{
sspi_secur32_dll = LoadLibrary("secur32.dll");
if (sspi_secur32_dll == NULL) {
return GetLastError();
}
return 0;
}
SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
if (!pfn_encryptMessage) {
return -2;
}
return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
}
SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
PCredHandle phCredential, PTimeStamp ptsExpiry)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
#ifdef _UNICODE
pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
#else
pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
#endif
if (!pfn_acquireCredentialsHandle) {
return -2;
}
return (*pfn_acquireCredentialsHandle)(
pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}
SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
initializeSecurityContext_fn pfn_initializeSecurityContext;
#ifdef _UNICODE
pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
#else
pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
#endif
if (!pfn_initializeSecurityContext) {
return -2;
}
return (*pfn_initializeSecurityContext)(
phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}
SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
{
if (sspi_secur32_dll == NULL) {
return -1;
}
queryContextAttributes_fn pfn_queryContextAttributes;
#ifdef _UNICODE
pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
#else
pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
#endif
if (!pfn_queryContextAttributes) {
return -2;
}
return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
}

70
Godeps/_workspace/src/gopkg.in/mgo.v2/sasl/sspi_windows.h generated vendored Normal file

@@ -0,0 +1,70 @@
// Code adapted from the NodeJS kerberos library:
//
// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
//
// Under the terms of the Apache License, Version 2.0:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
#ifndef SSPI_WINDOWS_H
#define SSPI_WINDOWS_H
#define SECURITY_WIN32 1
#include <windows.h>
#include <sspi.h>
int load_secur32_dll();
SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);
typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);
SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
LPSTR pszPrincipal, // Name of principal
LPSTR pszPackage, // Name of package
unsigned long fCredentialUse, // Flags indicating use
void *pvLogonId, // Pointer to logon ID
void *pAuthData, // Package specific data
SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func
void *pvGetKeyArgument, // Value to pass to GetKey()
PCredHandle phCredential, // (out) Cred Handle
PTimeStamp ptsExpiry // (out) Lifetime (optional)
);
typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
PCredHandle phCredential, PTimeStamp ptsExpiry
);
SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
PCredHandle phCredential, // Cred to base context
PCtxtHandle phContext, // Existing context (OPT)
LPSTR pszTargetName, // Name of target
unsigned long fContextReq, // Context Requirements
unsigned long Reserved1, // Reserved, MBZ
unsigned long TargetDataRep, // Data rep of target
PSecBufferDesc pInput, // Input Buffers
unsigned long Reserved2, // Reserved, MBZ
PCtxtHandle phNewContext, // (out) New Context handle
PSecBufferDesc pOutput, // (inout) Output Buffers
unsigned long *pfContextAttr, // (out) Context attrs
PTimeStamp ptsExpiry // (out) Life span (OPT)
);
typedef DWORD (WINAPI *initializeSecurityContext_fn)(
PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);
SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
PCtxtHandle phContext, // Context to query
unsigned long ulAttribute, // Attribute to query
void *pBuffer // Buffer for attributes
);
typedef DWORD (WINAPI *queryContextAttributes_fn)(
PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);
#endif // SSPI_WINDOWS_H

11
Godeps/_workspace/src/gopkg.in/mgo.v2/saslimpl.go generated vendored Normal file

@@ -0,0 +1,11 @@
//+build sasl
package mgo
import (
"gopkg.in/mgo.v2/sasl"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
}

11
Godeps/_workspace/src/gopkg.in/mgo.v2/saslstub.go generated vendored Normal file

@@ -0,0 +1,11 @@
//+build !sasl
package mgo
import (
"fmt"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
}

447
Godeps/_workspace/src/gopkg.in/mgo.v2/server.go generated vendored Normal file

@@ -0,0 +1,447 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"net"
"sort"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
// ---------------------------------------------------------------------------
// Mongo server encapsulation.
type mongoServer struct {
sync.RWMutex
Addr string
ResolvedAddr string
tcpaddr *net.TCPAddr
unusedSockets []*mongoSocket
liveSockets []*mongoSocket
closed bool
abended bool
sync chan bool
dial dialer
pingValue time.Duration
pingIndex int
pingCount uint32
pingWindow [6]time.Duration
info *mongoServerInfo
}
type dialer struct {
old func(addr net.Addr) (net.Conn, error)
new func(addr *ServerAddr) (net.Conn, error)
}
func (dial dialer) isSet() bool {
return dial.old != nil || dial.new != nil
}
type mongoServerInfo struct {
Master bool
Mongos bool
Tags bson.D
MaxWireVersion int
}
var defaultServerInfo mongoServerInfo
func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
server := &mongoServer{
Addr: addr,
ResolvedAddr: tcpaddr.String(),
tcpaddr: tcpaddr,
sync: sync,
dial: dial,
info: &defaultServerInfo,
}
// Once so the server gets a ping value, then loop in background.
server.pinger(false)
go server.pinger(true)
return server
}
var errPoolLimit = errors.New("per-server connection limit reached")
var errServerClosed = errors.New("server was closed")
// AcquireSocket returns a socket for communicating with the server.
// This will attempt to reuse an old connection, if one is available. Otherwise,
// it will establish a new one. The returned socket is owned by the call site,
// and will return to the cache when the socket has its Release method called
// the same number of times as AcquireSocket + Acquire were called for it.
// If the poolLimit argument is greater than zero and the number of sockets in
// use in this server is greater than the provided limit, errPoolLimit is
// returned.
func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
for {
server.Lock()
abended = server.abended
if server.closed {
server.Unlock()
return nil, abended, errServerClosed
}
n := len(server.unusedSockets)
if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit {
server.Unlock()
return nil, false, errPoolLimit
}
if n > 0 {
socket = server.unusedSockets[n-1]
server.unusedSockets[n-1] = nil // Help GC.
server.unusedSockets = server.unusedSockets[:n-1]
info := server.info
server.Unlock()
err = socket.InitialAcquire(info, timeout)
if err != nil {
continue
}
} else {
server.Unlock()
socket, err = server.Connect(timeout)
if err == nil {
server.Lock()
// We've waited for the Connect, see if we got
// closed in the meantime
if server.closed {
server.Unlock()
socket.Release()
socket.Close()
return nil, abended, errServerClosed
}
server.liveSockets = append(server.liveSockets, socket)
server.Unlock()
}
}
return
}
panic("unreachable")
}
// Connect establishes a new connection to the server. This should
// generally be done through server.AcquireSocket().
func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
server.RLock()
master := server.info.Master
dial := server.dial
server.RUnlock()
logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
var conn net.Conn
var err error
switch {
case !dial.isSet():
// Cannot do this because it lacks timeout support. :-(
//conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
case dial.old != nil:
conn, err = dial.old(server.tcpaddr)
case dial.new != nil:
conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
default:
panic("dialer is set, but both dial.old and dial.new are nil")
}
if err != nil {
logf("Connection to %s failed: %v", server.Addr, err.Error())
return nil, err
}
logf("Connection to %s established.", server.Addr)
stats.conn(+1, master)
return newSocket(server, conn, timeout), nil
}
// Close forces closing all sockets that are alive, whether
// they're currently in use or not.
func (server *mongoServer) Close() {
server.Lock()
server.closed = true
liveSockets := server.liveSockets
unusedSockets := server.unusedSockets
server.liveSockets = nil
server.unusedSockets = nil
server.Unlock()
logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
for i, s := range liveSockets {
s.Close()
liveSockets[i] = nil
}
for i := range unusedSockets {
unusedSockets[i] = nil
}
}
// RecycleSocket puts socket back into the unused cache.
func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
server.Lock()
if !server.closed {
server.unusedSockets = append(server.unusedSockets, socket)
}
server.Unlock()
}
func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
for i, s := range sockets {
if s == socket {
copy(sockets[i:], sockets[i+1:])
n := len(sockets) - 1
sockets[n] = nil
sockets = sockets[:n]
break
}
}
return sockets
}
// AbendSocket notifies the server that the given socket has terminated
// abnormally, and thus should be discarded rather than cached.
func (server *mongoServer) AbendSocket(socket *mongoSocket) {
server.Lock()
server.abended = true
if server.closed {
server.Unlock()
return
}
server.liveSockets = removeSocket(server.liveSockets, socket)
server.unusedSockets = removeSocket(server.unusedSockets, socket)
server.Unlock()
// Maybe just a timeout, but suggest a cluster sync up just in case.
select {
case server.sync <- true:
default:
}
}
func (server *mongoServer) SetInfo(info *mongoServerInfo) {
server.Lock()
server.info = info
server.Unlock()
}
func (server *mongoServer) Info() *mongoServerInfo {
server.Lock()
info := server.info
server.Unlock()
return info
}
func (server *mongoServer) hasTags(serverTags []bson.D) bool {
NextTagSet:
for _, tags := range serverTags {
NextReqTag:
for _, req := range tags {
for _, has := range server.info.Tags {
if req.Name == has.Name {
if req.Value == has.Value {
continue NextReqTag
}
continue NextTagSet
}
}
continue NextTagSet
}
return true
}
return false
}
var pingDelay = 5 * time.Second
func (server *mongoServer) pinger(loop bool) {
var delay time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
delay = pingDelay
globalMutex.Unlock()
} else {
delay = pingDelay
}
op := queryOp{
collection: "admin.$cmd",
query: bson.D{{"ping", 1}},
flags: flagSlaveOk,
limit: -1,
}
for {
if loop {
time.Sleep(delay)
}
op := op
socket, _, err := server.AcquireSocket(0, 3*delay)
if err == nil {
start := time.Now()
_, _ = socket.SimpleQuery(&op)
delay := time.Now().Sub(start)
server.pingWindow[server.pingIndex] = delay
server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
server.pingCount++
var max time.Duration
for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
if server.pingWindow[i] > max {
max = server.pingWindow[i]
}
}
socket.Release()
server.Lock()
if server.closed {
loop = false
}
server.pingValue = max
server.Unlock()
logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
} else if err == errServerClosed {
return
}
if !loop {
return
}
}
}
type mongoServerSlice []*mongoServer
func (s mongoServerSlice) Len() int {
return len(s)
}
func (s mongoServerSlice) Less(i, j int) bool {
return s[i].ResolvedAddr < s[j].ResolvedAddr
}
func (s mongoServerSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s mongoServerSlice) Sort() {
sort.Sort(s)
}
func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
n := len(s)
i = sort.Search(n, func(i int) bool {
return s[i].ResolvedAddr >= resolvedAddr
})
return i, i != n && s[i].ResolvedAddr == resolvedAddr
}
type mongoServers struct {
slice mongoServerSlice
}
func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
if i, ok := servers.slice.Search(resolvedAddr); ok {
return servers.slice[i]
}
return nil
}
func (servers *mongoServers) Add(server *mongoServer) {
servers.slice = append(servers.slice, server)
servers.slice.Sort()
}
func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
if i, found := servers.slice.Search(other.ResolvedAddr); found {
server = servers.slice[i]
copy(servers.slice[i:], servers.slice[i+1:])
n := len(servers.slice) - 1
servers.slice[n] = nil // Help GC.
servers.slice = servers.slice[:n]
}
return
}
func (servers *mongoServers) Slice() []*mongoServer {
return ([]*mongoServer)(servers.slice)
}
func (servers *mongoServers) Get(i int) *mongoServer {
return servers.slice[i]
}
func (servers *mongoServers) Len() int {
return len(servers.slice)
}
func (servers *mongoServers) Empty() bool {
return len(servers.slice) == 0
}
// BestFit returns the best guess of what would be the most interesting
// server to perform operations on at this point in time.
func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer {
var best *mongoServer
for _, next := range servers.slice {
if best == nil {
best = next
best.RLock()
if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
best.RUnlock()
best = nil
}
continue
}
next.RLock()
swap := false
switch {
case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
// Must have requested tags.
case next.info.Master != best.info.Master:
// Prefer slaves.
swap = best.info.Master
case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
// Prefer nearest server.
swap = next.pingValue < best.pingValue
case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
// Prefer servers with fewer connections.
swap = true
}
if swap {
best.RUnlock()
best = next
} else {
next.RUnlock()
}
}
if best != nil {
best.RUnlock()
}
return best
}
func absDuration(d time.Duration) time.Duration {
if d < 0 {
return -d
}
return d
}
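// Illustrative note (not in the original source): BestFit's switch applies
// its rules in strict order. Between a master at 5ms and a slave at 30ms,
// the slave wins outright (slaves are preferred regardless of ping). Between
// two slaves at 5ms and 30ms, the 5ms one wins (pings differ by more than
// 15ms). Between two slaves at 5ms and 12ms, the ping difference is within
// the 15ms window, so the server with fewer busy sockets wins instead.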

3867
Godeps/_workspace/src/gopkg.in/mgo.v2/session.go generated vendored Normal file

File diff suppressed because it is too large

3484
Godeps/_workspace/src/gopkg.in/mgo.v2/session_test.go generated vendored Normal file

File diff suppressed because it is too large

675
Godeps/_workspace/src/gopkg.in/mgo.v2/socket.go generated vendored Normal file
View File

@@ -0,0 +1,675 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"net"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
type mongoSocket struct {
sync.Mutex
server *mongoServer // nil when cached
conn net.Conn
timeout time.Duration
addr string // For debugging only.
nextRequestId uint32
replyFuncs map[uint32]replyFunc
references int
creds []Credential
logout []Credential
cachedNonce string
gotNonce sync.Cond
dead error
serverInfo *mongoServerInfo
}
type queryOpFlags uint32
const (
_ queryOpFlags = 1 << iota
flagTailable
flagSlaveOk
flagLogReplay
flagNoCursorTimeout
flagAwaitData
)
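// Illustrative note (not in the original source): the blank identifier burns
// bit 0, which is reserved in the wire protocol, so these constants line up
// with OP_QUERY's flag bits:
//
//	flagTailable == 2, flagSlaveOk == 4, flagLogReplay == 8,
//	flagNoCursorTimeout == 16, flagAwaitData == 32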
type queryOp struct {
collection string
query interface{}
skip int32
limit int32
selector interface{}
flags queryOpFlags
replyFunc replyFunc
options queryWrapper
hasOptions bool
serverTags []bson.D
}
type queryWrapper struct {
Query interface{} "$query"
OrderBy interface{} "$orderby,omitempty"
Hint interface{} "$hint,omitempty"
Explain bool "$explain,omitempty"
Snapshot bool "$snapshot,omitempty"
ReadPreference bson.D "$readPreference,omitempty"
MaxScan int "$maxScan,omitempty"
}
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos {
op.hasOptions = true
op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}}
}
if op.hasOptions {
if op.query == nil {
var empty bson.D
op.options.Query = empty
} else {
op.options.Query = op.query
}
debugf("final query is %#v\n", &op.options)
return &op.options
}
return op.query
}
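// Illustrative note (not in the original source): when flagSlaveOk is set,
// serverTags are present, and the peer is a mongos, a plain query such as
// bson.D{{"x", 1}} goes out wrapped roughly as:
//
//	{"$query": {"x": 1},
//	 "$readPreference": {"mode": "secondaryPreferred", "tags": [<serverTags>]}}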
type getMoreOp struct {
collection string
limit int32
cursorId int64
replyFunc replyFunc
}
type replyOp struct {
flags uint32
cursorId int64
firstDoc int32
replyDocs int32
}
type insertOp struct {
collection string // "database.collection"
documents []interface{} // One or more documents to insert
flags uint32
}
type updateOp struct {
collection string // "database.collection"
selector interface{}
update interface{}
flags uint32
}
type deleteOp struct {
collection string // "database.collection"
selector interface{}
flags uint32
}
type killCursorsOp struct {
cursorIds []int64
}
type requestInfo struct {
bufferPos int
replyFunc replyFunc
}
func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
socket := &mongoSocket{
conn: conn,
addr: server.Addr,
server: server,
replyFuncs: make(map[uint32]replyFunc),
}
socket.gotNonce.L = &socket.Mutex
if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
panic("newSocket: InitialAcquire returned error: " + err.Error())
}
stats.socketsAlive(+1)
debugf("Socket %p to %s: initialized", socket, socket.addr)
socket.resetNonce()
go socket.readLoop()
return socket
}
// Server returns the server that the socket is associated with.
// It returns nil while the socket is cached in its respective server.
func (socket *mongoSocket) Server() *mongoServer {
socket.Lock()
server := socket.server
socket.Unlock()
return server
}
// ServerInfo returns details for the server at the time the socket
// was initially acquired.
func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
socket.Lock()
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// InitialAcquire obtains the first reference to the socket, either
// right after the connection is made or once a recycled socket is
// being put back in use.
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
socket.Lock()
if socket.references > 0 {
panic("Socket acquired out of cache with references")
}
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
return dead
}
socket.references++
socket.serverInfo = serverInfo
socket.timeout = timeout
stats.socketsInUse(+1)
stats.socketRefs(+1)
socket.Unlock()
return nil
}
// Acquire obtains an additional reference to the socket.
// The socket will only be recycled when it's released as many
// times as it's been acquired.
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
socket.Lock()
if socket.references == 0 {
panic("Socket got non-initial acquire with references == 0")
}
// We'll track references to dead sockets as well.
// Caller is still supposed to release the socket.
socket.references++
stats.socketRefs(+1)
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// Release decrements a socket reference. The socket will be
// recycled once it's released as many times as it's been acquired.
func (socket *mongoSocket) Release() {
socket.Lock()
if socket.references == 0 {
panic("socket.Release() with references == 0")
}
socket.references--
stats.socketRefs(-1)
if socket.references == 0 {
stats.socketsInUse(-1)
server := socket.server
socket.Unlock()
socket.LogoutAll()
// If the socket is dead, server is nil.
if server != nil {
server.RecycleSocket(socket)
}
} else {
socket.Unlock()
}
}
// SetTimeout changes the timeout used on socket operations.
func (socket *mongoSocket) SetTimeout(d time.Duration) {
socket.Lock()
socket.timeout = d
socket.Unlock()
}
type deadlineType int
const (
readDeadline deadlineType = 1
writeDeadline deadlineType = 2
)
func (socket *mongoSocket) updateDeadline(which deadlineType) {
var when time.Time
if socket.timeout > 0 {
when = time.Now().Add(socket.timeout)
}
whichstr := ""
switch which {
case readDeadline | writeDeadline:
whichstr = "read/write"
socket.conn.SetDeadline(when)
case readDeadline:
whichstr = "read"
socket.conn.SetReadDeadline(when)
case writeDeadline:
whichstr = "write"
socket.conn.SetWriteDeadline(when)
default:
panic("invalid parameter to updateDeadline")
}
debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
}
// Close terminates the socket use.
func (socket *mongoSocket) Close() {
socket.kill(errors.New("Closed explicitly"), false)
}
func (socket *mongoSocket) kill(err error, abend bool) {
socket.Lock()
if socket.dead != nil {
debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
socket.Unlock()
return
}
logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
socket.dead = err
socket.conn.Close()
stats.socketsAlive(-1)
replyFuncs := socket.replyFuncs
socket.replyFuncs = make(map[uint32]replyFunc)
server := socket.server
socket.server = nil
socket.gotNonce.Broadcast()
socket.Unlock()
for _, replyFunc := range replyFuncs {
logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
replyFunc(err, nil, -1, nil)
}
if abend {
server.AbendSocket(socket)
}
}
func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
var wait, change sync.Mutex
var replyDone bool
var replyData []byte
var replyErr error
wait.Lock()
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
change.Lock()
if !replyDone {
replyDone = true
replyErr = err
if err == nil {
replyData = docData
}
}
change.Unlock()
wait.Unlock()
}
err = socket.Query(op)
if err != nil {
return nil, err
}
wait.Lock()
change.Lock()
data = replyData
err = replyErr
change.Unlock()
return data, err
}
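// Illustrative sketch (not in the original source): SimpleQuery turns the
// asynchronous replyFunc callback into a blocking call by using a locked
// sync.Mutex as a one-shot rendezvous. The caller locks wait up front, the
// callback unlocks it, and the caller's second Lock blocks until then:
//
//	var wait sync.Mutex
//	wait.Lock()
//	go func() {
//		// ... produce the result ...
//		wait.Unlock() // wake the waiter
//	}()
//	wait.Lock() // blocks until the goroutine calls Unlock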
func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
if lops := socket.flushLogout(); len(lops) > 0 {
ops = append(lops, ops...)
}
buf := make([]byte, 0, 256)
// Serialize operations synchronously to avoid interrupting
// other goroutines while we can't really be sending data.
// Also, record id positions so that we can compute request
// ids at once later with the lock already held.
requests := make([]requestInfo, len(ops))
requestCount := 0
for _, op := range ops {
debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
start := len(buf)
var replyFunc replyFunc
switch op := op.(type) {
case *updateOp:
buf = addHeader(buf, 2001)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, int32(op.flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update)
buf, err = addBSON(buf, op.update)
if err != nil {
return err
}
case *insertOp:
buf = addHeader(buf, 2002)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
for _, doc := range op.documents {
debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
buf, err = addBSON(buf, doc)
if err != nil {
return err
}
}
case *queryOp:
buf = addHeader(buf, 2004)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.skip)
buf = addInt32(buf, op.limit)
buf, err = addBSON(buf, op.finalQuery(socket))
if err != nil {
return err
}
if op.selector != nil {
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
}
replyFunc = op.replyFunc
case *getMoreOp:
buf = addHeader(buf, 2005)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.limit)
buf = addInt64(buf, op.cursorId)
replyFunc = op.replyFunc
case *deleteOp:
buf = addHeader(buf, 2006)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, int32(op.flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector)
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
case *killCursorsOp:
buf = addHeader(buf, 2007)
buf = addInt32(buf, 0) // Reserved
buf = addInt32(buf, int32(len(op.cursorIds)))
for _, cursorId := range op.cursorIds {
buf = addInt64(buf, cursorId)
}
default:
panic("internal error: unknown operation type")
}
setInt32(buf, start, int32(len(buf)-start))
if replyFunc != nil {
request := &requests[requestCount]
request.replyFunc = replyFunc
request.bufferPos = start
requestCount++
}
}
// Buffer is ready for the pipe. Lock, allocate ids, and enqueue.
socket.Lock()
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error())
// XXX This seems necessary in case the session is closed concurrently
// with a query being performed, but it's not yet tested:
for i := 0; i != requestCount; i++ {
request := &requests[i]
if request.replyFunc != nil {
request.replyFunc(dead, nil, -1, nil)
}
}
return dead
}
wasWaiting := len(socket.replyFuncs) > 0
// Reserve id 0 for requests which should have no responses.
requestId := socket.nextRequestId + 1
if requestId == 0 {
requestId++
}
socket.nextRequestId = requestId + uint32(requestCount)
for i := 0; i != requestCount; i++ {
request := &requests[i]
setInt32(buf, request.bufferPos+4, int32(requestId))
socket.replyFuncs[requestId] = request.replyFunc
requestId++
}
debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
stats.sentOps(len(ops))
socket.updateDeadline(writeDeadline)
_, err = socket.conn.Write(buf)
if !wasWaiting && requestCount > 0 {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
return err
}
func fill(r net.Conn, b []byte) error {
l := len(b)
n, err := r.Read(b)
for n != l && err == nil {
var ni int
ni, err = r.Read(b[n:])
n += ni
}
return err
}
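// Illustrative note (not in the original source): fill is essentially
// io.ReadFull, looping Read until the buffer is full or an error occurs
// (the two differ only in how the error from a final short read is
// reported):
//
//	if _, err := io.ReadFull(conn, p); err != nil {
//		// handle short read or connection error
//	}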
// Estimated minimum cost per socket: 1 goroutine + memory for the largest
// document ever seen.
func (socket *mongoSocket) readLoop() {
p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
s := make([]byte, 4)
conn := socket.conn // No locking, conn never changes.
for {
// XXX Handle timeouts, etc.
err := fill(conn, p)
if err != nil {
socket.kill(err, true)
return
}
totalLen := getInt32(p, 0)
responseTo := getInt32(p, 8)
opCode := getInt32(p, 12)
// Don't use socket.server.Addr here. socket is not
// locked and socket.server may go away.
debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)
_ = totalLen
if opCode != 1 {
socket.kill(errors.New("opcode != 1, corrupted data?"), true)
return
}
reply := replyOp{
flags: uint32(getInt32(p, 16)),
cursorId: getInt64(p, 20),
firstDoc: getInt32(p, 28),
replyDocs: getInt32(p, 32),
}
stats.receivedOps(+1)
stats.receivedDocs(int(reply.replyDocs))
socket.Lock()
replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
if ok {
delete(socket.replyFuncs, uint32(responseTo))
}
socket.Unlock()
if replyFunc != nil && reply.replyDocs == 0 {
replyFunc(nil, &reply, -1, nil)
} else {
for i := 0; i != int(reply.replyDocs); i++ {
err := fill(conn, s)
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
b := make([]byte, int(getInt32(s, 0)))
// copy(b, s) in an efficient way.
b[0] = s[0]
b[1] = s[1]
b[2] = s[2]
b[3] = s[3]
err = fill(conn, b[4:])
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
if globalDebug && globalLogger != nil {
m := bson.M{}
if err := bson.Unmarshal(b, m); err == nil {
debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
}
}
if replyFunc != nil {
replyFunc(nil, &reply, i, b)
}
// XXX Do bound checking against totalLen.
}
}
socket.Lock()
if len(socket.replyFuncs) == 0 {
// Nothing else to read for now. Disable deadline.
socket.conn.SetReadDeadline(time.Time{})
} else {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
// XXX Do bound checking against totalLen.
}
}
var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
func addHeader(b []byte, opcode int) []byte {
i := len(b)
b = append(b, emptyHeader...)
// Enough for current opcodes.
b[i+12] = byte(opcode)
b[i+13] = byte(opcode >> 8)
return b
}
func addInt32(b []byte, i int32) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}
func addInt64(b []byte, i int64) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
}
func addCString(b []byte, s string) []byte {
b = append(b, []byte(s)...)
b = append(b, 0)
return b
}
func addBSON(b []byte, doc interface{}) ([]byte, error) {
if doc == nil {
return append(b, 5, 0, 0, 0, 0), nil
}
data, err := bson.Marshal(doc)
if err != nil {
return b, err
}
return append(b, data...), nil
}
func setInt32(b []byte, pos int, i int32) {
b[pos] = byte(i)
b[pos+1] = byte(i >> 8)
b[pos+2] = byte(i >> 16)
b[pos+3] = byte(i >> 24)
}
func getInt32(b []byte, pos int) int32 {
return (int32(b[pos+0])) |
(int32(b[pos+1]) << 8) |
(int32(b[pos+2]) << 16) |
(int32(b[pos+3]) << 24)
}
func getInt64(b []byte, pos int) int64 {
return (int64(b[pos+0])) |
(int64(b[pos+1]) << 8) |
(int64(b[pos+2]) << 16) |
(int64(b[pos+3]) << 24) |
(int64(b[pos+4]) << 32) |
(int64(b[pos+5]) << 40) |
(int64(b[pos+6]) << 48) |
(int64(b[pos+7]) << 56)
}
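// Illustrative note (not in the original source): the helpers above
// hand-roll MongoDB's little-endian wire encoding. A quick round trip:
//
//	buf := addInt32(nil, 258) // -> []byte{2, 1, 0, 0}
//	getInt32(buf, 0)          // -> 258
//
// The standard library equivalent would be binary.LittleEndian.PutUint32
// and binary.LittleEndian.Uint32.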

147
Godeps/_workspace/src/gopkg.in/mgo.v2/stats.go generated vendored Normal file
View File

@@ -0,0 +1,147 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"sync"
)
var stats *Stats
var statsMutex sync.Mutex
func SetStats(enabled bool) {
statsMutex.Lock()
if enabled {
if stats == nil {
stats = &Stats{}
}
} else {
stats = nil
}
statsMutex.Unlock()
}
func GetStats() (snapshot Stats) {
statsMutex.Lock()
snapshot = *stats
statsMutex.Unlock()
return
}
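// Illustrative usage (not in the original source):
//
//	mgo.SetStats(true)
//	// ... run some operations ...
//	s := mgo.GetStats()
//	// inspect s.SentOps, s.ReceivedOps, s.SocketsInUse, ...
//
// Note that GetStats dereferences the package-level stats pointer, so it
// must only be called while stats collection is enabled.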
func ResetStats() {
statsMutex.Lock()
debug("Resetting stats")
old := stats
stats = &Stats{}
// These are absolute values:
stats.Clusters = old.Clusters
stats.SocketsInUse = old.SocketsInUse
stats.SocketsAlive = old.SocketsAlive
stats.SocketRefs = old.SocketRefs
statsMutex.Unlock()
return
}
type Stats struct {
Clusters int
MasterConns int
SlaveConns int
SentOps int
ReceivedOps int
ReceivedDocs int
SocketsAlive int
SocketsInUse int
SocketRefs int
}
func (stats *Stats) cluster(delta int) {
if stats != nil {
statsMutex.Lock()
stats.Clusters += delta
statsMutex.Unlock()
}
}
func (stats *Stats) conn(delta int, master bool) {
if stats != nil {
statsMutex.Lock()
if master {
stats.MasterConns += delta
} else {
stats.SlaveConns += delta
}
statsMutex.Unlock()
}
}
func (stats *Stats) sentOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SentOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedDocs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedDocs += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsInUse(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsInUse += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsAlive += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketRefs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketRefs += delta
statsMutex.Unlock()
}
}

254
Godeps/_workspace/src/gopkg.in/mgo.v2/suite_test.go generated vendored Normal file
View File

@@ -0,0 +1,254 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo_test
import (
"errors"
"flag"
"fmt"
"net"
"os/exec"
"runtime"
"strconv"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
var fast = flag.Bool("fast", false, "Skip slow tests")
type M bson.M
type cLogger C
func (c *cLogger) Output(calldepth int, s string) error {
ns := time.Now().UnixNano()
t := float64(ns%100e9) / 1e9
((*C)(c)).Logf("[LOG] %.05f %s", t, s)
return nil
}
func TestAll(t *testing.T) {
TestingT(t)
}
type S struct {
session *mgo.Session
stopped bool
build mgo.BuildInfo
frozen []string
}
func (s *S) versionAtLeast(v ...int) bool {
for i := range v {
if i == len(s.build.VersionArray) {
return false
}
if s.build.VersionArray[i] < v[i] {
return false
}
}
return true
}
var _ = Suite(&S{})
func (s *S) SetUpSuite(c *C) {
mgo.SetDebug(true)
mgo.SetStats(true)
s.StartAll()
session, err := mgo.Dial("localhost:40001")
c.Assert(err, IsNil)
s.build, err = session.BuildInfo()
c.Check(err, IsNil)
session.Close()
}
func (s *S) SetUpTest(c *C) {
err := run("mongo --nodb testdb/dropall.js")
if err != nil {
panic(err.Error())
}
mgo.SetLogger((*cLogger)(c))
mgo.ResetStats()
}
func (s *S) TearDownTest(c *C) {
if s.stopped {
s.StartAll()
}
for _, host := range s.frozen {
if host != "" {
s.Thaw(host)
}
}
var stats mgo.Stats
for i := 0; ; i++ {
stats = mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
for i := 0; ; i++ {
stats = mgo.GetStats()
if stats.Clusters == 0 {
break
}
if i == 60 {
c.Fatal("Test left clusters alive")
}
c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
time.Sleep(1 * time.Second)
}
}
func (s *S) Stop(host string) {
// Give a moment for slaves to sync and avoid getting rollback issues.
panicOnWindows()
time.Sleep(2 * time.Second)
err := run("cd _testdb && supervisorctl stop " + supvName(host))
if err != nil {
panic(err)
}
s.stopped = true
}
func (s *S) pid(host string) int {
output, err := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fp").CombinedOutput()
if err != nil {
panic(err)
}
pidstr := string(output[1 : len(output)-1])
pid, err := strconv.Atoi(pidstr)
if err != nil {
panic("cannot convert pid to int: " + pidstr)
}
return pid
}
func (s *S) Freeze(host string) {
err := stop(s.pid(host))
if err != nil {
panic(err)
}
s.frozen = append(s.frozen, host)
}
func (s *S) Thaw(host string) {
err := cont(s.pid(host))
if err != nil {
panic(err)
}
for i, frozen := range s.frozen {
if frozen == host {
s.frozen[i] = ""
}
}
}
func (s *S) StartAll() {
// Restart any stopped nodes.
run("cd _testdb && supervisorctl start all")
err := run("cd testdb && mongo --nodb wait.js")
if err != nil {
panic(err)
}
s.stopped = false
}
func run(command string) error {
var output []byte
var err error
if runtime.GOOS == "windows" {
output, err = exec.Command("cmd", "/C", command).CombinedOutput()
} else {
output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput()
}
if err != nil {
msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
return errors.New(msg)
}
return nil
}
var supvNames = map[string]string{
"40001": "db1",
"40002": "db2",
"40011": "rs1a",
"40012": "rs1b",
"40013": "rs1c",
"40021": "rs2a",
"40022": "rs2b",
"40023": "rs2c",
"40031": "rs3a",
"40032": "rs3b",
"40033": "rs3c",
"40041": "rs4a",
"40101": "cfg1",
"40102": "cfg2",
"40103": "cfg3",
"40201": "s1",
"40202": "s2",
"40203": "s3",
}
// supvName returns the supervisord name for the given host address.
func supvName(host string) string {
host, port, err := net.SplitHostPort(host)
if err != nil {
panic(err)
}
name, ok := supvNames[port]
if !ok {
panic("Unknown host: " + host)
}
return name
}
func hostPort(host string) string {
_, port, err := net.SplitHostPort(host)
if err != nil {
panic(err)
}
return port
}
func panicOnWindows() {
if runtime.GOOS == "windows" {
panic("the test suite is not yet fully supported on Windows")
}
}

15
Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_test.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
// +build !windows
package mgo_test
import (
"syscall"
)
func stop(pid int) (err error) {
return syscall.Kill(pid, syscall.SIGSTOP)
}
func cont(pid int) (err error) {
return syscall.Kill(pid, syscall.SIGCONT)
}

11
Godeps/_workspace/src/gopkg.in/mgo.v2/syscall_windows_test.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package mgo_test
func stop(pid int) (err error) {
panicOnWindows() // Always does.
return nil
}
func cont(pid int) (err error) {
panicOnWindows() // Always does.
return nil
}

44
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/client.pem generated vendored Normal file
View File

@@ -0,0 +1,44 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA
sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD
KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp
peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y
lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN
LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS
J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT
X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT
AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ
Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr
GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB
TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX
1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV
YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw
9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR
SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx
W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7
PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g
Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu
T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL
Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh
jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh
z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57
RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6
DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ==
-----END RSA PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM
MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE
CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN
MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD
VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD
Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX
xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ
N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/
DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV
ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV
B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip
08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG
SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/
rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk
3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP
-----END CERTIFICATE-----

52
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/dropall.js generated vendored Normal file
View File

@@ -0,0 +1,52 @@
var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203]
var auth = [40002, 40103, 40203, 40031]
var db1 = new Mongo("localhost:40001")
if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion != "") {
ports.push(40003)
auth.push(40003)
}
for (var i in ports) {
var port = ports[i]
var server = "localhost:" + port
var mongo = new Mongo("localhost:" + port)
var admin = mongo.getDB("admin")
for (var j in auth) {
if (auth[j] == port) {
admin.auth("root", "rapadura")
admin.system.users.find().forEach(function(u) {
if (u.user == "root" || u.user == "reader") {
return;
}
if (typeof admin.dropUser == "function") {
mongo.getDB(u.db).dropUser(u.user);
} else {
admin.removeUser(u.user);
}
})
break
}
}
var result = admin.runCommand({"listDatabases": 1})
// Why is the command returning undefined!?
while (typeof result.databases == "undefined") {
result = admin.runCommand({"listDatabases": 1})
}
var dbs = result.databases
for (var j = 0; j != dbs.length; j++) {
var db = dbs[j]
switch (db.name) {
case "admin":
case "local":
case "config":
break
default:
mongo.getDB(db.name).dropDatabase()
}
}
}
// vim:ts=4:sw=4:et

110
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/init.js generated vendored Normal file
View File

@@ -0,0 +1,110 @@
//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5}
var settings = {};
// We know the master of the first set (pri=1), but not of the second.
var rs1cfg = {_id: "rs1",
members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}},
{_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}},
{_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}],
settings: settings}
var rs2cfg = {_id: "rs2",
members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}},
{_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}},
{_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}],
settings: settings}
var rs3cfg = {_id: "rs3",
members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}},
{_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}},
{_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}],
settings: settings}
for (var i = 0; i != 60; i++) {
try {
db1 = new Mongo("127.0.0.1:40001").getDB("admin")
db2 = new Mongo("127.0.0.1:40002").getDB("admin")
rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
break
} catch(err) {
print("Can't connect yet...")
}
sleep(1000)
}
function hasSSL() {
return db1.serverBuildInfo().OpenSSLVersion != ""
}
rs1a.runCommand({replSetInitiate: rs1cfg})
rs2a.runCommand({replSetInitiate: rs2cfg})
rs3a.runCommand({replSetInitiate: rs3cfg})
function configShards() {
cfg1 = new Mongo("127.0.0.1:40201").getDB("admin")
cfg1.runCommand({addshard: "127.0.0.1:40001"})
cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"})
cfg2 = new Mongo("127.0.0.1:40202").getDB("admin")
cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"})
cfg3 = new Mongo("127.0.0.1:40203").getDB("admin")
cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"})
}
function configAuth() {
var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"]
if (hasSSL()) {
addrs.push("127.0.0.1:40003")
}
for (var i in addrs) {
var db = new Mongo(addrs[i]).getDB("admin")
var v = db.serverBuildInfo().versionArray
if (v < [2, 5]) {
db.addUser("root", "rapadura")
} else {
db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
}
db.auth("root", "rapadura")
if (v >= [2, 6]) {
db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
} else if (v >= [2, 4]) {
db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
} else {
db.addUser("reader", "rapadura", true)
}
}
}
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
}
}
}
return count
}
var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
for (var i = 0; i != 60; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
sleep(2000)
configShards()
configAuth()
quit(0)
}
sleep(1000)
}
print("Replica sets didn't sync up properly.")
quit(12)
// vim:ts=4:sw=4:et

33
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/server.pem generated vendored Normal file
View File

@@ -0,0 +1,33 @@
-----BEGIN CERTIFICATE-----
MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV
BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP
MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx
MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN
R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx
EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0
fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR
q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV
HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB
rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x
DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ
BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI
hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO
7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC
sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE=
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4
ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw
fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB
AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd
SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o
aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA
yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG
WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL
mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r
YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+
PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD
UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv
I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA==
-----END RSA PRIVATE KEY-----

58
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/setup.sh generated vendored Normal file
View File

@@ -0,0 +1,58 @@
#!/bin/sh -e
start() {
mkdir _testdb
cd _testdb
mkdir db1 db2 db3 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3
cp ../testdb/supervisord.conf supervisord.conf
cp ../testdb/server.pem server.pem
echo keyfile > keyfile
chmod 600 keyfile
COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ')
if ! mongod --help | grep -q -- --ssl; then
COUNT=$(($COUNT - 1))
fi
echo "Running supervisord..."
supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 )
echo "Supervisord is up, starting $COUNT processes..."
for i in $(seq 10); do
RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ')
echo "$RUNNING processes running..."
if [ x$COUNT = x$RUNNING ]; then
echo "Running setup.js with mongo..."
mongo --nodb ../testdb/init.js
exit 0
fi
sleep 1
done
echo "Failed to start all processes. Check out what's up at $PWD now!"
exit 1
}
stop() {
if [ -d _testdb ]; then
echo "Shutting down test cluster..."
(cd _testdb && supervisorctl shutdown)
rm -rf _testdb
fi
}
if [ ! -f suite_test.go ]; then
echo "This script must be run from within the source directory."
exit 1
fi
case "$1" in
start)
start $2
;;
stop)
stop $2
;;
esac
# vim:ts=4:sw=4:et

65
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/supervisord.conf generated vendored Normal file
View File

@@ -0,0 +1,65 @@
[supervisord]
logfile = %(here)s/supervisord.log
pidfile = %(here)s/supervisord.pid
directory = %(here)s
#nodaemon = true
[inet_http_server]
port = 127.0.0.1:9001
[supervisorctl]
serverurl = http://127.0.0.1:9001
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[program:db1]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001
[program:db2]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth
[program:db3]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem
[program:rs1a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011
[program:rs1b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012
[program:rs1c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013
[program:rs2a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021
[program:rs2b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022
[program:rs2c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023
[program:rs3a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile
[program:rs3b]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile
[program:rs3c]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile
[program:rs4a]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041
[program:cfg1]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101
[program:cfg2]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102
[program:cfg3]
command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile
[program:s1]
command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1
[program:s2]
command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1
[program:s3]
command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile

58
Godeps/_workspace/src/gopkg.in/mgo.v2/testdb/wait.js generated vendored Normal file
View File

@@ -0,0 +1,58 @@
// We know the master of the first set (pri=1), but not of the second.
var settings = {}
var rs1cfg = {_id: "rs1",
members: [{_id: 1, host: "127.0.0.1:40011", priority: 1},
{_id: 2, host: "127.0.0.1:40012", priority: 0},
{_id: 3, host: "127.0.0.1:40013", priority: 0}]}
var rs2cfg = {_id: "rs2",
members: [{_id: 1, host: "127.0.0.1:40021", priority: 1},
{_id: 2, host: "127.0.0.1:40022", priority: 1},
{_id: 3, host: "127.0.0.1:40023", priority: 0}]}
var rs3cfg = {_id: "rs3",
members: [{_id: 1, host: "127.0.0.1:40031", priority: 1},
{_id: 2, host: "127.0.0.1:40032", priority: 1},
{_id: 3, host: "127.0.0.1:40033", priority: 1}],
settings: settings}
for (var i = 0; i != 60; i++) {
try {
rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
rs3a.auth("root", "rapadura")
db1 = new Mongo("127.0.0.1:40001").getDB("admin")
db2 = new Mongo("127.0.0.1:40002").getDB("admin")
break
} catch(err) {
print("Can't connect yet...")
}
sleep(1000)
}
function countHealthy(rs) {
var status = rs.runCommand({replSetGetStatus: 1})
var count = 0
if (typeof status.members != "undefined") {
for (var i = 0; i != status.members.length; i++) {
var m = status.members[i]
if (m.health == 1 && (m.state == 1 || m.state == 2)) {
count += 1
}
}
}
return count
}
var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
for (var i = 0; i != 60; i++) {
var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
print("Replica sets have", count, "healthy nodes.")
if (count == totalRSMembers) {
quit(0)
}
sleep(1000)
}
print("Replica sets didn't sync up properly.")
quit(12)

68
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/chaos.go generated vendored Normal file
View File

@@ -0,0 +1,68 @@
package txn
import (
mrand "math/rand"
"time"
)
var chaosEnabled = false
var chaosSetting Chaos
// Chaos holds parameters for the failure injection mechanism.
type Chaos struct {
// KillChance is the 0.0 to 1.0 chance that a given checkpoint
// within the algorithm will raise an interruption that will
// stop the procedure.
KillChance float64
// SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint
// within the algorithm will be delayed by Slowdown before
// continuing.
SlowdownChance float64
Slowdown time.Duration
// If Breakpoint is set, the above settings will only affect the
// named breakpoint.
Breakpoint string
}
// SetChaos sets the failure injection parameters to c.
func SetChaos(c Chaos) {
chaosSetting = c
chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0
}
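// Illustrative usage (not in the original source): a test can make roughly
// 1% of the "set-prepared" checkpoints abort the transaction runner, which
// then surfaces as ErrChaos:
//
//	txn.SetChaos(txn.Chaos{KillChance: 0.01, Breakpoint: "set-prepared"})
//	defer txn.SetChaos(txn.Chaos{}) // disable failure injection again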
func chaos(bpname string) {
if !chaosEnabled {
return
}
switch chaosSetting.Breakpoint {
case "", bpname:
kc := chaosSetting.KillChance
if kc > 0 && mrand.Intn(1000) < int(kc*1000) {
panic(chaosError{})
}
if bpname == "insert" {
return
}
sc := chaosSetting.SlowdownChance
if sc > 0 && mrand.Intn(1000) < int(sc*1000) {
time.Sleep(chaosSetting.Slowdown)
}
}
}
type chaosError struct{}
func (f *flusher) handleChaos(err *error) {
v := recover()
if v == nil {
return
}
if _, ok := v.(chaosError); ok {
f.debugf("Killed by chaos!")
*err = ErrChaos
return
}
panic(v)
}

109
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/debug.go generated vendored Normal file
View File

@@ -0,0 +1,109 @@
package txn
import (
"bytes"
"fmt"
"sort"
"sync/atomic"
"gopkg.in/mgo.v2/bson"
)
var (
debugEnabled bool
logger log_Logger
)
type log_Logger interface {
Output(calldepth int, s string) error
}
// SetLogger specifies the logger to which logged messages should be sent.
func SetLogger(l log_Logger) {
logger = l
}
// SetDebug enables or disables debugging.
func SetDebug(debug bool) {
debugEnabled = debug
}
var ErrChaos = fmt.Errorf("interrupted by chaos")
var debugId uint32
func debugPrefix() string {
d := atomic.AddUint32(&debugId, 1) - 1
s := make([]byte, 0, 10)
for i := uint(0); i < 8; i++ {
s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf])
if d>>(4*(i+1)) == 0 {
break
}
}
s = append(s, ')', ' ')
return string(s)
}
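// Illustrative note (not in the original source): debugPrefix encodes the
// counter in base 16 with the letters a-p, least significant nibble first:
//
//	d=0  -> "a) "
//	d=1  -> "b) "
//	d=16 -> "ab) "
//	d=17 -> "bb) "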
func logf(format string, args ...interface{}) {
if logger != nil {
logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
}
}
func debugf(format string, args ...interface{}) {
if debugEnabled && logger != nil {
logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
}
}
func argsForLog(args []interface{}) []interface{} {
for i, arg := range args {
switch v := arg.(type) {
case bson.ObjectId:
args[i] = v.Hex()
case []bson.ObjectId:
lst := make([]string, len(v))
for j, id := range v {
lst[j] = id.Hex()
}
args[i] = lst
case map[docKey][]bson.ObjectId:
buf := &bytes.Buffer{}
var dkeys docKeys
for dkey := range v {
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
for i, dkey := range dkeys {
if i > 0 {
buf.WriteByte(' ')
}
buf.WriteString(fmt.Sprintf("%v: {", dkey))
for j, id := range v[dkey] {
if j > 0 {
buf.WriteByte(' ')
}
buf.WriteString(id.Hex())
}
buf.WriteByte('}')
}
args[i] = buf.String()
case map[docKey][]int64:
buf := &bytes.Buffer{}
var dkeys docKeys
for dkey := range v {
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
for i, dkey := range dkeys {
if i > 0 {
buf.WriteByte(' ')
}
buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey]))
}
args[i] = buf.String()
}
}
return args
}

205
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/dockey_test.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
package txn
import (
"sort"
. "gopkg.in/check.v1"
)
type DocKeySuite struct{}
var _ = Suite(&DocKeySuite{})
type T struct {
A int
B string
}
type T2 struct {
A int
B string
}
type T3 struct {
A int
B string
}
type T4 struct {
A int
B string
}
type T5 struct {
F int
Q string
}
type T6 struct {
A int
B string
}
type T7 struct {
A bool
B float64
}
type T8 struct {
A int
B string
}
type T9 struct {
A int
B string
C bool
}
type T10 struct {
C int `bson:"a"`
D string `bson:"b,omitempty"`
}
type T11 struct {
C int
D string
}
type T12 struct {
S string
}
type T13 struct {
p, q, r bool
S string
}
var docKeysTests = [][]docKeys{
{{
{"c", 1},
{"c", 5},
{"c", 2},
}, {
{"c", 1},
{"c", 2},
{"c", 5},
}}, {{
{"c", "foo"},
{"c", "bar"},
{"c", "bob"},
}, {
{"c", "bar"},
{"c", "bob"},
{"c", "foo"},
}}, {{
{"c", 0.2},
{"c", 0.07},
{"c", 0.9},
}, {
{"c", 0.07},
{"c", 0.2},
{"c", 0.9},
}}, {{
{"c", true},
{"c", false},
{"c", true},
}, {
{"c", false},
{"c", true},
{"c", true},
}}, {{
{"c", T{1, "b"}},
{"c", T{1, "a"}},
{"c", T{0, "b"}},
{"c", T{0, "a"}},
}, {
{"c", T{0, "a"}},
{"c", T{0, "b"}},
{"c", T{1, "a"}},
{"c", T{1, "b"}},
}}, {{
{"c", T{1, "a"}},
{"c", T{0, "a"}},
}, {
{"c", T{0, "a"}},
{"c", T{1, "a"}},
}}, {{
{"c", T3{0, "b"}},
{"c", T2{1, "b"}},
{"c", T3{1, "a"}},
{"c", T2{0, "a"}},
}, {
{"c", T2{0, "a"}},
{"c", T3{0, "b"}},
{"c", T3{1, "a"}},
{"c", T2{1, "b"}},
}}, {{
{"c", T5{1, "b"}},
{"c", T4{1, "b"}},
{"c", T5{0, "a"}},
{"c", T4{0, "a"}},
}, {
{"c", T4{0, "a"}},
{"c", T5{0, "a"}},
{"c", T4{1, "b"}},
{"c", T5{1, "b"}},
}}, {{
{"c", T6{1, "b"}},
{"c", T7{true, 0.2}},
{"c", T6{0, "a"}},
{"c", T7{false, 0.04}},
}, {
{"c", T6{0, "a"}},
{"c", T6{1, "b"}},
{"c", T7{false, 0.04}},
{"c", T7{true, 0.2}},
}}, {{
{"c", T9{1, "b", true}},
{"c", T8{1, "b"}},
{"c", T9{0, "a", false}},
{"c", T8{0, "a"}},
}, {
{"c", T9{0, "a", false}},
{"c", T8{0, "a"}},
{"c", T9{1, "b", true}},
{"c", T8{1, "b"}},
}}, {{
{"b", 2},
{"a", 5},
{"c", 2},
{"b", 1},
}, {
{"a", 5},
{"b", 1},
{"b", 2},
{"c", 2},
}}, {{
{"c", T11{1, "a"}},
{"c", T11{1, "a"}},
{"c", T10{1, "a"}},
}, {
{"c", T10{1, "a"}},
{"c", T11{1, "a"}},
{"c", T11{1, "a"}},
}}, {{
{"c", T12{"a"}},
{"c", T13{false, true, false, "a"}},
{"c", T12{"b"}},
{"c", T13{false, true, false, "b"}},
}, {
{"c", T12{"a"}},
{"c", T13{false, true, false, "a"}},
{"c", T12{"b"}},
{"c", T13{false, true, false, "b"}},
}},
}
func (s *DocKeySuite) TestSort(c *C) {
for _, test := range docKeysTests {
keys := test[0]
expected := test[1]
sort.Sort(keys)
c.Check(keys, DeepEquals, expected)
}
}

968
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/flusher.go generated vendored Normal file
View File

@@ -0,0 +1,968 @@
package txn
import (
"fmt"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
func flush(r *Runner, t *transaction) error {
f := &flusher{
Runner: r,
goal: t,
goalKeys: make(map[docKey]bool),
queue: make(map[docKey][]token),
debugId: debugPrefix(),
}
for _, dkey := range f.goal.docKeys() {
f.goalKeys[dkey] = true
}
return f.run()
}
type flusher struct {
*Runner
goal *transaction
goalKeys map[docKey]bool
queue map[docKey][]token
debugId string
}
func (f *flusher) run() (err error) {
if chaosEnabled {
defer f.handleChaos(&err)
}
f.debugf("Processing %s", f.goal)
seen := make(map[bson.ObjectId]*transaction)
if err := f.recurse(f.goal, seen); err != nil {
return err
}
if f.goal.done() {
return nil
}
// Sparse workloads will generally be managed entirely by recurse.
// Getting here means one or more transactions have dependencies
// and perhaps cycles.
// Build successors data for Tarjan's sort. Must consider
// that entries in txn-queue are not necessarily valid.
successors := make(map[bson.ObjectId][]bson.ObjectId)
ready := true
for _, dqueue := range f.queue {
NextPair:
for i := 0; i < len(dqueue); i++ {
pred := dqueue[i]
predid := pred.id()
predt := seen[predid]
if predt == nil || predt.Nonce != pred.nonce() {
continue
}
predsuccids, ok := successors[predid]
if !ok {
successors[predid] = nil
}
for j := i + 1; j < len(dqueue); j++ {
succ := dqueue[j]
succid := succ.id()
succt := seen[succid]
if succt == nil || succt.Nonce != succ.nonce() {
continue
}
if _, ok := successors[succid]; !ok {
successors[succid] = nil
}
// Found a valid pred/succ pair.
i = j - 1
for _, predsuccid := range predsuccids {
if predsuccid == succid {
continue NextPair
}
}
successors[predid] = append(predsuccids, succid)
if succid == f.goal.Id {
// There are still pre-requisites to handle.
ready = false
}
continue NextPair
}
}
}
f.debugf("Queues: %v", f.queue)
f.debugf("Successors: %v", successors)
if ready {
f.debugf("Goal %s has no real pre-requisites", f.goal)
return f.advance(f.goal, nil, true)
}
// Robert Tarjan's algorithm for detecting strongly-connected
// components is used for topological sorting and detecting
// cycles at once. All runners must globally agree on the order
// in which transactions are applied to commonly affected documents.
sorted := tarjanSort(successors)
if debugEnabled {
f.debugf("Tarjan output: %v", sorted)
}
pull := make(map[bson.ObjectId]*transaction)
for i := len(sorted) - 1; i >= 0; i-- {
scc := sorted[i]
f.debugf("Flushing %v", scc)
if len(scc) == 1 {
pull[scc[0]] = seen[scc[0]]
}
for _, id := range scc {
if err := f.advance(seen[id], pull, true); err != nil {
return err
}
}
if len(scc) > 1 {
for _, id := range scc {
pull[id] = seen[id]
}
}
}
return nil
}
func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error {
seen[t.Id] = t
err := f.advance(t, nil, false)
if err != errPreReqs {
return err
}
for _, dkey := range t.docKeys() {
for _, dtt := range f.queue[dkey] {
id := dtt.id()
if seen[id] != nil {
continue
}
qt, err := f.load(id)
if err != nil {
return err
}
err = f.recurse(qt, seen)
if err != nil {
return err
}
}
}
return nil
}
func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error {
for {
switch t.State {
case tpreparing, tprepared:
revnos, err := f.prepare(t, force)
if err != nil {
return err
}
if t.State != tprepared {
continue
}
if err = f.assert(t, revnos, pull); err != nil {
return err
}
if t.State != tprepared {
continue
}
if err = f.checkpoint(t, revnos); err != nil {
return err
}
case tapplying:
return f.apply(t, pull)
case taborting:
return f.abortOrReload(t, nil, pull)
case tapplied, taborted:
return nil
default:
panic(fmt.Errorf("transaction in unknown state: %q", t.State))
}
}
panic("unreachable")
}
type stash string
const (
stashStable stash = ""
stashInsert stash = "insert"
stashRemove stash = "remove"
)
type txnInfo struct {
Queue []token `bson:"txn-queue"`
Revno int64 `bson:"txn-revno,omitempty"`
Insert bson.ObjectId `bson:"txn-insert,omitempty"`
Remove bson.ObjectId `bson:"txn-remove,omitempty"`
}
type stashState string
const (
stashNew stashState = ""
stashInserting stashState = "inserting"
)
var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}}
var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false")
// prepare injects t's id onto txn-queue for all affected documents
// and collects the current txn-queue and txn-revno values during
// the process. If the prepared txn-queue indicates that there are
// pre-requisite transactions to be applied and the force parameter
// is false, errPreReqs will be returned. Otherwise, the current
// tip revision numbers for all the documents are returned.
func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) {
if t.State != tpreparing {
return f.rescan(t, force)
}
f.debugf("Preparing %s", t)
// dkeys being sorted means stable iteration across all runners. This
// isn't strictly required, but reduces the chances of cycles.
dkeys := t.docKeys()
revno := make(map[docKey]int64)
info := txnInfo{}
tt := tokenFor(t)
NextDoc:
for _, dkey := range dkeys {
change := mgo.Change{
Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}},
ReturnNew: true,
}
c := f.tc.Database.C(dkey.C)
cquery := c.FindId(dkey.Id).Select(txnFields)
RetryDoc:
change.Upsert = false
chaos("")
if _, err := cquery.Apply(change, &info); err == nil {
if info.Remove == "" {
// Fast path, unless workload is insert/remove heavy.
revno[dkey] = info.Revno
f.queue[dkey] = info.Queue
f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
continue NextDoc
} else {
// Handle remove in progress before preparing it.
if err := f.loadAndApply(info.Remove); err != nil {
return nil, err
}
goto RetryDoc
}
} else if err != mgo.ErrNotFound {
return nil, err
}
// Document missing. Use stash collection.
change.Upsert = true
chaos("")
_, err := f.sc.FindId(dkey).Apply(change, &info)
if err != nil {
return nil, err
}
if info.Insert != "" {
// Handle insert in progress before preparing it.
if err := f.loadAndApply(info.Insert); err != nil {
return nil, err
}
goto RetryDoc
}
// Must confirm stash is still in use and is the same one
// prepared, since applying a remove overwrites the stash.
docFound := false
stashFound := false
if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil {
docFound = true
} else if err != mgo.ErrNotFound {
return nil, err
} else if err = f.sc.FindId(dkey).One(&info); err == nil {
stashFound = true
if info.Revno == 0 {
// Missing revno in the stash only happens when it
// has been upserted, in which case it defaults to -1.
// Txn-inserted documents get revno -1 while in the stash
// for the first time, and -revno+1 == 2 when they go live.
info.Revno = -1
}
} else if err != mgo.ErrNotFound {
return nil, err
}
if docFound && info.Remove == "" || stashFound && info.Insert == "" {
for _, dtt := range info.Queue {
if dtt != tt {
continue
}
// Found tt properly prepared.
if stashFound {
f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue)
} else {
f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
}
revno[dkey] = info.Revno
f.queue[dkey] = info.Queue
continue NextDoc
}
}
// The stash wasn't valid and tt got overwritten. Try again.
f.unstashToken(tt, dkey)
goto RetryDoc
}
// Save the prepared nonce onto t.
nonce := tt.nonce()
qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}}
udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}}
chaos("set-prepared")
err = f.tc.Update(qdoc, udoc)
if err == nil {
t.State = tprepared
t.Nonce = nonce
} else if err == mgo.ErrNotFound {
f.debugf("Can't save nonce of %s: LOST RACE", tt)
if err := f.reload(t); err != nil {
return nil, err
} else if t.State == tpreparing {
panic("can't save nonce yet transaction is still preparing")
} else if t.State != tprepared {
return t.Revnos, nil
}
tt = t.token()
} else if err != nil {
return nil, err
}
prereqs, found := f.hasPreReqs(tt, dkeys)
if !found {
// Must only happen when reloading above.
return f.rescan(t, force)
} else if prereqs && !force {
f.debugf("Prepared queue with %s [has prereqs & not forced].", tt)
return nil, errPreReqs
}
revnos = assembledRevnos(t.Ops, revno)
if !prereqs {
f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos)
} else {
f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos)
}
return revnos, nil
}
func (f *flusher) unstashToken(tt token, dkey docKey) error {
qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}}
udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}}
chaos("")
if err := f.sc.Update(qdoc, udoc); err == nil {
chaos("")
err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}})
} else if err != mgo.ErrNotFound {
return err
}
return nil
}
func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) {
f.debugf("Rescanning %s", t)
if t.State != tprepared {
panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State))
}
// dkeys being sorted means stable iteration across all
// runners. This isn't strictly required, but reduces the chances
// of cycles.
dkeys := t.docKeys()
tt := t.token()
if !force {
prereqs, found := f.hasPreReqs(tt, dkeys)
if found && prereqs {
// Its state is already known.
return nil, errPreReqs
}
}
revno := make(map[docKey]int64)
info := txnInfo{}
for _, dkey := range dkeys {
retry := 0
RetryDoc:
c := f.tc.Database.C(dkey.C)
if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound {
// Document is missing. Look in stash.
if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound {
// Stash also doesn't exist. Maybe someone applied it.
if err := f.reload(t); err != nil {
return nil, err
} else if t.State != tprepared {
return t.Revnos, err
}
// Not applying either.
retry++
if retry < 3 {
// Retry since there might be an insert/remove race.
goto RetryDoc
}
// Neither the doc nor the stash seem to exist.
return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t)
} else if err != nil {
return nil, err
}
// Stash found.
if info.Insert != "" {
// Handle insert in progress before assuming ordering is good.
if err := f.loadAndApply(info.Insert); err != nil {
return nil, err
}
goto RetryDoc
}
if info.Revno == 0 {
// Missing revno in the stash means -1.
info.Revno = -1
}
} else if err != nil {
return nil, err
} else if info.Remove != "" {
// Handle remove in progress before assuming ordering is good.
if err := f.loadAndApply(info.Remove); err != nil {
return nil, err
}
goto RetryDoc
}
revno[dkey] = info.Revno
found := false
for _, id := range info.Queue {
if id == tt {
found = true
break
}
}
f.queue[dkey] = info.Queue
if !found {
// Previously set txn-queue was popped by someone.
// Transaction is being/has been applied elsewhere.
f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue)
err := f.reload(t)
if t.State == tpreparing || t.State == tprepared {
panic("rescanned document misses transaction in queue")
}
return t.Revnos, err
}
}
prereqs, found := f.hasPreReqs(tt, dkeys)
if !found {
panic("rescanning loop guarantees that this can't happen")
} else if prereqs && !force {
f.debugf("Rescanned queue with %s: has prereqs, not forced", tt)
return nil, errPreReqs
}
revnos = assembledRevnos(t.Ops, revno)
if !prereqs {
f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos)
} else {
f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos)
}
return revnos, nil
}
func assembledRevnos(ops []Op, revno map[docKey]int64) []int64 {
revnos := make([]int64, len(ops))
for i, op := range ops {
dkey := op.docKey()
revnos[i] = revno[dkey]
drevno := revno[dkey]
switch {
case op.Insert != nil && drevno < 0:
revno[dkey] = -drevno + 1
case op.Update != nil && drevno >= 0:
revno[dkey] = drevno + 1
case op.Remove && drevno >= 0:
revno[dkey] = -drevno - 1
}
}
return revnos
}
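// A hypothetical walkthrough (not in the original source): given ops
// [insert A, update B] and revno == {A: -1, B: 2}, assembledRevnos
// returns [-1, 2] and advances the map to {A: 2, B: 3}: the insert
// takes the stashed document live (-(-1)+1 == 2) and the update bumps
// B's revision by one.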
func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) {
found = true
NextDoc:
for _, dkey := range dkeys {
for _, dtt := range f.queue[dkey] {
if dtt == tt {
continue NextDoc
} else if dtt.id() != tt.id() {
prereqs = true
}
}
found = false
}
return
}
func (f *flusher) reload(t *transaction) error {
var newt transaction
query := f.tc.FindId(t.Id)
query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}})
if err := query.One(&newt); err != nil {
return fmt.Errorf("failed to reload transaction: %v", err)
}
t.State = newt.State
t.Nonce = newt.Nonce
t.Revnos = newt.Revnos
f.debugf("Reloaded %s: %q", t, t.State)
return nil
}
func (f *flusher) loadAndApply(id bson.ObjectId) error {
t, err := f.load(id)
if err != nil {
return err
}
return f.advance(t, nil, true)
}
// assert verifies that all assertions in t match the content that t
// will be applied upon. If an assertion fails, the transaction state
// is changed to aborted.
func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error {
f.debugf("Asserting %s with revnos %v", t, revnos)
if t.State != tprepared {
panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State))
}
qdoc := make(bson.D, 3)
revno := make(map[docKey]int64)
for i, op := range t.Ops {
dkey := op.docKey()
if _, ok := revno[dkey]; !ok {
revno[dkey] = revnos[i]
}
if op.Assert == nil {
continue
}
if op.Assert == DocMissing {
if revnos[i] >= 0 {
return f.abortOrReload(t, revnos, pull)
}
continue
}
if op.Insert != nil {
return fmt.Errorf("Insert can only Assert txn.DocMissing, not %v", op.Assert)
}
// if revnos[i] < 0 { abort }?
qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id})
if op.Assert != DocMissing {
var revnoq interface{}
if n := revno[dkey]; n == 0 {
revnoq = bson.D{{"$exists", false}}
} else {
revnoq = n
}
// XXX Add tt to the query here, once we're sure it's all working.
// Not having it increases the chances of breaking on bad logic.
qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq})
if op.Assert != DocExists {
qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}})
}
}
c := f.tc.Database.C(op.C)
if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound {
// Assertion failed or someone else started applying.
return f.abortOrReload(t, revnos, pull)
} else if err != nil {
return err
}
}
f.debugf("Asserting %s succeeded", t)
return nil
}
func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) {
f.debugf("Aborting or reloading %s (was %q)", t, t.State)
if t.State == tprepared {
qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
udoc := bson.D{{"$set", bson.D{{"s", taborting}}}}
chaos("set-aborting")
if err = f.tc.Update(qdoc, udoc); err == nil {
t.State = taborting
} else if err == mgo.ErrNotFound {
if err = f.reload(t); err != nil || t.State != taborting {
f.debugf("Won't abort %s. Reloaded state: %q", t, t.State)
return err
}
} else {
return err
}
} else if t.State != taborting {
panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State))
}
if len(revnos) > 0 {
if pull == nil {
pull = map[bson.ObjectId]*transaction{t.Id: t}
}
seen := make(map[docKey]bool)
for i, op := range t.Ops {
dkey := op.docKey()
if seen[dkey] {
continue
}
seen[dkey] = true
pullAll := tokensToPull(f.queue[dkey], pull, "")
if len(pullAll) == 0 {
continue
}
udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}
chaos("")
if revnos[i] < 0 {
err = f.sc.UpdateId(dkey, udoc)
} else {
c := f.tc.Database.C(dkey.C)
err = c.UpdateId(dkey.Id, udoc)
}
if err != nil && err != mgo.ErrNotFound {
return err
}
}
}
udoc := bson.D{{"$set", bson.D{{"s", taborted}}}}
chaos("set-aborted")
if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound {
return err
}
t.State = taborted
f.debugf("Aborted %s", t)
return nil
}
func (f *flusher) checkpoint(t *transaction, revnos []int64) error {
var debugRevnos map[docKey][]int64
if debugEnabled {
debugRevnos = make(map[docKey][]int64)
for i, op := range t.Ops {
dkey := op.docKey()
debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i])
}
f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos)
}
// Save in t the txn-revno values the transaction must run on.
qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}}
chaos("set-applying")
err := f.tc.Update(qdoc, udoc)
if err == nil {
t.State = tapplying
t.Revnos = revnos
f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos)
} else if err == mgo.ErrNotFound {
f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos)
return f.reload(t)
}
return nil
}
func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error {
f.debugf("Applying transaction %s", t)
if t.State != tapplying {
panic(fmt.Errorf("applying transaction in invalid state: %q", t.State))
}
if pull == nil {
pull = map[bson.ObjectId]*transaction{t.Id: t}
}
logRevnos := append([]int64(nil), t.Revnos...)
logDoc := bson.D{{"_id", t.Id}}
tt := tokenFor(t)
for i := range t.Ops {
op := &t.Ops[i]
dkey := op.docKey()
dqueue := f.queue[dkey]
revno := t.Revnos[i]
var opName string
if debugEnabled {
opName = op.name()
f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno)
}
c := f.tc.Database.C(op.C)
qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}}
if op.Insert != nil {
qdoc[0].Value = dkey
if revno == -1 {
qdoc[1].Value = bson.D{{"$exists", false}}
}
} else if revno == 0 {
// There's no document with revno 0. The only way to see it is
// when an existent document participates in a transaction the
// first time. Txn-inserted documents get revno -1 while in the
// stash for the first time, and -revno+1 == 2 when they go live.
qdoc[1].Value = bson.D{{"$exists", false}}
}
pullAll := tokensToPull(dqueue, pull, tt)
var d bson.D
var outcome string
var err error
switch {
case op.Update != nil:
if revno < 0 {
err = mgo.ErrNotFound
f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed")
} else {
newRevno := revno + 1
logRevnos[i] = newRevno
if d, err = objToDoc(op.Update); err != nil {
return err
}
if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil {
return err
}
if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil {
return err
}
chaos("")
err = c.Update(qdoc, d)
}
case op.Remove:
if revno < 0 {
err = mgo.ErrNotFound
} else {
newRevno := -revno - 1
logRevnos[i] = newRevno
nonce := newNonce()
stash := txnInfo{}
change := mgo.Change{
Update: bson.D{{"$push", bson.D{{"n", nonce}}}},
Upsert: true,
ReturnNew: true,
}
if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil {
return err
}
change = mgo.Change{
Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}},
ReturnNew: true,
}
var info txnInfo
if _, err = c.Find(qdoc).Apply(change, &info); err == nil {
// The document still exists so the stash previously
// observed was either out of date or necessarily
// contained the token being applied.
f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue)
updated := false
if !hasToken(stash.Queue, tt) {
var set, unset bson.D
if revno == 0 {
// Missing revno in stash means -1.
set = bson.D{{"txn-queue", info.Queue}}
unset = bson.D{{"n", 1}, {"txn-revno", 1}}
} else {
set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}}
unset = bson.D{{"n", 1}}
}
qdoc := bson.D{{"_id", dkey}, {"n", nonce}}
udoc := bson.D{{"$set", set}, {"$unset", unset}}
if err = f.sc.Update(qdoc, udoc); err == nil {
updated = true
} else if err != mgo.ErrNotFound {
return err
}
}
if updated {
f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue)
} else {
f.debugf("Stash for document %v was up-to-date", dkey)
}
err = c.Remove(qdoc)
}
}
case op.Insert != nil:
if revno >= 0 {
err = mgo.ErrNotFound
} else {
newRevno := -revno + 1
logRevnos[i] = newRevno
if d, err = objToDoc(op.Insert); err != nil {
return err
}
change := mgo.Change{
Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}},
ReturnNew: true,
}
chaos("")
var info txnInfo
if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil {
f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue)
d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}})
// Unlikely yet unfortunate race in here if this gets seriously
// delayed. If someone inserts+removes meanwhile, this will
// reinsert, and there's no way to avoid that without either leaving
// garbage in the collection or compromising sharding. applyOps can solve
// the former, but it can't shard (SERVER-1439).
chaos("insert")
err = c.Insert(d)
if err == nil || mgo.IsDup(err) {
if err == nil {
f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue)
} else {
f.debugf("Document %v already existed", dkey)
}
chaos("")
if err = f.sc.Remove(qdoc); err == nil {
f.debugf("Stash for document %v removed", dkey)
}
}
}
}
case op.Assert != nil:
// Pure assertion. No changes to apply.
}
if err == nil {
outcome = "DONE"
} else if err == mgo.ErrNotFound || mgo.IsDup(err) {
outcome = "MISS"
err = nil
} else {
outcome = err.Error()
}
if debugEnabled {
f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome)
}
if err != nil {
return err
}
if f.lc != nil && op.isChange() {
// Add change to the log document.
var dr bson.D
for li := range logDoc {
elem := &logDoc[li]
if elem.Name == op.C {
dr = elem.Value.(bson.D)
break
}
}
if dr == nil {
logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}})
dr = logDoc[len(logDoc)-1].Value.(bson.D)
}
dr[0].Value = append(dr[0].Value.([]interface{}), op.Id)
dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i])
}
}
t.State = tapplied
if f.lc != nil {
// Insert log document into the changelog collection.
f.debugf("Inserting %s into change log", t)
err := f.lc.Insert(logDoc)
if err != nil && !mgo.IsDup(err) {
return err
}
}
// It's been applied, so errors are ignored here. It's fine for someone
// else to win the race and mark it as applied, and it's also fine for
// it to remain pending until a later point when someone will perceive
// it has been applied and mark it as such.
f.debugf("Marking %s as applied", t)
chaos("set-applied")
f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}})
return nil
}
func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token {
var result []token
for j := len(dqueue) - 1; j >= 0; j-- {
dtt := dqueue[j]
if dtt == dontPull {
continue
}
if _, ok := pull[dtt.id()]; ok {
// It was handled before and this is a leftover invalid
// nonce in the queue. Cherry-pick it out.
result = append(result, dtt)
}
}
return result
}
func objToDoc(obj interface{}) (d bson.D, err error) {
data, err := bson.Marshal(obj)
if err != nil {
return nil, err
}
err = bson.Unmarshal(data, &d)
if err != nil {
return nil, err
}
return d, err
}
func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) {
for i := range doc {
elem := &doc[i]
if elem.Name != key {
continue
}
if old, ok := elem.Value.(bson.D); ok {
elem.Value = append(old, add...)
return doc, nil
} else {
return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value)
}
}
return append(doc, bson.DocElem{key, add}), nil
}
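// For example (hypothetical values), merging into an existing key:
//
//	d, _ := addToDoc(bson.D{{"$set", bson.D{{"a", 1}}}}, "$set", bson.D{{"b", 2}})
//	// d == bson.D{{"$set", bson.D{{"a", 1}, {"b", 2}}}}
//
// A key that is absent is appended as a new element instead.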
func setInDoc(doc bson.D, set bson.D) bson.D {
dlen := len(doc)
NextS:
for s := range set {
sname := set[s].Name
for d := 0; d < dlen; d++ {
if doc[d].Name == sname {
doc[d].Value = set[s].Value
continue NextS
}
}
doc = append(doc, set[s])
}
return doc
}
func hasToken(tokens []token, tt token) bool {
for _, ttt := range tokens {
if ttt == tt {
return true
}
}
return false
}
func (f *flusher) debugf(format string, args ...interface{}) {
if !debugEnabled {
return
}
debugf(f.debugId+format, args...)
}

101
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/mgo_test.go generated vendored Normal file
View File

@@ -0,0 +1,101 @@
package txn_test
import (
"bytes"
"gopkg.in/mgo.v2"
. "gopkg.in/check.v1"
"os/exec"
"time"
)
// ----------------------------------------------------------------------------
// The mgo test suite
type MgoSuite struct {
output bytes.Buffer
server *exec.Cmd
session *mgo.Session
}
var mgoaddr = "127.0.0.1:50017"
func (s *MgoSuite) SetUpSuite(c *C) {
//mgo.SetDebug(true)
mgo.SetStats(true)
dbdir := c.MkDir()
args := []string{
"--dbpath", dbdir,
"--bind_ip", "127.0.0.1",
"--port", "50017",
"--nssize", "1",
"--noprealloc",
"--smallfiles",
"--nojournal",
"-vvvvv",
}
s.server = exec.Command("mongod", args...)
s.server.Stdout = &s.output
s.server.Stderr = &s.output
err := s.server.Start()
if err != nil {
panic(err)
}
}
func (s *MgoSuite) TearDownSuite(c *C) {
s.server.Process.Kill()
s.server.Process.Wait()
}
func (s *MgoSuite) SetUpTest(c *C) {
err := DropAll(mgoaddr)
if err != nil {
panic(err)
}
mgo.SetLogger(c)
mgo.ResetStats()
s.session, err = mgo.Dial(mgoaddr)
c.Assert(err, IsNil)
}
func (s *MgoSuite) TearDownTest(c *C) {
if s.session != nil {
s.session.Close()
}
for i := 0; ; i++ {
stats := mgo.GetStats()
if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
break
}
if i == 20 {
c.Fatal("Test left sockets in a dirty state")
}
c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
time.Sleep(500 * time.Millisecond)
}
}
func DropAll(mongourl string) (err error) {
session, err := mgo.Dial(mongourl)
if err != nil {
return err
}
defer session.Close()
names, err := session.DatabaseNames()
if err != nil {
return err
}
for _, name := range names {
switch name {
case "admin", "local", "config":
default:
err = session.DB(name).DropDatabase()
if err != nil {
return err
}
}
}
return nil
}

389
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/sim_test.go generated vendored Normal file
View File

@@ -0,0 +1,389 @@
package txn_test
import (
"flag"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/txn"
. "gopkg.in/check.v1"
"math/rand"
"time"
)
var (
duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation")
seed = flag.Int64("seed", 0, "seed for rand")
)
type params struct {
killChance float64
slowdownChance float64
slowdown time.Duration
unsafe bool
workers int
accounts int
changeHalf bool
reinsertCopy bool
reinsertZeroed bool
changelog bool
changes int
}
func (s *S) TestSim1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 4,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSim4WorkersDense(c *C) {
simulate(c, params{
workers: 4,
accounts: 2,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSim4WorkersSparse(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 4,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf4WorkersDense(c *C) {
simulate(c, params{
workers: 4,
accounts: 2,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimHalf4WorkersSparse(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
changeHalf: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertCopy1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 10,
reinsertCopy: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertCopy4Workers(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
reinsertCopy: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertZeroed1Worker(c *C) {
simulate(c, params{
workers: 1,
accounts: 10,
reinsertZeroed: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimReinsertZeroed4Workers(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
reinsertZeroed: true,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
})
}
func (s *S) TestSimChangeLog(c *C) {
simulate(c, params{
workers: 4,
accounts: 10,
killChance: 0.01,
slowdownChance: 0.3,
slowdown: 100 * time.Millisecond,
changelog: true,
})
}
type balanceChange struct {
id bson.ObjectId
origin int
target int
amount int
}
func simulate(c *C, params params) {
seed := *seed
if seed == 0 {
seed = time.Now().UnixNano()
}
rand.Seed(seed)
c.Logf("Seed: %v", seed)
txn.SetChaos(txn.Chaos{
KillChance: params.killChance,
SlowdownChance: params.slowdownChance,
Slowdown: params.slowdown,
})
defer txn.SetChaos(txn.Chaos{})
session, err := mgo.Dial(mgoaddr)
c.Assert(err, IsNil)
defer session.Close()
db := session.DB("test")
tc := db.C("tc")
runner := txn.NewRunner(tc)
tclog := db.C("tc.log")
if params.changelog {
info := mgo.CollectionInfo{
Capped: true,
MaxBytes: 1000000,
}
err := tclog.Create(&info)
c.Assert(err, IsNil)
runner.ChangeLog(tclog)
}
accounts := db.C("accounts")
for i := 0; i < params.accounts; i++ {
err := accounts.Insert(M{"_id": i, "balance": 300})
c.Assert(err, IsNil)
}
var stop time.Time
if params.changes <= 0 {
stop = time.Now().Add(*duration)
}
max := params.accounts
if params.reinsertCopy || params.reinsertZeroed {
max = int(float64(params.accounts) * 1.5)
}
changes := make(chan balanceChange, 1024)
//session.SetMode(mgo.Eventual, true)
for i := 0; i < params.workers; i++ {
go func() {
n := 0
for {
if n > 0 && n == params.changes {
break
}
if !stop.IsZero() && time.Now().After(stop) {
break
}
change := balanceChange{
id: bson.NewObjectId(),
origin: rand.Intn(max),
target: rand.Intn(max),
amount: 100,
}
var old Account
var oldExists bool
if params.reinsertCopy || params.reinsertZeroed {
if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
c.Check(err, IsNil)
change.amount = old.Balance
oldExists = true
}
}
var ops []txn.Op
switch {
case params.reinsertCopy && oldExists:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": change.amount},
Remove: true,
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocMissing,
Insert: M{"balance": change.amount},
}}
case params.reinsertZeroed && oldExists:
ops = []txn.Op{{
C: "accounts",
Id: change.target,
Assert: txn.DocMissing,
Insert: M{"balance": 0},
}, {
C: "accounts",
Id: change.origin,
Assert: M{"balance": change.amount},
Remove: true,
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount}},
}}
case params.changeHalf:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": M{"$gte": change.amount}},
Update: M{"$inc": M{"balance": -change.amount / 2}},
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount / 2}},
}, {
C: "accounts",
Id: change.origin,
Update: M{"$inc": M{"balance": -change.amount / 2}},
}, {
C: "accounts",
Id: change.target,
Update: M{"$inc": M{"balance": change.amount / 2}},
}}
default:
ops = []txn.Op{{
C: "accounts",
Id: change.origin,
Assert: M{"balance": M{"$gte": change.amount}},
Update: M{"$inc": M{"balance": -change.amount}},
}, {
C: "accounts",
Id: change.target,
Assert: txn.DocExists,
Update: M{"$inc": M{"balance": change.amount}},
}}
}
// Use a fresh err; this closure runs concurrently with other workers.
err := runner.Run(ops, change.id, nil)
if err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
c.Check(err, IsNil)
}
n++
changes <- change
}
changes <- balanceChange{}
}()
}
alive := params.workers
changeLog := make([]balanceChange, 0, 1024)
for alive > 0 {
change := <-changes
if change.id == "" {
alive--
} else {
changeLog = append(changeLog, change)
}
}
c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))
txn.SetChaos(txn.Chaos{})
err = runner.ResumeAll()
c.Assert(err, IsNil)
n, err := accounts.Count()
c.Check(err, IsNil)
c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))
n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
c.Check(err, IsNil)
c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))
globalBalance := 0
iter := accounts.Find(nil).Iter()
account := Account{}
for iter.Next(&account) {
globalBalance += account.Balance
}
c.Check(iter.Close(), IsNil)
c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))
// Compute and verify the exact final state of all accounts.
balance := make(map[int]int)
for i := 0; i < params.accounts; i++ {
balance[i] += 300
}
var applied, aborted int
for _, change := range changeLog {
err := runner.Resume(change.id)
if err == txn.ErrAborted {
aborted++
continue
} else if err != nil {
c.Fatalf("resuming %s failed: %v", change.id, err)
}
balance[change.origin] -= change.amount
balance[change.target] += change.amount
applied++
}
iter = accounts.Find(nil).Iter()
for iter.Next(&account) {
c.Assert(account.Balance, Equals, balance[account.Id])
}
c.Check(iter.Close(), IsNil)
c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)
if params.changelog {
n, err := tclog.Count()
c.Assert(err, IsNil)
// Check if the capped collection is full.
dummy := make([]byte, 1024)
tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
m, err := tclog.Count()
c.Assert(err, IsNil)
if m == n+1 {
// Wasn't full, so it must have seen it all.
c.Assert(err, IsNil)
c.Assert(n, Equals, applied)
}
}
}

94
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan.go generated vendored Normal file
View File

@@ -0,0 +1,94 @@
package txn
import (
"gopkg.in/mgo.v2/bson"
"sort"
)
func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId {
// http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
data := &tarjanData{
successors: successors,
nodes: make([]tarjanNode, 0, len(successors)),
index: make(map[bson.ObjectId]int, len(successors)),
}
for id := range successors {
id := bson.ObjectId(string(id))
if _, seen := data.index[id]; !seen {
data.strongConnect(id)
}
}
// Sort connected components to stabilize the algorithm.
for _, ids := range data.output {
if len(ids) > 1 {
sort.Sort(idList(ids))
}
}
return data.output
}
type tarjanData struct {
successors map[bson.ObjectId][]bson.ObjectId
output [][]bson.ObjectId
nodes []tarjanNode
stack []bson.ObjectId
index map[bson.ObjectId]int
}
type tarjanNode struct {
lowlink int
stacked bool
}
type idList []bson.ObjectId
func (l idList) Len() int { return len(l) }
func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l idList) Less(i, j int) bool { return l[i] < l[j] }
func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode {
index := len(data.nodes)
data.index[id] = index
data.stack = append(data.stack, id)
data.nodes = append(data.nodes, tarjanNode{index, true})
node := &data.nodes[index]
for _, succid := range data.successors[id] {
succindex, seen := data.index[succid]
if !seen {
succnode := data.strongConnect(succid)
if succnode.lowlink < node.lowlink {
node.lowlink = succnode.lowlink
}
} else if data.nodes[succindex].stacked {
// Part of the current strongly-connected component.
if succindex < node.lowlink {
node.lowlink = succindex
}
}
}
if node.lowlink == index {
// Root node; pop stack and output new
// strongly-connected component.
var scc []bson.ObjectId
i := len(data.stack) - 1
for {
stackid := data.stack[i]
stackindex := data.index[stackid]
data.nodes[stackindex].stacked = false
scc = append(scc, stackid)
if stackindex == index {
break
}
i--
}
data.stack = data.stack[:i]
data.output = append(data.output, scc)
}
return node
}

44
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/tarjan_test.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package txn
import (
"fmt"
"gopkg.in/mgo.v2/bson"
. "gopkg.in/check.v1"
)
type TarjanSuite struct{}
var _ = Suite(TarjanSuite{})
func bid(n int) bson.ObjectId {
return bson.ObjectId(fmt.Sprintf("%024d", n))
}
func bids(ns ...int) (ids []bson.ObjectId) {
for _, n := range ns {
ids = append(ids, bid(n))
}
return
}
func (TarjanSuite) TestExample(c *C) {
successors := map[bson.ObjectId][]bson.ObjectId{
bid(1): bids(2, 3),
bid(2): bids(1, 5),
bid(3): bids(4),
bid(4): bids(3, 5),
bid(5): bids(6),
bid(6): bids(7),
bid(7): bids(8),
bid(8): bids(6, 9),
bid(9): bids(),
}
c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{
bids(9),
bids(6, 7, 8),
bids(5),
bids(3, 4),
bids(1, 2),
})
}

609
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn.go generated vendored Normal file
View File

@@ -0,0 +1,609 @@
// The txn package implements support for multi-document transactions.
//
// For details check the following blog post:
//
// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
//
package txn
import (
"encoding/binary"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
crand "crypto/rand"
mrand "math/rand"
)
type state int
const (
tpreparing state = 1 // One or more documents not prepared
tprepared state = 2 // Prepared but not yet ready to run
taborting state = 3 // Assertions failed, cleaning up
tapplying state = 4 // Changes are in progress
taborted state = 5 // Pre-conditions failed, nothing done
tapplied state = 6 // All changes applied
)
func (s state) String() string {
switch s {
case tpreparing:
return "preparing"
case tprepared:
return "prepared"
case taborting:
return "aborting"
case tapplying:
return "applying"
case taborted:
return "aborted"
case tapplied:
return "applied"
}
panic(fmt.Errorf("unknown state: %d", s))
}
var rand *mrand.Rand
var randmu sync.Mutex
func init() {
var seed int64
err := binary.Read(crand.Reader, binary.BigEndian, &seed)
if err != nil {
panic(err)
}
rand = mrand.New(mrand.NewSource(seed))
}
type transaction struct {
Id bson.ObjectId `bson:"_id"`
State state `bson:"s"`
Info interface{} `bson:"i,omitempty"`
Ops []Op `bson:"o"`
Nonce string `bson:"n,omitempty"`
Revnos []int64 `bson:"r,omitempty"`
docKeysCached docKeys
}
func (t *transaction) String() string {
if t.Nonce == "" {
return t.Id.Hex()
}
return string(t.token())
}
func (t *transaction) done() bool {
return t.State == tapplied || t.State == taborted
}
func (t *transaction) token() token {
if t.Nonce == "" {
panic("transaction has no nonce")
}
return tokenFor(t)
}
func (t *transaction) docKeys() docKeys {
if t.docKeysCached != nil {
return t.docKeysCached
}
dkeys := make(docKeys, 0, len(t.Ops))
NextOp:
for _, op := range t.Ops {
dkey := op.docKey()
for i := range dkeys {
if dkey == dkeys[i] {
continue NextOp
}
}
dkeys = append(dkeys, dkey)
}
sort.Sort(dkeys)
t.docKeysCached = dkeys
return dkeys
}
// tokenFor returns a unique transaction token that
// is composed by t's id and a nonce. If t already has
// a nonce assigned to it, it will be used, otherwise
// a new nonce will be generated.
func tokenFor(t *transaction) token {
nonce := t.Nonce
if nonce == "" {
nonce = newNonce()
}
return token(t.Id.Hex() + "_" + nonce)
}
func newNonce() string {
randmu.Lock()
r := rand.Uint32()
randmu.Unlock()
n := make([]byte, 8)
for i := uint(0); i < 8; i++ {
n[i] = "0123456789abcdef"[(r>>(4*i))&0xf]
}
return string(n)
}
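// For illustration (not in the original source): the nonce encodes the
// 32-bit random value least-significant nibble first, so r == 0x89abcdef
// produces the nonce "fedcba98".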
type token string
func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) }
func (tt token) nonce() string { return string(tt[25:]) }
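// A token is the 24-char hex transaction id, an underscore, and the
// 8-char nonce. For the hypothetical token
// "0123456789abcdef01234567_fedcba98", id() returns
// bson.ObjectIdHex("0123456789abcdef01234567") and nonce() returns
// "fedcba98".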
// Op represents an operation to a single document that may be
// applied as part of a transaction with other operations.
type Op struct {
// C and Id identify the collection and document this operation
// refers to. Id is matched against the "_id" document field.
C string `bson:"c"`
Id interface{} `bson:"d"`
// Assert optionally holds a query document that is used to
// test the operation document at the time the transaction is
// going to be applied. The assertions for all operations in
// a transaction are tested before any changes take place,
// and the transaction is entirely aborted if any of them
// fails. This is also the only way to prevent a transaction
// from being applied (the transaction continues despite
// the outcome of Insert, Update, and Remove).
Assert interface{} `bson:"a,omitempty"`
// The Insert, Update and Remove fields describe the mutation
// intended by the operation. At most one of them may be set
// per operation. If none are set, Assert must be set and the
// operation becomes a read-only test.
//
// Insert holds the document to be inserted at the time the
// transaction is applied. The Id field will be inserted
// into the document automatically as its _id field. The
// transaction will continue even if the document already
// exists. Use Assert with txn.DocMissing if the insertion is
// required.
//
// Update holds the update document to be applied at the time
// the transaction is applied. The transaction will continue
// even if a document with Id is missing. Use Assert to
// test for the document presence or its contents.
//
// Remove indicates whether to remove the document with Id.
// The transaction continues even if the document doesn't yet
// exist at the time the transaction is applied. Use Assert
// with txn.DocExists to make sure it will be removed.
Insert interface{} `bson:"i,omitempty"`
Update interface{} `bson:"u,omitempty"`
Remove bool `bson:"r,omitempty"`
}
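// A usage sketch (mirroring the package tests; collection and field
// names are illustrative): transfer funds only when the origin account
// holds enough balance.
//
//	ops := []txn.Op{{
//		C:      "accounts",
//		Id:     0,
//		Assert: bson.M{"balance": bson.M{"$gte": 100}},
//		Update: bson.M{"$inc": bson.M{"balance": -100}},
//	}, {
//		C:      "accounts",
//		Id:     1,
//		Assert: txn.DocExists,
//		Update: bson.M{"$inc": bson.M{"balance": 100}},
//	}}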
func (op *Op) isChange() bool {
return op.Update != nil || op.Insert != nil || op.Remove
}
func (op *Op) docKey() docKey {
return docKey{op.C, op.Id}
}
func (op *Op) name() string {
switch {
case op.Update != nil:
return "update"
case op.Insert != nil:
return "insert"
case op.Remove:
return "remove"
case op.Assert != nil:
return "assert"
}
return "none"
}
const (
// DocExists and DocMissing may be used on an operation's
// Assert value to assert that the document with the given
// Id exists or does not exist, respectively.
DocExists = "d+"
DocMissing = "d-"
)
// A Runner applies operations as part of a transaction onto any number
// of collections within a database. See the Run method for details.
type Runner struct {
tc *mgo.Collection // txns
sc *mgo.Collection // stash
lc *mgo.Collection // log
}
// NewRunner returns a new transaction runner that uses tc to hold its
// transactions.
//
// Multiple transaction collections may exist in a single database, but
// all collections that are touched by operations in a given transaction
// collection must be handled exclusively by it.
//
// A second collection with the same name as tc but suffixed by ".stash"
// will be used for implementing the transactional behavior of insert
// and remove operations.
func NewRunner(tc *mgo.Collection) *Runner {
return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil}
}
var ErrAborted = fmt.Errorf("transaction aborted")
// Run creates a new transaction with ops and runs it immediately.
// The id parameter specifies the transaction id, and may be written
// down ahead of time to later verify the success of the change and
// resume it, when the procedure is interrupted for any reason. If
// empty, a random id will be generated.
// The info parameter, if not nil, is included under the "i"
// field of the transaction document.
//
// Operations across documents are not atomically applied, but are
// guaranteed to be eventually all applied in the order provided or
// all aborted, as long as the affected documents are only modified
// through transactions. If documents are modified both through
// transactions and directly, the behavior is undefined.
//
// If Run returns no errors, all operations were applied successfully.
// If it returns ErrAborted, one or more operations can't be applied
// and the transaction was entirely aborted with no changes performed.
// Otherwise, if the transaction is interrupted while running for any
// reason, it may be resumed explicitly or by attempting to apply
// another transaction on any of the documents targeted by ops, as
// long as the interruption was made after the transaction document
// itself was inserted. Run Resume with the obtained transaction id
// to confirm whether the transaction was applied or not.
//
// Any number of transactions may be run concurrently, with one
// runner or many.
func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
const efmt = "error in transaction op %d: %s"
for i := range ops {
op := &ops[i]
if op.C == "" || op.Id == nil {
return fmt.Errorf(efmt, i, "C or Id missing")
}
changes := 0
if op.Insert != nil {
changes++
}
if op.Update != nil {
changes++
}
if op.Remove {
changes++
}
if changes > 1 {
return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
}
if changes == 0 && op.Assert == nil {
return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
}
}
if id == "" {
id = bson.NewObjectId()
}
// Insert transaction sooner rather than later, to stay on the safer side.
t := transaction{
Id: id,
Ops: ops,
State: tpreparing,
Info: info,
}
if err = r.tc.Insert(&t); err != nil {
return err
}
if err = flush(r, &t); err != nil {
return err
}
if t.State == taborted {
return ErrAborted
} else if t.State != tapplied {
panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
}
return nil
}
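// A minimal sketch of driving Run (collection name is hypothetical):
//
//	runner := txn.NewRunner(db.C("txns"))
//	if err := runner.Run(ops, "", nil); err == txn.ErrAborted {
//		// An assertion failed; no changes were made.
//	} else if err != nil {
//		// Interrupted; call Resume with the id to learn the outcome.
//	}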
// ResumeAll resumes all pending transactions. All ErrAborted errors
// from individual transactions are ignored.
func (r *Runner) ResumeAll() (err error) {
debugf("Resuming all unfinished transactions")
iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
var t transaction
for iter.Next(&t) {
if t.State == tapplied || t.State == taborted {
continue
}
debugf("Resuming %s from %q", t.Id, t.State)
if err := flush(r, &t); err != nil {
return err
}
if !t.done() {
panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
}
}
return nil
}
// Resume resumes the transaction with id. It returns mgo.ErrNotFound
// if the transaction is not found. Otherwise, it has the same semantics
// as the Run method after the transaction is inserted.
func (r *Runner) Resume(id bson.ObjectId) (err error) {
t, err := r.load(id)
if err != nil {
return err
}
if !t.done() {
debugf("Resuming %s from %q", t, t.State)
if err := flush(r, t); err != nil {
return err
}
}
if t.State == taborted {
return ErrAborted
} else if t.State != tapplied {
panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
}
return nil
}
// ChangeLog enables logging of changes to the given collection
// every time a transaction that modifies content is done being
// applied.
//
// Saved documents are in the format:
//
// {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
//
// The document revision is the value of the txn-revno field after
// the change has been applied. Negative values indicate the document
// was not present in the collection. Revisions will not change when
// updates or removes are applied to missing documents or inserts are
// attempted when the document is already present.
func (r *Runner) ChangeLog(logc *mgo.Collection) {
r.lc = logc
}
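// A sample change-log document in the format above (values taken from
// the package tests): a transaction that inserted accounts 0 and 1 is
// logged as
//
//	{"_id": <txn id>, "accounts": {"d": [0, 1], "r": [2, 2]}}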
// PurgeMissing removes from collections any state that refers to transaction
// documents that for whatever reason have been lost from the system (removed
// by accident or lost in a hard crash, for example).
//
// This method should very rarely be needed, if at all, and should never be
// used during the normal operation of an application. Its purpose is to put
// a system that has seen unavoidable corruption back in a working state.
func (r *Runner) PurgeMissing(collections ...string) error {
type M map[string]interface{}
type S []interface{}
pipeline := []M{
{"$project": M{"_id": 1, "txn-queue": 1}},
{"$unwind": "$txn-queue"},
{"$sort": M{"_id": 1, "txn-queue": 1}},
//{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}},
}
type TRef struct {
DocId interface{} "_id"
TxnId string "txn-queue"
}
found := make(map[bson.ObjectId]bool)
colls := make(map[string]bool)
sort.Strings(collections)
for _, collection := range collections {
c := r.tc.Database.C(collection)
iter := c.Pipe(pipeline).Iter()
var tref TRef
for iter.Next(&tref) {
txnId := bson.ObjectIdHex(tref.TxnId[:24])
if found[txnId] {
continue
}
if r.tc.FindId(txnId).One(nil) == nil {
found[txnId] = true
continue
}
logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId)
err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
if err != nil {
return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
}
}
colls[collection] = true
}
type StashTRef struct {
Id docKey "_id"
TxnId string "txn-queue"
}
iter := r.sc.Pipe(pipeline).Iter()
var stref StashTRef
for iter.Next(&stref) {
txnId := bson.ObjectIdHex(stref.TxnId[:24])
if found[txnId] {
continue
}
if r.tc.FindId(txnId).One(nil) == nil {
found[txnId] = true
continue
}
logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId)
err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
if err != nil {
return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
}
}
return nil
}
func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
var t transaction
err := r.tc.FindId(id).One(&t)
if err == mgo.ErrNotFound {
return nil, fmt.Errorf("cannot find transaction %s", id)
} else if err != nil {
return nil, err
}
return &t, nil
}
type typeNature int
const (
// The order of these values matters. Transactions
// from applications using different ordering will
// be incompatible with each other.
_ typeNature = iota
natureString
natureInt
natureFloat
natureBool
natureStruct
)
func valueNature(v interface{}) (value interface{}, nature typeNature) {
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.String:
return rv.String(), natureString
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), natureInt
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return int64(rv.Uint()), natureInt
case reflect.Float32, reflect.Float64:
return rv.Float(), natureFloat
case reflect.Bool:
return rv.Bool(), natureBool
case reflect.Struct:
return v, natureStruct
}
panic("document id type unsupported by txn: " + rv.Kind().String())
}
type docKey struct {
C string
Id interface{}
}
type docKeys []docKey
func (ks docKeys) Len() int { return len(ks) }
func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
func (ks docKeys) Less(i, j int) bool {
a, b := ks[i], ks[j]
if a.C != b.C {
return a.C < b.C
}
return valuecmp(a.Id, b.Id) == -1
}
func valuecmp(a, b interface{}) int {
av, an := valueNature(a)
bv, bn := valueNature(b)
if an < bn {
return -1
}
if an > bn {
return 1
}
if av == bv {
return 0
}
var less bool
switch an {
case natureString:
less = av.(string) < bv.(string)
case natureInt:
less = av.(int64) < bv.(int64)
case natureFloat:
less = av.(float64) < bv.(float64)
case natureBool:
less = !av.(bool) && bv.(bool)
case natureStruct:
less = structcmp(av, bv) == -1
default:
panic("unreachable")
}
if less {
return -1
}
return 1
}
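// Note that the nature ordering above compares types before values: for
// example (hypothetical ids) valuecmp("z", 1) == -1, since any string id
// sorts before any int id regardless of content.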
func structcmp(a, b interface{}) int {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
var ai, bi = 0, 0
var an, bn = av.NumField(), bv.NumField()
var avi, bvi interface{}
var af, bf reflect.StructField
for {
for ai < an {
af = av.Type().Field(ai)
if isExported(af.Name) {
avi = av.Field(ai).Interface()
ai++
break
}
ai++
}
for bi < bn {
bf = bv.Type().Field(bi)
if isExported(bf.Name) {
bvi = bv.Field(bi).Interface()
bi++
break
}
bi++
}
if n := valuecmp(avi, bvi); n != 0 {
return n
}
nameA := getFieldName(af)
nameB := getFieldName(bf)
if nameA < nameB {
return -1
}
if nameA > nameB {
return 1
}
if ai == an && bi == bn {
return 0
}
if ai == an || bi == bn {
if ai == an {
return -1
}
return 1
}
}
panic("unreachable")
}
func isExported(name string) bool {
a := name[0]
return a >= 'A' && a <= 'Z'
}
func getFieldName(f reflect.StructField) string {
name := f.Tag.Get("bson")
if i := strings.Index(name, ","); i >= 0 {
name = name[:i]
}
if name == "" {
name = strings.ToLower(f.Name)
}
return name
}
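// For example (hypothetical fields): a field tagged bson:"first,omitempty"
// yields "first", while an untagged field named Balance falls back to
// "balance".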

627
Godeps/_workspace/src/gopkg.in/mgo.v2/txn/txn_test.go generated vendored Normal file
View File

@@ -0,0 +1,627 @@
package txn_test
import (
"sync"
"testing"
"time"
. "gopkg.in/check.v1"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/txn"
)
func TestAll(t *testing.T) {
TestingT(t)
}
type S struct {
MgoSuite
db *mgo.Database
tc, sc *mgo.Collection
accounts *mgo.Collection
runner *txn.Runner
}
var _ = Suite(&S{})
type M map[string]interface{}
func (s *S) SetUpTest(c *C) {
txn.SetChaos(txn.Chaos{})
txn.SetLogger(c)
txn.SetDebug(true)
s.MgoSuite.SetUpTest(c)
s.db = s.session.DB("test")
s.tc = s.db.C("tc")
s.sc = s.db.C("tc.stash")
s.accounts = s.db.C("accounts")
s.runner = txn.NewRunner(s.tc)
}
func (s *S) TearDownTest(c *C) {
txn.SetLogger(nil)
txn.SetDebug(false)
}
type Account struct {
Id int `bson:"_id"`
Balance int
}
func (s *S) TestDocExists(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
exists := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocExists,
}}
missing := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocMissing,
}}
err = s.runner.Run(exists, "", nil)
c.Assert(err, IsNil)
err = s.runner.Run(missing, "", nil)
c.Assert(err, Equals, txn.ErrAborted)
err = s.accounts.RemoveId(0)
c.Assert(err, IsNil)
err = s.runner.Run(exists, "", nil)
c.Assert(err, Equals, txn.ErrAborted)
err = s.runner.Run(missing, "", nil)
c.Assert(err, IsNil)
}
func (s *S) TestInsert(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"balance": 200},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
ops[0].Id = 1
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(1).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 200)
}
func (s *S) TestInsertStructID(c *C) {
type id struct {
FirstName string
LastName string
}
ops := []txn.Op{{
C: "accounts",
Id: id{FirstName: "John", LastName: "Jones"},
Assert: txn.DocMissing,
Insert: M{"balance": 200},
}, {
C: "accounts",
Id: id{FirstName: "Sally", LastName: "Smith"},
Assert: txn.DocMissing,
Insert: M{"balance": 800},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
n, err := s.accounts.Find(nil).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 2)
}
func (s *S) TestRemove(c *C) {
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(nil)
c.Assert(err, Equals, mgo.ErrNotFound)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
}
func (s *S) TestUpdate(c *C) {
var err error
err = s.accounts.Insert(M{"_id": 0, "balance": 200})
c.Assert(err, IsNil)
err = s.accounts.Insert(M{"_id": 1, "balance": 200})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
ops[0].Id = 1
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(1).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestInsertUpdate(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}, {
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 400)
}
func (s *S) TestUpdateInsert(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 200)
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestInsertRemoveInsert(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 200},
}, {
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "accounts",
Id: 0,
Insert: M{"_id": 0, "balance": 300},
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestQueueStashing(c *C) {
txn.SetChaos(txn.Chaos{
KillChance: 1,
Breakpoint: "set-applying",
})
opses := [][]txn.Op{{{
C: "accounts",
Id: 0,
Insert: M{"balance": 100},
}}, {{
C: "accounts",
Id: 0,
Remove: true,
}}, {{
C: "accounts",
Id: 0,
Insert: M{"balance": 200},
}}, {{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}}}
var last bson.ObjectId
for _, ops := range opses {
last = bson.NewObjectId()
err := s.runner.Run(ops, last, nil)
c.Assert(err, Equals, txn.ErrChaos)
}
txn.SetChaos(txn.Chaos{})
err := s.runner.Resume(last)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 300)
}
func (s *S) TestInfo(c *C) {
ops := []txn.Op{{
C: "accounts",
Id: 0,
Assert: txn.DocMissing,
}}
id := bson.NewObjectId()
err := s.runner.Run(ops, id, M{"n": 42})
c.Assert(err, IsNil)
var t struct{ I struct{ N int } }
err = s.tc.FindId(id).One(&t)
c.Assert(err, IsNil)
c.Assert(t.I.N, Equals, 42)
}
func (s *S) TestErrors(c *C) {
doc := bson.M{"foo": 1}
tests := []txn.Op{{
C: "c",
Id: 0,
}, {
C: "c",
Id: 0,
Insert: doc,
Remove: true,
}, {
C: "c",
Id: 0,
Insert: doc,
Update: doc,
}, {
C: "c",
Id: 0,
Update: doc,
Remove: true,
}, {
C: "c",
Assert: doc,
}, {
Id: 0,
Assert: doc,
}}
txn.SetChaos(txn.Chaos{KillChance: 1.0})
for _, op := range tests {
c.Logf("op: %v", op)
err := s.runner.Run([]txn.Op{op}, "", nil)
c.Assert(err, ErrorMatches, "error in transaction op 0: .*")
}
}
func (s *S) TestAssertNestedOr(c *C) {
// Assert uses $or internally. Ensure nesting works.
err := s.accounts.Insert(M{"_id": 0, "balance": 300})
c.Assert(err, IsNil)
ops := []txn.Op{{
C: "accounts",
Id: 0,
Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}},
Update: bson.D{{"$inc", bson.D{{"balance", 100}}}},
}}
err = s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var account Account
err = s.accounts.FindId(0).One(&account)
c.Assert(err, IsNil)
c.Assert(account.Balance, Equals, 400)
}
func (s *S) TestVerifyFieldOrdering(c *C) {
// Used to have a map in certain operations, which means
// the ordering of fields would be messed up.
fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}}
ops := []txn.Op{{
C: "accounts",
Id: 0,
Insert: fields,
}}
err := s.runner.Run(ops, "", nil)
c.Assert(err, IsNil)
var d bson.D
err = s.accounts.FindId(0).One(&d)
c.Assert(err, IsNil)
var filtered bson.D
for _, e := range d {
switch e.Name {
case "a", "b", "c":
filtered = append(filtered, e)
}
}
c.Assert(filtered, DeepEquals, fields)
}
func (s *S) TestChangeLog(c *C) {
chglog := s.db.C("chglog")
s.runner.ChangeLog(chglog)
ops := []txn.Op{{
C: "debts",
Id: 0,
Assert: txn.DocMissing,
}, {
C: "accounts",
Id: 0,
Insert: M{"balance": 300},
}, {
C: "accounts",
Id: 1,
Insert: M{"balance": 300},
}, {
C: "people",
Id: "joe",
Insert: M{"accounts": []int64{0, 1}},
}}
id := bson.NewObjectId()
err := s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
type IdList []interface{}
type Log struct {
Docs IdList "d"
Revnos []int64 "r"
}
var m map[string]*Log
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
c.Assert(m["debts"], IsNil)
ops = []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 100}},
}}
id = bson.NewObjectId()
err = s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
m = nil
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
c.Assert(m["people"], IsNil)
ops = []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "people",
Id: "joe",
Remove: true,
}}
id = bson.NewObjectId()
err = s.runner.Run(ops, id, nil)
c.Assert(err, IsNil)
m = nil
err = chglog.FindId(id).One(&m)
c.Assert(err, IsNil)
c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
}
func (s *S) TestPurgeMissing(c *C) {
txn.SetChaos(txn.Chaos{
KillChance: 1,
Breakpoint: "set-applying",
})
err := s.accounts.Insert(M{"_id": 0, "balance": 100})
c.Assert(err, IsNil)
err = s.accounts.Insert(M{"_id": 1, "balance": 100})
c.Assert(err, IsNil)
ops1 := []txn.Op{{
C: "accounts",
Id: 3,
Insert: M{"balance": 100},
}}
ops2 := []txn.Op{{
C: "accounts",
Id: 0,
Remove: true,
}, {
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 100}},
}, {
C: "accounts",
Id: 2,
Insert: M{"balance": 100},
}}
first := bson.NewObjectId()
c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex())
err = s.runner.Run(ops1, first, nil)
c.Assert(err, Equals, txn.ErrChaos)
last := bson.NewObjectId()
c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex())
err = s.runner.Run(ops2, last, nil)
c.Assert(err, Equals, txn.ErrChaos)
c.Logf("---- Removing transaction %q", last.Hex())
err = s.tc.RemoveId(last)
c.Assert(err, IsNil)
c.Logf("---- Disabling chaos and attempting to resume all")
txn.SetChaos(txn.Chaos{})
err = s.runner.ResumeAll()
c.Assert(err, IsNil)
again := bson.NewObjectId()
c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex())
err = s.runner.Run(ops2, again, nil)
c.Assert(err, ErrorMatches, "cannot find transaction .*")
c.Logf("---- Puring missing transactions")
err = s.runner.PurgeMissing("accounts")
c.Assert(err, IsNil)
c.Logf("---- Resuming pending transactions")
err = s.runner.ResumeAll()
c.Assert(err, IsNil)
expect := []struct{ Id, Balance int }{
{0, -1},
{1, 200},
{2, 100},
{3, 100},
}
var got Account
for _, want := range expect {
err = s.accounts.FindId(want.Id).One(&got)
if want.Balance == -1 {
if err != mgo.ErrNotFound {
c.Errorf("Account %d should not exist, find got err=%#v", err)
}
} else if err != nil {
c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
} else if got.Balance != want.Balance {
c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
}
}
}
func (s *S) TestTxnQueueStressTest(c *C) {
txn.SetChaos(txn.Chaos{
SlowdownChance: 0.3,
Slowdown: 50 * time.Millisecond,
})
defer txn.SetChaos(txn.Chaos{})
// So we can run more iterations of the test in less time.
txn.SetDebug(false)
err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0})
c.Assert(err, IsNil)
// Run half of the operations changing account 0 and then 1,
// and the other half in the opposite order.
ops01 := []txn.Op{{
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 1}},
}, {
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 1}},
}}
ops10 := []txn.Op{{
C: "accounts",
Id: 1,
Update: M{"$inc": M{"balance": 1}},
}, {
C: "accounts",
Id: 0,
Update: M{"$inc": M{"balance": 1}},
}}
ops := [][]txn.Op{ops01, ops10}
const runners = 4
const changes = 1000
var wg sync.WaitGroup
wg.Add(runners)
for n := 0; n < runners; n++ {
n := n
go func() {
defer wg.Done()
for i := 0; i < changes; i++ {
err = s.runner.Run(ops[n%2], "", nil)
c.Assert(err, IsNil)
}
}()
}
wg.Wait()
for id := 0; id < 2; id++ {
var account Account
err = s.accounts.FindId(id).One(&account)
c.Assert(err, IsNil)
if account.Balance != runners*changes {
c.Errorf("Account should have balance of %d, got %d", runners*changes, account.Balance)
}
}
}

25
Godeps/_workspace/src/labix.org/v2/mgo/bson/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,25 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

682
Godeps/_workspace/src/labix.org/v2/mgo/bson/bson.go generated vendored Normal file
View File

@@ -0,0 +1,682 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshaled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshaling procedure
// will stop and error out with the provided value.
type Getter interface {
GetBSON() (interface{}, error)
}
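// An illustrative sketch (not part of the original source): a minimal
// Getter. The Celsius type is hypothetical; it marshals as a plain
// float64 in Fahrenheit instead of the receiver value itself.
//
//     type Celsius float64
//
//     func (c Celsius) GetBSON() (interface{}, error) {
//         return float64(c)*9/5 + 32, nil
//     }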
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
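// An illustrative use of SetZero (hypothetical type, not part of the
// original source): a Setter that maps the string "null" to the field's
// zero value instead of storing it.
//
//     type OptionalString string
//
//     func (s *OptionalString) SetBSON(raw bson.Raw) error {
//         var v string
//         if err := raw.Unmarshal(&v); err != nil {
//             return err
//         }
//         if v == "null" {
//             return bson.SetZero
//         }
//         *s = OptionalString(v)
//         return nil
//     }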
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// See the D type.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
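// An illustrative sketch (not part of the original source): D preserves
// element order, which matters for things like index specifications,
// while Map flattens the pairs into an unordered M.
//
//     index := bson.D{{"last", 1}, {"first", 1}} // order is significant
//     m := index.Map()                           // bson.M{"last": 1, "first": 1}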
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem
// See the RawD type.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a value set in their "_id"
// field.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
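// An illustrative sketch (not part of the original source): validate
// untrusted input with IsObjectIdHex before calling ObjectIdHex, which
// panics on malformed input. The error handling here is hypothetical.
//
//     if !bson.IsObjectIdHex(s) {
//         return fmt.Errorf("invalid id: %q", s)
//     }
//     id := bson.ObjectIdHex(s) // safe: s is 24 valid hex characters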
// objectIdCounter is atomically incremented when generating a new ObjectId
// using the NewObjectId function. It's used as the counter part of an id.
var objectIdCounter uint32 = 0
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
// readMachineId generates and returns a machine id, hashing the hostname
// when available and falling back to random bytes. If both the hostname
// and the random source are unavailable, it panics.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
pid := os.Getpid()
b[7] = byte(pid >> 8)
b[8] = byte(pid)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
// filled with zeroes. It's not safe to insert a document with an id generated
// by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
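// An illustrative sketch (not part of the original source): using a
// dummy id to select documents created before a cutoff time. The c
// variable is assumed to be an *mgo.Collection.
//
//     cutoff := bson.NewObjectIdWithTime(time.Now().Add(-24 * time.Hour))
//     iter := c.Find(bson.M{"_id": bson.M{"$lt": cutoff}}).Iter()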
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON implements the json.Marshaler interface; the id is encoded
// as a 24-character hex string.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface; it expects a
// quoted 24-character hex string.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data)))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err))
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("Invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
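// An illustrative decomposition (not part of the original source) of the
// 12-byte layout built by NewObjectId above:
//
//     id := bson.NewObjectId()
//     _ = id.Time()    // bytes 0-3: seconds since epoch, big endian
//     _ = id.Machine() // bytes 4-6: md5(hostname) prefix
//     _ = id.Pid()     // bytes 7-8: process id, big endian
//     _ = id.Counter() // bytes 9-11: atomic counter, big endian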
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
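// An illustrative sketch (not part of the original source): a value from
// Now survives a marshal/unmarshal round trip exactly, since no
// sub-millisecond precision is lost on the way in.
//
//     t := bson.Now()
//     data, _ := bson.Marshal(bson.M{"t": t})
//     var out struct{ T time.Time }
//     _ = bson.Unmarshal(data, &out) // out.T.Equal(t) is true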
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
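// An illustrative sketch (not part of the original source): a
// case-insensitive prefix match expressed as a RegEx query value.
//
//     query := bson.M{"name": bson.RegEx{Pattern: "^ann", Options: "i"}}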
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
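// An illustrative round trip (not part of the original source) showing
// the tag flags documented above; the Person type is hypothetical.
//
//     type Person struct {
//         Name  string `bson:"n"`
//         Email string `bson:",omitempty"` // dropped while empty
//         Votes int64  `bson:",minsize"`   // stored as int32 when it fits
//     }
//
//     data, err := bson.Marshal(&Person{Name: "Ann", Votes: 1})
//     // data encodes {"n": "Ann", "votes": 1}: Email is omitted, and
//     // Votes fits in 32 bits, so it is written with the int32 kind.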
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0, and false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Map, reflect.Ptr:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
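// An illustrative sketch (not part of the original source) of the
// automatic conversions listed above: a BSON double decodes into an int
// field as long as the integer part is preserved.
//
//     data, _ := bson.Marshal(bson.M{"n": 1.0})
//     var out struct{ N int }
//     _ = bson.Unmarshal(data, &out) // out.N == 1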
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
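// An illustrative sketch (not part of the original source): lazily
// decoding one field while leaving the rest of the document unparsed.
// The data slice and payload shape are hypothetical.
//
//     var doc struct {
//         Kind    string
//         Payload bson.Raw // decoded later, once Kind is known
//     }
//     if err := bson.Unmarshal(data, &doc); err == nil && doc.Kind == "point" {
//         var p struct{ X, Y int }
//         err = doc.Payload.Unmarshal(&p)
//     }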
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
// XXX Drop this after a few releases.
if s := strings.Index(tag, "/"); s >= 0 {
recommend := tag[:s]
for _, c := range tag[s+1:] {
switch c {
case 'c':
recommend += ",omitempty"
case 's':
recommend += ",minsize"
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st)
panic(externalPanic(msg))
}
}
msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend)
panic(externalPanic(msg))
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}

1452
Godeps/_workspace/src/labix.org/v2/mgo/bson/bson_test.go generated vendored Normal file

File diff suppressed because it is too large

795
Godeps/_workspace/src/labix.org/v2/mgo/bson/decode.go generated vendored Normal file
View File

@@ -0,0 +1,795 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyle map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyle = make(map[reflect.Type]int)
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
setterMutex.RLock()
style := setterStyle[outt]
setterMutex.RUnlock()
if style == setterNone {
return nil
}
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyle[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyle[outt] = setterAddr
} else {
setterStyle[outt] = setterNone
return nil
}
style = setterStyle[outt]
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice:
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned good value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == '\x03' {
// Special case for documents. Delegate to readDocTo().
switch out.Kind() {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
default:
switch out.Interface().(type) {
case D:
out.Set(d.readDocElems(out.Type()))
case RawD:
out.Set(d.readRawDocElems(out.Type()))
default:
d.readDocTo(blackHole)
}
}
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
case 0x09: // UTC datetime
// MongoDB represents it as milliseconds since the Unix epoch.
i := d.readInt64()
if i == -62135596800000 {
in = time.Time{} // In UTC for convenience.
} else {
in = time.Unix(i/1e3, i%1e3*1e6)
}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
}
case reflect.Slice, reflect.Array:
// Remember, array (0x04) slices are built with the correct
// element type. If we are here, must be a cross BSON kind
// conversion (e.g. 0x05 unmarshalling on string).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
}
return false
}
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
if d.readByte() == 1 {
return true
}
return false
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
start := d.i
d.i += int(length)
if d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}
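// An illustrative check (not part of the original source): every BSON
// document begins with its own total length as a little-endian int32,
// which is exactly what readInt32 above decodes, and ends with '\x00'.
//
//     data, _ := bson.Marshal(bson.M{"a": 1})
//     n := int(uint32(data[0]) | uint32(data[1])<<8 |
//         uint32(data[2])<<16 | uint32(data[3])<<24)
//     // n == len(data) && data[len(data)-1] == '\x00'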

462
Godeps/_workspace/src/labix.org/v2/mgo/bson/encode.go generated vendored Normal file
View File

@@ -0,0 +1,462 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
if v.Type() == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := v.NumField()-1; i >= 0; i-- {
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
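// An illustrative tie-in (not part of the original source): isZero is
// what gives the ,omitempty flag its meaning during encoding.
//
//     type T struct {
//         Tags []string `bson:",omitempty"`
//     }
//     data, _ := bson.Marshal(T{})                   // zero slice: key omitted
//     data, _ = bson.Marshal(T{Tags: []string{"x"}}) // non-zero: key written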
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName('\x0A', name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName('\x07', name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName('\x0E', name)
e.addStr(s)
default:
e.addElemName('\x02', name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName('\x01', name)
e.addInt64(int64(math.Float64bits(v.Float())))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName('\x10', name)
e.addInt32(int32(u))
} else {
e.addElemName('\x12', name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName('\x11', name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName('\x7F', name)
} else {
e.addElemName('\xFF', name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName('\x10', name)
e.addInt32(int32(i))
} else {
e.addElemName('\x12', name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName('\x08', name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName('\x03', name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName('\x03', name)
e.addDoc(v)
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName('\x05', name)
e.addBinary(s.Kind, s.Data)
case RegEx:
e.addElemName('\x0B', name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName('\x0D', name)
e.addStr(s.Code)
} else {
e.addElemName('\x0F', name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB handles timestamps as milliseconds.
e.addElemName('\x09', name)
e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6))
case url.URL:
e.addElemName('\x02', name)
e.addStr(s.String())
case undefined:
e.addElemName('\x06', name)
default:
e.addElemName('\x03', name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Wonder how that brilliant idea came to life. Obsolete, luckily.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}