add vendor dependencies

Herman Schaaf
2019-07-28 21:22:28 +01:00
parent 5752260ea9
commit 557c8221d1
226 changed files with 47062 additions and 0 deletions

64
vendor/github.com/cupcake/rdb/crc64/crc64.go generated vendored Normal file

File diff suppressed because one or more lines are too long

824
vendor/github.com/cupcake/rdb/decoder.go generated vendored Normal file

@@ -0,0 +1,824 @@
// Package rdb implements parsing and encoding of the Redis RDB file format.
package rdb
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"strconv"
"github.com/cupcake/rdb/crc64"
)
// A Decoder must be implemented to parse an RDB file.
type Decoder interface {
// StartRDB is called when parsing of a valid RDB file starts.
StartRDB()
// StartDatabase is called when database n starts.
// Once a database starts, another database will not start until EndDatabase is called.
StartDatabase(n int)
// AUX field
Aux(key, value []byte)
// ResizeDB hint
ResizeDatabase(dbSize, expiresSize uint32)
// Set is called once for each string key.
Set(key, value []byte, expiry int64)
// StartHash is called at the beginning of a hash.
// Hset will be called exactly length times before EndHash.
StartHash(key []byte, length, expiry int64)
// Hset is called once for each field=value pair in a hash.
Hset(key, field, value []byte)
// EndHash is called when there are no more fields in a hash.
EndHash(key []byte)
// StartSet is called at the beginning of a set.
// Sadd will be called exactly cardinality times before EndSet.
StartSet(key []byte, cardinality, expiry int64)
// Sadd is called once for each member of a set.
Sadd(key, member []byte)
// EndSet is called when there are no more members in a set.
EndSet(key []byte)
// StartList is called at the beginning of a list.
// Rpush will be called exactly length times before EndList.
// If the length of the list is not known, length is -1.
StartList(key []byte, length, expiry int64)
// Rpush is called once for each value in a list.
Rpush(key, value []byte)
// EndList is called when there are no more values in a list.
EndList(key []byte)
// StartZSet is called at the beginning of a sorted set.
// Zadd will be called exactly cardinality times before EndZSet.
StartZSet(key []byte, cardinality, expiry int64)
// Zadd is called once for each member of a sorted set.
Zadd(key []byte, score float64, member []byte)
// EndZSet is called when there are no more members in a sorted set.
EndZSet(key []byte)
// EndDatabase is called at the end of a database.
EndDatabase(n int)
// EndRDB is called when parsing of the RDB file is complete.
EndRDB()
}
// Decode parses an RDB file from r and calls the decode hooks on d.
func Decode(r io.Reader, d Decoder) error {
decoder := &decode{d, make([]byte, 8), bufio.NewReader(r)}
return decoder.decode()
}
// DecodeDump decodes a byte slice from the Redis DUMP command. The dump does
// not contain the database, key, or expiry, so they must be supplied in the
// function call (but may be zero values).
func DecodeDump(dump []byte, db int, key []byte, expiry int64, d Decoder) error {
err := verifyDump(dump)
if err != nil {
return err
}
decoder := &decode{d, make([]byte, 8), bytes.NewReader(dump[1:])}
decoder.event.StartRDB()
decoder.event.StartDatabase(db)
err = decoder.readObject(key, ValueType(dump[0]), expiry)
decoder.event.EndDatabase(db)
decoder.event.EndRDB()
return err
}
type byteReader interface {
io.Reader
io.ByteReader
}
type decode struct {
event Decoder
intBuf []byte
r byteReader
}
type ValueType byte
const (
TypeString ValueType = 0
TypeList ValueType = 1
TypeSet ValueType = 2
TypeZSet ValueType = 3
TypeHash ValueType = 4
TypeHashZipmap ValueType = 9
TypeListZiplist ValueType = 10
TypeSetIntset ValueType = 11
TypeZSetZiplist ValueType = 12
TypeHashZiplist ValueType = 13
TypeListQuicklist ValueType = 14
)
const (
rdb6bitLen = 0
rdb14bitLen = 1
rdb32bitLen = 2
rdbEncVal = 3
rdbFlagAux = 0xfa
rdbFlagResizeDB = 0xfb
rdbFlagExpiryMS = 0xfc
rdbFlagExpiry = 0xfd
rdbFlagSelectDB = 0xfe
rdbFlagEOF = 0xff
rdbEncInt8 = 0
rdbEncInt16 = 1
rdbEncInt32 = 2
rdbEncLZF = 3
rdbZiplist6bitlenString = 0
rdbZiplist14bitlenString = 1
rdbZiplist32bitlenString = 2
rdbZiplistInt16 = 0xc0
rdbZiplistInt32 = 0xd0
rdbZiplistInt64 = 0xe0
rdbZiplistInt24 = 0xf0
rdbZiplistInt8 = 0xfe
rdbZiplistInt4 = 15
)
func (d *decode) decode() error {
err := d.checkHeader()
if err != nil {
return err
}
d.event.StartRDB()
var db uint32
var expiry int64
firstDB := true
for {
objType, err := d.r.ReadByte()
if err != nil {
return err
}
switch objType {
case rdbFlagAux:
auxKey, err := d.readString()
if err != nil {
return err
}
auxVal, err := d.readString()
if err != nil {
return err
}
d.event.Aux(auxKey, auxVal)
case rdbFlagResizeDB:
dbSize, _, err := d.readLength()
if err != nil {
return err
}
expiresSize, _, err := d.readLength()
if err != nil {
return err
}
d.event.ResizeDatabase(dbSize, expiresSize)
case rdbFlagExpiryMS:
_, err := io.ReadFull(d.r, d.intBuf)
if err != nil {
return err
}
expiry = int64(binary.LittleEndian.Uint64(d.intBuf))
case rdbFlagExpiry:
_, err := io.ReadFull(d.r, d.intBuf[:4])
if err != nil {
return err
}
expiry = int64(binary.LittleEndian.Uint32(d.intBuf)) * 1000
case rdbFlagSelectDB:
if !firstDB {
d.event.EndDatabase(int(db))
}
db, _, err = d.readLength()
if err != nil {
return err
}
d.event.StartDatabase(int(db))
case rdbFlagEOF:
d.event.EndDatabase(int(db))
d.event.EndRDB()
return nil
default:
key, err := d.readString()
if err != nil {
return err
}
err = d.readObject(key, ValueType(objType), expiry)
if err != nil {
return err
}
expiry = 0
}
}
panic("not reached")
}
func (d *decode) readObject(key []byte, typ ValueType, expiry int64) error {
switch typ {
case TypeString:
value, err := d.readString()
if err != nil {
return err
}
d.event.Set(key, value, expiry)
case TypeList:
length, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartList(key, int64(length), expiry)
for i := uint32(0); i < length; i++ {
value, err := d.readString()
if err != nil {
return err
}
d.event.Rpush(key, value)
}
d.event.EndList(key)
case TypeListQuicklist:
length, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartList(key, int64(-1), expiry)
for i := uint32(0); i < length; i++ {
d.readZiplist(key, 0, false)
}
d.event.EndList(key)
case TypeSet:
cardinality, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartSet(key, int64(cardinality), expiry)
for i := uint32(0); i < cardinality; i++ {
member, err := d.readString()
if err != nil {
return err
}
d.event.Sadd(key, member)
}
d.event.EndSet(key)
case TypeZSet:
cardinality, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartZSet(key, int64(cardinality), expiry)
for i := uint32(0); i < cardinality; i++ {
member, err := d.readString()
if err != nil {
return err
}
score, err := d.readFloat64()
if err != nil {
return err
}
d.event.Zadd(key, score, member)
}
d.event.EndZSet(key)
case TypeHash:
length, _, err := d.readLength()
if err != nil {
return err
}
d.event.StartHash(key, int64(length), expiry)
for i := uint32(0); i < length; i++ {
field, err := d.readString()
if err != nil {
return err
}
value, err := d.readString()
if err != nil {
return err
}
d.event.Hset(key, field, value)
}
d.event.EndHash(key)
case TypeHashZipmap:
return d.readZipmap(key, expiry)
case TypeListZiplist:
return d.readZiplist(key, expiry, true)
case TypeSetIntset:
return d.readIntset(key, expiry)
case TypeZSetZiplist:
return d.readZiplistZset(key, expiry)
case TypeHashZiplist:
return d.readZiplistHash(key, expiry)
default:
return fmt.Errorf("rdb: unknown object type %d for key %s", typ, key)
}
return nil
}
func (d *decode) readZipmap(key []byte, expiry int64) error {
var length int
zipmap, err := d.readString()
if err != nil {
return err
}
buf := newSliceBuffer(zipmap)
lenByte, err := buf.ReadByte()
if err != nil {
return err
}
if lenByte >= 254 { // we need to count the items manually
length, err = countZipmapItems(buf)
if err != nil {
return err
}
length /= 2
} else {
length = int(lenByte)
}
d.event.StartHash(key, int64(length), expiry)
for i := 0; i < length; i++ {
field, err := readZipmapItem(buf, false)
if err != nil {
return err
}
value, err := readZipmapItem(buf, true)
if err != nil {
return err
}
d.event.Hset(key, field, value)
}
d.event.EndHash(key)
return nil
}
func readZipmapItem(buf *sliceBuffer, readFree bool) ([]byte, error) {
length, free, err := readZipmapItemLength(buf, readFree)
if err != nil {
return nil, err
}
if length == -1 {
return nil, nil
}
value, err := buf.Slice(length)
if err != nil {
return nil, err
}
_, err = buf.Seek(int64(free), 1)
return value, err
}
func countZipmapItems(buf *sliceBuffer) (int, error) {
n := 0
for {
strLen, free, err := readZipmapItemLength(buf, n%2 != 0)
if err != nil {
return 0, err
}
if strLen == -1 {
break
}
_, err = buf.Seek(int64(strLen)+int64(free), 1)
if err != nil {
return 0, err
}
n++
}
_, err := buf.Seek(0, 0)
return n, err
}
func readZipmapItemLength(buf *sliceBuffer, readFree bool) (int, int, error) {
b, err := buf.ReadByte()
if err != nil {
return 0, 0, err
}
switch b {
case 253:
s, err := buf.Slice(5)
if err != nil {
return 0, 0, err
}
return int(binary.BigEndian.Uint32(s)), int(s[4]), nil
case 254:
return 0, 0, fmt.Errorf("rdb: invalid zipmap item length")
case 255:
return -1, 0, nil
}
var free byte
if readFree {
free, err = buf.ReadByte()
}
return int(b), int(free), err
}
func (d *decode) readZiplist(key []byte, expiry int64, addListEvents bool) error {
ziplist, err := d.readString()
if err != nil {
return err
}
buf := newSliceBuffer(ziplist)
length, err := readZiplistLength(buf)
if err != nil {
return err
}
if addListEvents {
d.event.StartList(key, length, expiry)
}
for i := int64(0); i < length; i++ {
entry, err := readZiplistEntry(buf)
if err != nil {
return err
}
d.event.Rpush(key, entry)
}
if addListEvents {
d.event.EndList(key)
}
return nil
}
func (d *decode) readZiplistZset(key []byte, expiry int64) error {
ziplist, err := d.readString()
if err != nil {
return err
}
buf := newSliceBuffer(ziplist)
cardinality, err := readZiplistLength(buf)
if err != nil {
return err
}
cardinality /= 2
d.event.StartZSet(key, cardinality, expiry)
for i := int64(0); i < cardinality; i++ {
member, err := readZiplistEntry(buf)
if err != nil {
return err
}
scoreBytes, err := readZiplistEntry(buf)
if err != nil {
return err
}
score, err := strconv.ParseFloat(string(scoreBytes), 64)
if err != nil {
return err
}
d.event.Zadd(key, score, member)
}
d.event.EndZSet(key)
return nil
}
func (d *decode) readZiplistHash(key []byte, expiry int64) error {
ziplist, err := d.readString()
if err != nil {
return err
}
buf := newSliceBuffer(ziplist)
length, err := readZiplistLength(buf)
if err != nil {
return err
}
length /= 2
d.event.StartHash(key, length, expiry)
for i := int64(0); i < length; i++ {
field, err := readZiplistEntry(buf)
if err != nil {
return err
}
value, err := readZiplistEntry(buf)
if err != nil {
return err
}
d.event.Hset(key, field, value)
}
d.event.EndHash(key)
return nil
}
func readZiplistLength(buf *sliceBuffer) (int64, error) {
buf.Seek(8, 0) // skip the zlbytes and zltail
lenBytes, err := buf.Slice(2)
if err != nil {
return 0, err
}
return int64(binary.LittleEndian.Uint16(lenBytes)), nil
}
func readZiplistEntry(buf *sliceBuffer) ([]byte, error) {
prevLen, err := buf.ReadByte()
if err != nil {
return nil, err
}
if prevLen == 254 {
buf.Seek(4, 1) // skip the 4-byte prevlen
}
header, err := buf.ReadByte()
if err != nil {
return nil, err
}
switch {
case header>>6 == rdbZiplist6bitlenString:
return buf.Slice(int(header & 0x3f))
case header>>6 == rdbZiplist14bitlenString:
b, err := buf.ReadByte()
if err != nil {
return nil, err
}
return buf.Slice((int(header&0x3f) << 8) | int(b))
case header>>6 == rdbZiplist32bitlenString:
lenBytes, err := buf.Slice(4)
if err != nil {
return nil, err
}
return buf.Slice(int(binary.BigEndian.Uint32(lenBytes)))
case header == rdbZiplistInt16:
intBytes, err := buf.Slice(2)
if err != nil {
return nil, err
}
return []byte(strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)), nil
case header == rdbZiplistInt32:
intBytes, err := buf.Slice(4)
if err != nil {
return nil, err
}
return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)), nil
case header == rdbZiplistInt64:
intBytes, err := buf.Slice(8)
if err != nil {
return nil, err
}
return []byte(strconv.FormatInt(int64(binary.LittleEndian.Uint64(intBytes)), 10)), nil
case header == rdbZiplistInt24:
intBytes := make([]byte, 4)
_, err := buf.Read(intBytes[1:])
if err != nil {
return nil, err
}
return []byte(strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))>>8), 10)), nil
case header == rdbZiplistInt8:
b, err := buf.ReadByte()
return []byte(strconv.FormatInt(int64(int8(b)), 10)), err
case header>>4 == rdbZiplistInt4:
return []byte(strconv.FormatInt(int64(header&0x0f)-1, 10)), nil
}
return nil, fmt.Errorf("rdb: unknown ziplist header byte: %d", header)
}
func (d *decode) readIntset(key []byte, expiry int64) error {
intset, err := d.readString()
if err != nil {
return err
}
buf := newSliceBuffer(intset)
intSizeBytes, err := buf.Slice(4)
if err != nil {
return err
}
intSize := binary.LittleEndian.Uint32(intSizeBytes)
if intSize != 2 && intSize != 4 && intSize != 8 {
return fmt.Errorf("rdb: unknown intset encoding: %d", intSize)
}
lenBytes, err := buf.Slice(4)
if err != nil {
return err
}
cardinality := binary.LittleEndian.Uint32(lenBytes)
d.event.StartSet(key, int64(cardinality), expiry)
for i := uint32(0); i < cardinality; i++ {
intBytes, err := buf.Slice(int(intSize))
if err != nil {
return err
}
var intString string
switch intSize {
case 2:
intString = strconv.FormatInt(int64(int16(binary.LittleEndian.Uint16(intBytes))), 10)
case 4:
intString = strconv.FormatInt(int64(int32(binary.LittleEndian.Uint32(intBytes))), 10)
case 8:
intString = strconv.FormatInt(int64(binary.LittleEndian.Uint64(intBytes)), 10)
}
d.event.Sadd(key, []byte(intString))
}
d.event.EndSet(key)
return nil
}
func (d *decode) checkHeader() error {
header := make([]byte, 9)
_, err := io.ReadFull(d.r, header)
if err != nil {
return err
}
if !bytes.Equal(header[:5], []byte("REDIS")) {
return fmt.Errorf("rdb: invalid file format")
}
version, _ := strconv.ParseInt(string(header[5:]), 10, 64)
if version < 1 || version > 7 {
return fmt.Errorf("rdb: invalid RDB version number %d", version)
}
return nil
}
func (d *decode) readString() ([]byte, error) {
length, encoded, err := d.readLength()
if err != nil {
return nil, err
}
if encoded {
switch length {
case rdbEncInt8:
i, err := d.readUint8()
return []byte(strconv.FormatInt(int64(int8(i)), 10)), err
case rdbEncInt16:
i, err := d.readUint16()
return []byte(strconv.FormatInt(int64(int16(i)), 10)), err
case rdbEncInt32:
i, err := d.readUint32()
return []byte(strconv.FormatInt(int64(int32(i)), 10)), err
case rdbEncLZF:
clen, _, err := d.readLength()
if err != nil {
return nil, err
}
ulen, _, err := d.readLength()
if err != nil {
return nil, err
}
compressed := make([]byte, clen)
_, err = io.ReadFull(d.r, compressed)
if err != nil {
return nil, err
}
decompressed := lzfDecompress(compressed, int(ulen))
if len(decompressed) != int(ulen) {
return nil, fmt.Errorf("decompressed string length %d didn't match expected length %d", len(decompressed), ulen)
}
return decompressed, nil
}
}
str := make([]byte, length)
_, err = io.ReadFull(d.r, str)
return str, err
}
func (d *decode) readUint8() (uint8, error) {
b, err := d.r.ReadByte()
return uint8(b), err
}
func (d *decode) readUint16() (uint16, error) {
_, err := io.ReadFull(d.r, d.intBuf[:2])
if err != nil {
return 0, err
}
return binary.LittleEndian.Uint16(d.intBuf), nil
}
func (d *decode) readUint32() (uint32, error) {
_, err := io.ReadFull(d.r, d.intBuf[:4])
if err != nil {
return 0, err
}
return binary.LittleEndian.Uint32(d.intBuf), nil
}
func (d *decode) readUint64() (uint64, error) {
_, err := io.ReadFull(d.r, d.intBuf)
if err != nil {
return 0, err
}
return binary.LittleEndian.Uint64(d.intBuf), nil
}
func (d *decode) readUint32Big() (uint32, error) {
_, err := io.ReadFull(d.r, d.intBuf[:4])
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(d.intBuf), nil
}
// Doubles are saved as strings prefixed by an unsigned
// 8 bit integer specifying the length of the representation.
// This 8 bit integer has special values in order to specify the following
// conditions:
// 253: not a number
// 254: + inf
// 255: - inf
func (d *decode) readFloat64() (float64, error) {
length, err := d.readUint8()
if err != nil {
return 0, err
}
switch length {
case 253:
return math.NaN(), nil
case 254:
return math.Inf(0), nil
case 255:
return math.Inf(-1), nil
default:
floatBytes := make([]byte, length)
_, err := io.ReadFull(d.r, floatBytes)
if err != nil {
return 0, err
}
f, err := strconv.ParseFloat(string(floatBytes), 64)
return f, err
}
panic("not reached")
}
func (d *decode) readLength() (uint32, bool, error) {
b, err := d.r.ReadByte()
if err != nil {
return 0, false, err
}
// The first two bits of the first byte are used to indicate the length encoding type
switch (b & 0xc0) >> 6 {
case rdb6bitLen:
// When the first two bits are 00, the next 6 bits are the length.
return uint32(b & 0x3f), false, nil
case rdb14bitLen:
// When the first two bits are 01, the next 14 bits are the length.
bb, err := d.r.ReadByte()
if err != nil {
return 0, false, err
}
return (uint32(b&0x3f) << 8) | uint32(bb), false, nil
case rdbEncVal:
// When the first two bits are 11, the next object is encoded.
// The next 6 bits indicate the encoding type.
return uint32(b & 0x3f), true, nil
default:
// When the first two bits are 10, the next 6 bits are discarded.
// The next 4 bytes are the length.
length, err := d.readUint32Big()
return length, false, err
}
panic("not reached")
}
func verifyDump(d []byte) error {
if len(d) < 10 {
return fmt.Errorf("rdb: invalid dump length")
}
version := binary.LittleEndian.Uint16(d[len(d)-10:])
if version != uint16(Version) {
return fmt.Errorf("rdb: invalid version %d, expecting %d", version, Version)
}
if binary.LittleEndian.Uint64(d[len(d)-8:]) != crc64.Digest(d[:len(d)-8]) {
return fmt.Errorf("rdb: invalid CRC checksum")
}
return nil
}
func lzfDecompress(in []byte, outlen int) []byte {
out := make([]byte, outlen)
for i, o := 0, 0; i < len(in); {
ctrl := int(in[i])
i++
if ctrl < 32 {
for x := 0; x <= ctrl; x++ {
out[o] = in[i]
i++
o++
}
} else {
length := ctrl >> 5
if length == 7 {
length = length + int(in[i])
i++
}
ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1
i++
for x := 0; x <= length+1; x++ {
out[o] = out[ref]
ref++
o++
}
}
}
return out
}
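
To make the callback flow concrete, here is a minimal sketch (not part of the vendored file) of driving `rdb.Decode` with a custom decoder. It embeds the `nopdecoder` helper vendored further below so only the callbacks of interest need implementing; the file path is an illustrative assumption:

```go
package main

import (
	"fmt"
	"os"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

// keyPrinter embeds NopDecoder so that only the callbacks of interest
// need to be implemented; every other event is a no-op.
type keyPrinter struct {
	nopdecoder.NopDecoder
}

// Set fires once for each plain string key in the file.
func (p keyPrinter) Set(key, value []byte, expiry int64) {
	fmt.Printf("%s = %s (expiry: %d)\n", key, value, expiry)
}

func main() {
	f, err := os.Open("dump.rdb") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := rdb.Decode(f, keyPrinter{}); err != nil {
		panic(err)
	}
}
```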

130
vendor/github.com/cupcake/rdb/encoder.go generated vendored Normal file

@@ -0,0 +1,130 @@
package rdb
import (
"encoding/binary"
"fmt"
"hash"
"io"
"math"
"strconv"
"github.com/cupcake/rdb/crc64"
)
const Version = 6
type Encoder struct {
w io.Writer
crc hash.Hash
}
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{crc: crc64.New()}
e.w = io.MultiWriter(w, e.crc)
return e
}
func (e *Encoder) EncodeHeader() error {
_, err := fmt.Fprintf(e.w, "REDIS%04d", Version)
return err
}
func (e *Encoder) EncodeFooter() error {
e.w.Write([]byte{rdbFlagEOF})
_, err := e.w.Write(e.crc.Sum(nil))
return err
}
func (e *Encoder) EncodeDumpFooter() error {
binary.Write(e.w, binary.LittleEndian, uint16(Version))
_, err := e.w.Write(e.crc.Sum(nil))
return err
}
func (e *Encoder) EncodeDatabase(n int) error {
e.w.Write([]byte{rdbFlagSelectDB})
return e.EncodeLength(uint32(n))
}
func (e *Encoder) EncodeExpiry(expiry uint64) error {
b := make([]byte, 9)
b[0] = rdbFlagExpiryMS
binary.LittleEndian.PutUint64(b[1:], expiry)
_, err := e.w.Write(b)
return err
}
func (e *Encoder) EncodeType(v ValueType) error {
_, err := e.w.Write([]byte{byte(v)})
return err
}
func (e *Encoder) EncodeString(s []byte) error {
written, err := e.encodeIntString(s)
if written {
return err
}
e.EncodeLength(uint32(len(s)))
_, err = e.w.Write(s)
return err
}
func (e *Encoder) EncodeLength(l uint32) (err error) {
switch {
case l < 1<<6:
_, err = e.w.Write([]byte{byte(l)})
case l < 1<<14:
_, err = e.w.Write([]byte{byte(l>>8) | rdb14bitLen<<6, byte(l)})
default:
b := make([]byte, 5)
b[0] = rdb32bitLen << 6
binary.BigEndian.PutUint32(b[1:], l)
_, err = e.w.Write(b)
}
return
}
func (e *Encoder) EncodeFloat(f float64) (err error) {
switch {
case math.IsNaN(f):
_, err = e.w.Write([]byte{253})
case math.IsInf(f, 1):
_, err = e.w.Write([]byte{254})
case math.IsInf(f, -1):
_, err = e.w.Write([]byte{255})
default:
b := []byte(strconv.FormatFloat(f, 'g', 17, 64))
e.w.Write([]byte{byte(len(b))})
_, err = e.w.Write(b)
}
return
}
func (e *Encoder) encodeIntString(b []byte) (written bool, err error) {
s := string(b)
i, err := strconv.ParseInt(s, 10, 32)
if err != nil {
return
}
// if the stringified parsed int isn't exactly the same, we can't encode it as an int
if s != strconv.FormatInt(i, 10) {
return
}
switch {
case i >= math.MinInt8 && i <= math.MaxInt8:
_, err = e.w.Write([]byte{rdbEncVal << 6, byte(int8(i))})
case i >= math.MinInt16 && i <= math.MaxInt16:
b := make([]byte, 3)
b[0] = rdbEncVal<<6 | rdbEncInt16
binary.LittleEndian.PutUint16(b[1:], uint16(int16(i)))
_, err = e.w.Write(b)
case i >= math.MinInt32 && i <= math.MaxInt32:
b := make([]byte, 5)
b[0] = rdbEncVal<<6 | rdbEncInt32
binary.LittleEndian.PutUint32(b[1:], uint32(int32(i)))
_, err = e.w.Write(b)
default:
return
}
return true, err
}
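
As a rough sketch of how these pieces compose, using only the APIs shown in this commit (the round trip relies on EncodeDumpFooter and DecodeDump agreeing on the version/CRC trailer):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

type noop struct{ nopdecoder.NopDecoder }

func main() {
	// A DUMP payload is the value type byte, the encoded value, then a
	// two-byte version and an eight-byte CRC64 trailer.
	var buf bytes.Buffer
	e := rdb.NewEncoder(&buf)
	_ = e.EncodeType(rdb.TypeString)
	_ = e.EncodeString([]byte("hello"))
	if err := e.EncodeDumpFooter(); err != nil {
		panic(err)
	}
	// verifyDump inside DecodeDump re-checks the version and the CRC.
	if err := rdb.DecodeDump(buf.Bytes(), 0, []byte("key"), 0, noop{}); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d bytes\n", buf.Len())
}
```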

24
vendor/github.com/cupcake/rdb/nopdecoder/nop_decoder.go generated vendored Normal file

@@ -0,0 +1,24 @@
package nopdecoder
// NopDecoder may be embedded in a real Decoder to avoid implementing methods.
type NopDecoder struct{}
func (d NopDecoder) StartRDB() {}
func (d NopDecoder) StartDatabase(n int) {}
func (d NopDecoder) Aux(key, value []byte) {}
func (d NopDecoder) ResizeDatabase(dbSize, expiresSize uint32) {}
func (d NopDecoder) EndDatabase(n int) {}
func (d NopDecoder) EndRDB() {}
func (d NopDecoder) Set(key, value []byte, expiry int64) {}
func (d NopDecoder) StartHash(key []byte, length, expiry int64) {}
func (d NopDecoder) Hset(key, field, value []byte) {}
func (d NopDecoder) EndHash(key []byte) {}
func (d NopDecoder) StartSet(key []byte, cardinality, expiry int64) {}
func (d NopDecoder) Sadd(key, member []byte) {}
func (d NopDecoder) EndSet(key []byte) {}
func (d NopDecoder) StartList(key []byte, length, expiry int64) {}
func (d NopDecoder) Rpush(key, value []byte) {}
func (d NopDecoder) EndList(key []byte) {}
func (d NopDecoder) StartZSet(key []byte, cardinality, expiry int64) {}
func (d NopDecoder) Zadd(key []byte, score float64, member []byte) {}
func (d NopDecoder) EndZSet(key []byte) {}

67
vendor/github.com/cupcake/rdb/slice_buffer.go generated vendored Normal file

@@ -0,0 +1,67 @@
package rdb
import (
"errors"
"io"
)
type sliceBuffer struct {
s []byte
i int
}
func newSliceBuffer(s []byte) *sliceBuffer {
return &sliceBuffer{s, 0}
}
func (s *sliceBuffer) Slice(n int) ([]byte, error) {
if s.i+n > len(s.s) {
return nil, io.EOF
}
b := s.s[s.i : s.i+n]
s.i += n
return b, nil
}
func (s *sliceBuffer) ReadByte() (byte, error) {
if s.i >= len(s.s) {
return 0, io.EOF
}
b := s.s[s.i]
s.i++
return b, nil
}
func (s *sliceBuffer) Read(b []byte) (int, error) {
if len(b) == 0 {
return 0, nil
}
if s.i >= len(s.s) {
return 0, io.EOF
}
n := copy(b, s.s[s.i:])
s.i += n
return n, nil
}
func (s *sliceBuffer) Seek(offset int64, whence int) (int64, error) {
var abs int64
switch whence {
case 0:
abs = offset
case 1:
abs = int64(s.i) + offset
case 2:
abs = int64(len(s.s)) + offset
default:
return 0, errors.New("invalid whence")
}
if abs < 0 {
return 0, errors.New("negative position")
}
if abs >= 1<<31 {
return 0, errors.New("position out of range")
}
s.i = int(abs)
return abs, nil
}
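
Since `sliceBuffer` is unexported, a usage sketch only makes sense inside the package; this illustrative snippet shows the contract the zipmap and ziplist readers in decoder.go rely on:

```go
// In-package sketch with illustrative bytes: Slice returns a window into
// the underlying slice without copying, ReadByte advances one byte, and
// Seek(0, 0) rewinds to the start.
buf := newSliceBuffer([]byte{0x01, 0x02, 0x03})
head, _ := buf.Slice(2) // head == []byte{0x01, 0x02}
b, _ := buf.ReadByte()  // b == 0x03
_, _ = buf.Seek(0, 0)   // rewind; the next read starts over
_, _ = head, b
```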

25
vendor/github.com/edsrzf/mmap-go/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

116
vendor/github.com/edsrzf/mmap-go/mmap.go generated vendored Normal file

@@ -0,0 +1,116 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file defines the common package interface and contains a little bit of
// factored out logic.
// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
// but doesn't go out of its way to abstract away every little platform detail.
// This specifically means:
// * forked processes may or may not inherit mappings
// * a file's timestamp may or may not be updated by writes through mappings
// * specifying a size larger than the file's actual size can increase the file's size
// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
package mmap
import (
"errors"
"os"
"reflect"
"unsafe"
)
const (
// RDONLY maps the memory read-only.
// Attempts to write to the MMap object will result in undefined behavior.
RDONLY = 0
// RDWR maps the memory as read-write. Writes to the MMap object will update the
// underlying file.
RDWR = 1 << iota
// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
// memory, but the underlying file will remain unchanged.
COPY
// If EXEC is set, the mapped memory is marked as executable.
EXEC
)
const (
// If the ANON flag is set, the mapped memory will not be backed by a file.
ANON = 1 << iota
)
// MMap represents a file mapped into memory.
type MMap []byte
// Map maps an entire file into memory.
// If ANON is set in flags, f is ignored.
func Map(f *os.File, prot, flags int) (MMap, error) {
return MapRegion(f, -1, prot, flags, 0)
}
// MapRegion maps part of a file into memory.
// The offset parameter must be a multiple of the system's page size.
// If length < 0, the entire file will be mapped.
// If ANON is set in flags, f is ignored.
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
if offset%int64(os.Getpagesize()) != 0 {
return nil, errors.New("offset parameter must be a multiple of the system's page size")
}
var fd uintptr
if flags&ANON == 0 {
fd = uintptr(f.Fd())
if length < 0 {
fi, err := f.Stat()
if err != nil {
return nil, err
}
length = int(fi.Size())
}
} else {
if length <= 0 {
return nil, errors.New("anonymous mapping requires non-zero length")
}
fd = ^uintptr(0)
}
return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
}
func (m *MMap) header() *reflect.SliceHeader {
return (*reflect.SliceHeader)(unsafe.Pointer(m))
}
// Lock keeps the mapped region in physical memory, ensuring that it will not be
// swapped out.
func (m MMap) Lock() error {
dh := m.header()
return lock(dh.Data, uintptr(dh.Len))
}
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
// be swapped out.
// If m is already unlocked, an error will result.
func (m MMap) Unlock() error {
dh := m.header()
return unlock(dh.Data, uintptr(dh.Len))
}
// Flush synchronizes the mapping's contents to the file's contents on disk.
func (m MMap) Flush() error {
dh := m.header()
return flush(dh.Data, uintptr(dh.Len))
}
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
// m to nil.
// Trying to read or write any remaining references to m after Unmap is called will
// result in undefined behavior.
// Unmap should only be called on the slice value that was originally returned from
// a call to Map. Calling Unmap on a derived slice may cause errors.
func (m *MMap) Unmap() error {
dh := m.header()
err := unmap(dh.Data, uintptr(dh.Len))
*m = nil
return err
}
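
A hedged usage sketch for the common read-only path (file name and error handling are illustrative):

```go
package main

import (
	"fmt"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	f, err := os.Open("data.bin") // illustrative path; assumed non-empty
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Map the whole file read-only; writing to m would be undefined behavior.
	m, err := mmap.Map(f, mmap.RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer m.Unmap() // flushes remaining changes and invalidates m

	fmt.Printf("mapped %d bytes; first byte: %#x\n", len(m), m[0])
}
```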

67
vendor/github.com/edsrzf/mmap-go/mmap_unix.go generated vendored Normal file

@@ -0,0 +1,67 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris netbsd
package mmap
import (
"syscall"
)
func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
flags := syscall.MAP_SHARED
prot := syscall.PROT_READ
switch {
case inprot&COPY != 0:
prot |= syscall.PROT_WRITE
flags = syscall.MAP_PRIVATE
case inprot&RDWR != 0:
prot |= syscall.PROT_WRITE
}
if inprot&EXEC != 0 {
prot |= syscall.PROT_EXEC
}
if inflags&ANON != 0 {
flags |= syscall.MAP_ANON
}
b, err := syscall.Mmap(int(fd), off, len, prot, flags)
if err != nil {
return nil, err
}
return b, nil
}
func flush(addr, len uintptr) error {
_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func lock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unlock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unmap(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}

125
vendor/github.com/edsrzf/mmap-go/mmap_windows.go generated vendored Normal file

@@ -0,0 +1,125 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
import (
"errors"
"os"
"sync"
"syscall"
)
// mmap on Windows is a two-step process.
// First, we call CreateFileMapping to get a handle.
// Then, we call MapViewOfFile to get an actual pointer into memory.
// Because we want to emulate a POSIX-style mmap, we don't want to expose
// the handle -- only the pointer. We also want to return only a byte slice,
// not a struct, so it's convenient to manipulate.
// We keep this map so that we can get back the original handle from the memory address.
var handleLock sync.Mutex
var handleMap = map[uintptr]syscall.Handle{}
func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
flProtect := uint32(syscall.PAGE_READONLY)
dwDesiredAccess := uint32(syscall.FILE_MAP_READ)
switch {
case prot&COPY != 0:
flProtect = syscall.PAGE_WRITECOPY
dwDesiredAccess = syscall.FILE_MAP_COPY
case prot&RDWR != 0:
flProtect = syscall.PAGE_READWRITE
dwDesiredAccess = syscall.FILE_MAP_WRITE
}
if prot&EXEC != 0 {
flProtect <<= 4
dwDesiredAccess |= syscall.FILE_MAP_EXECUTE
}
// The maximum size is the area of the file, starting from 0,
// that we wish to allow to be mappable. It is the sum of
// the length the user requested, plus the offset where that length
// is starting from. This does not map the data into memory.
maxSizeHigh := uint32((off + int64(len)) >> 32)
maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
// TODO: Do we need to set some security attributes? It might help portability.
h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
if h == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
// Actually map a view of the data into memory. The view's size
// is the length the user requested.
fileOffsetHigh := uint32(off >> 32)
fileOffsetLow := uint32(off & 0xFFFFFFFF)
addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
handleLock.Lock()
handleMap[addr] = h
handleLock.Unlock()
m := MMap{}
dh := m.header()
dh.Data = addr
dh.Len = len
dh.Cap = dh.Len
return m, nil
}
func flush(addr, len uintptr) error {
errno := syscall.FlushViewOfFile(addr, len)
if errno != nil {
return os.NewSyscallError("FlushViewOfFile", errno)
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
errno = syscall.FlushFileBuffers(handle)
return os.NewSyscallError("FlushFileBuffers", errno)
}
func lock(addr, len uintptr) error {
errno := syscall.VirtualLock(addr, len)
return os.NewSyscallError("VirtualLock", errno)
}
func unlock(addr, len uintptr) error {
errno := syscall.VirtualUnlock(addr, len)
return os.NewSyscallError("VirtualUnlock", errno)
}
func unmap(addr, len uintptr) error {
flush(addr, len)
// Lock the UnmapViewOfFile along with the handleMap deletion.
// As soon as we unmap the view, the OS is free to give the
// same addr to another new map. We don't want another goroutine
// to insert and remove the same addr into handleMap while
// we're trying to remove our old addr/handle pair.
handleLock.Lock()
defer handleLock.Unlock()
err := syscall.UnmapViewOfFile(addr)
if err != nil {
return err
}
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
delete(handleMap, addr)
e := syscall.CloseHandle(syscall.Handle(handle))
return os.NewSyscallError("CloseHandle", e)
}

8
vendor/github.com/edsrzf/mmap-go/msync_netbsd.go generated vendored Normal file

@@ -0,0 +1,8 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
const _SYS_MSYNC = 277
const _MS_SYNC = 0x04

14
vendor/github.com/edsrzf/mmap-go/msync_unix.go generated vendored Normal file

@@ -0,0 +1,14 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris
package mmap
import (
"syscall"
)
const _SYS_MSYNC = syscall.SYS_MSYNC
const _MS_SYNC = syscall.MS_SYNC

35
vendor/github.com/go-redis/redis/CHANGELOG.md generated vendored Normal file

@@ -0,0 +1,35 @@
# Changelog
## v7 WIP
- WrapProcess is replaced with the more convenient AddHook, which has access to context.Context.
- WithContext can no longer be used to create a shallow copy of the client.
- New methods ProcessContext, DoContext, and ExecContext.
- Client respects Context.Deadline when setting the net.Conn deadline.
- Client listens on Context.Done while waiting for a connection from the pool and returns an error when the context is cancelled.
- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow detecting reconnections.
- `time.Time` is now marshalled in RFC3339 format. The `rdb.Get("foo").Time()` helper is added to parse the time.
## v6.15
- Cluster and Ring pipelines process commands for each node in its own goroutine.
## v6.14
- Added Options.MinIdleConns.
- Added Options.MaxConnAge.
- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
- Add Client.Do to simplify creating custom commands.
- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
- Lower memory usage.
## v6.13
- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set `HashReplicas = 1000` for better key distribution between shards.
- Cluster client was optimized to use much less memory when reloading cluster state.
- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
- Dialer.KeepAlive is set to 5 minutes by default.
## v6.12
- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal Redis servers that don't have cluster mode enabled. See https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup

25
vendor/github.com/go-redis/redis/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2013 The github.com/go-redis/redis Authors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

24
vendor/github.com/go-redis/redis/Makefile generated vendored Normal file

@@ -0,0 +1,24 @@
all: testdeps
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
go vet
go get github.com/gordonklaus/ineffassign
ineffassign .
golangci-lint run
testdeps: testdata/redis/src/redis-server
bench: testdeps
go test ./... -test.run=NONE -test.bench=. -test.benchmem
.PHONY: all test testdeps bench
testdata/redis:
mkdir -p $@
wget -qO- https://github.com/antirez/redis/archive/5.0.tar.gz | tar xvz --strip-components=1 -C $@
testdata/redis/src/redis-server: testdata/redis
sed -i.bak 's/libjemalloc.a/libjemalloc.a -lrt/g' $</src/Makefile
cd $< && make all

119
vendor/github.com/go-redis/redis/README.md generated vendored Normal file

@@ -0,0 +1,119 @@
# Redis client for Golang
[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
[![GoDoc](https://godoc.org/github.com/go-redis/redis?status.svg)](https://godoc.org/github.com/go-redis/redis)
[![Airbrake](https://img.shields.io/badge/kudos-airbrake.io-orange.svg)](https://airbrake.io)
Supports:
- Redis 3 commands except QUIT, MONITOR, SLOWLOG and SYNC.
- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://godoc.org/github.com/go-redis/redis#PubSub).
- [Transactions](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
- [Pipeline](https://godoc.org/github.com/go-redis/redis#example-Client-Pipeline) and [TxPipeline](https://godoc.org/github.com/go-redis/redis#example-Client-TxPipeline).
- [Scripting](https://godoc.org/github.com/go-redis/redis#Script).
- [Timeouts](https://godoc.org/github.com/go-redis/redis#Options).
- [Redis Sentinel](https://godoc.org/github.com/go-redis/redis#NewFailoverClient).
- [Redis Cluster](https://godoc.org/github.com/go-redis/redis#NewClusterClient).
- [Cluster of Redis Servers](https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup) without using cluster mode and Redis Sentinel.
- [Ring](https://godoc.org/github.com/go-redis/redis#NewRing).
- [Instrumentation](https://godoc.org/github.com/go-redis/redis#ex-package--Instrumentation).
- [Cache friendly](https://github.com/go-redis/cache).
- [Rate limiting](https://github.com/go-redis/redis_rate).
- [Distributed Locks](https://github.com/bsm/redislock).
API docs: https://godoc.org/github.com/go-redis/redis.
Examples: https://godoc.org/github.com/go-redis/redis#pkg-examples.
## Installation
Install:
```shell
go get -u github.com/go-redis/redis
```
Import:
```go
import "github.com/go-redis/redis"
```
## Quickstart
```go
func ExampleNewClient() {
client := redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "", // no password set
DB: 0, // use default DB
})
pong, err := client.Ping().Result()
fmt.Println(pong, err)
// Output: PONG <nil>
}
func ExampleClient() {
err := client.Set("key", "value", 0).Err()
if err != nil {
panic(err)
}
val, err := client.Get("key").Result()
if err != nil {
panic(err)
}
fmt.Println("key", val)
val2, err := client.Get("key2").Result()
if err == redis.Nil {
fmt.Println("key2 does not exist")
} else if err != nil {
panic(err)
} else {
fmt.Println("key2", val2)
}
// Output: key value
// key2 does not exist
}
```
## Howto
Please go through [examples](https://godoc.org/github.com/go-redis/redis#pkg-examples) to get an idea how to use this package.
## Look and feel
Some corner cases:
```go
// SET key value EX 10 NX
set, err := client.SetNX("key", "value", 10*time.Second).Result()
// SORT list LIMIT 0 2 ASC
vals, err := client.Sort("list", redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
vals, err := client.ZRangeByScoreWithScores("zset", redis.ZRangeBy{
Min: "-inf",
Max: "+inf",
Offset: 0,
Count: 2,
}).Result()
// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
vals, err := client.ZInterStore("out", redis.ZStore{Weights: []int64{2, 3}}, "zset1", "zset2").Result()
// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
vals, err := client.Eval("return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
// custom command
res, err := client.Do("set", "key", "value")
```
## See also
- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
- [Golang message task queue](https://github.com/vmihailenco/taskq)

1626
vendor/github.com/go-redis/redis/cluster.go generated vendored Normal file

File diff suppressed because it is too large

22
vendor/github.com/go-redis/redis/cluster_commands.go generated vendored Normal file

@@ -0,0 +1,22 @@
package redis
import "sync/atomic"
func (c *ClusterClient) DBSize() *IntCmd {
cmd := NewIntCmd("dbsize")
var size int64
err := c.ForEachMaster(func(master *Client) error {
n, err := master.DBSize().Result()
if err != nil {
return err
}
atomic.AddInt64(&size, n)
return nil
})
if err != nil {
cmd.setErr(err)
return cmd
}
cmd.val = size
return cmd
}
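
Usage looks the same as the single-node command; a minimal sketch (addresses are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-redis/redis"
)

func main() {
	// Any reachable seed node is enough; the client discovers the rest.
	c := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"localhost:7000"},
	})
	// DBSize sums the key count over every master, as implemented above.
	total, err := c.DBSize().Result()
	if err != nil {
		panic(err) // a single failing master fails the whole aggregate
	}
	fmt.Println("keys across all masters:", total)
}
```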

1919
vendor/github.com/go-redis/redis/command.go generated vendored Normal file

File diff suppressed because it is too large

2586
vendor/github.com/go-redis/redis/commands.go generated vendored Normal file

File diff suppressed because it is too large

4
vendor/github.com/go-redis/redis/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
/*
Package redis implements a Redis client.
*/
package redis

8
vendor/github.com/go-redis/redis/go.mod generated vendored Normal file

@@ -0,0 +1,8 @@
module github.com/go-redis/redis
require (
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
)
go 1.13

27
vendor/github.com/go-redis/redis/go.sum generated vendored Normal file

@@ -0,0 +1,27 @@
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

81
vendor/github.com/go-redis/redis/internal/consistenthash/consistenthash.go generated vendored Normal file

@@ -0,0 +1,81 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package consistenthash provides an implementation of a ring hash.
package consistenthash
import (
"hash/crc32"
"sort"
"strconv"
)
type Hash func(data []byte) uint32
type Map struct {
hash Hash
replicas int
keys []int // Sorted
hashMap map[int]string
}
func New(replicas int, fn Hash) *Map {
m := &Map{
replicas: replicas,
hash: fn,
hashMap: make(map[int]string),
}
if m.hash == nil {
m.hash = crc32.ChecksumIEEE
}
return m
}
// IsEmpty returns true if there are no items available.
func (m *Map) IsEmpty() bool {
return len(m.keys) == 0
}
// Add adds some keys to the hash.
func (m *Map) Add(keys ...string) {
for _, key := range keys {
for i := 0; i < m.replicas; i++ {
hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
m.keys = append(m.keys, hash)
m.hashMap[hash] = key
}
}
sort.Ints(m.keys)
}
// Get returns the closest item in the hash to the provided key.
func (m *Map) Get(key string) string {
if m.IsEmpty() {
return ""
}
hash := int(m.hash([]byte(key)))
// Binary search for appropriate replica.
idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
// Means we have cycled back to the first replica.
if idx == len(m.keys) {
idx = 0
}
return m.hashMap[m.keys[idx]]
}
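
A short in-package sketch of the ring in use (shard names and replica count are illustrative):

```go
// 100 virtual replicas per shard smooth out the key distribution;
// passing nil selects the default crc32.ChecksumIEEE hash.
m := New(100, nil)
m.Add("shard-a", "shard-b", "shard-c")
owner := m.Get("user:1234") // the same key always maps to the same shard
_ = owner
```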

90
vendor/github.com/go-redis/redis/internal/error.go generated vendored Normal file

@@ -0,0 +1,90 @@
package internal
import (
"io"
"net"
"strings"
"github.com/go-redis/redis/internal/proto"
)
func IsRetryableError(err error, retryTimeout bool) bool {
if err == nil {
return false
}
if err == io.EOF {
return true
}
if netErr, ok := err.(net.Error); ok {
if netErr.Timeout() {
return retryTimeout
}
return true
}
s := err.Error()
if s == "ERR max number of clients reached" {
return true
}
if strings.HasPrefix(s, "LOADING ") {
return true
}
if strings.HasPrefix(s, "READONLY ") {
return true
}
if strings.HasPrefix(s, "CLUSTERDOWN ") {
return true
}
return false
}
func IsRedisError(err error) bool {
_, ok := err.(proto.RedisError)
return ok
}
func IsBadConn(err error, allowTimeout bool) bool {
if err == nil {
return false
}
if IsRedisError(err) {
// #790
return IsReadOnlyError(err)
}
if allowTimeout {
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
return false
}
}
return true
}
func IsMovedError(err error) (moved bool, ask bool, addr string) {
if !IsRedisError(err) {
return
}
s := err.Error()
switch {
case strings.HasPrefix(s, "MOVED "):
moved = true
case strings.HasPrefix(s, "ASK "):
ask = true
default:
return
}
ind := strings.LastIndex(s, " ")
if ind == -1 {
return false, false, ""
}
addr = s[ind+1:]
return
}
func IsLoadingError(err error) bool {
return strings.HasPrefix(err.Error(), "LOADING ")
}
func IsReadOnlyError(err error) bool {
return strings.HasPrefix(err.Error(), "READONLY ")
}

77
vendor/github.com/go-redis/redis/internal/hashtag/hashtag.go generated vendored Normal file

@@ -0,0 +1,77 @@
package hashtag
import (
"math/rand"
"strings"
)
const slotNumber = 16384
// CRC16 implementation according to CCITT standards.
// Copyright 2001-2010 Georges Menie (www.menie.org)
// Copyright 2013 The Go Authors. All rights reserved.
// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
var crc16tab = [256]uint16{
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
}
func Key(key string) string {
if s := strings.IndexByte(key, '{'); s > -1 {
if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
return key[s+1 : s+e+1]
}
}
return key
}
func RandomSlot() int {
return rand.Intn(slotNumber)
}
// Slot returns a consistent slot number between 0 and 16383
// for any given string key.
func Slot(key string) int {
if key == "" {
return RandomSlot()
}
key = Key(key)
return int(crc16sum(key)) % slotNumber
}
func crc16sum(key string) (crc uint16) {
for i := 0; i < len(key); i++ {
crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
}
return
}
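
The practical effect of hash tags, sketched in-package with illustrative key names:

```go
// Only the substring inside the first {...} is hashed, so these two
// keys land in the same slot and can be used together in cluster mode.
a := Slot("user:{42}:profile")  // hashes "42"
b := Slot("user:{42}:settings") // hashes "42" as well, so a == b
_, _ = a, b
```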

24
vendor/github.com/go-redis/redis/internal/internal.go generated vendored Normal file

@@ -0,0 +1,24 @@
package internal
import (
"math/rand"
"time"
)
// RetryBackoff returns an exponential backoff duration with jitter,
// spreading retries out to prevent overloaded conditions during intervals.
// https://www.awsarchitectureblog.com/2015/03/backoff.html
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
if retry < 0 {
retry = 0
}
backoff := minBackoff << uint(retry)
if backoff > maxBackoff || backoff < minBackoff {
backoff = maxBackoff
}
if backoff == 0 {
return 0
}
return time.Duration(rand.Int63n(int64(backoff)))
}
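
For intuition, an in-package sketch of the retry loop this helper is meant to drive (bounds and attempt count are illustrative):

```go
// Attempt n sleeps a random duration in [0, min(minBackoff<<n, maxBackoff)),
// so concurrent retries spread out instead of stampeding the server.
for attempt := 0; attempt < 5; attempt++ {
	time.Sleep(RetryBackoff(attempt, 8*time.Millisecond, 512*time.Millisecond))
	// ... retry the operation here, breaking on success ...
}
```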

8
vendor/github.com/go-redis/redis/internal/log.go generated vendored Normal file

@@ -0,0 +1,8 @@
package internal
import (
"log"
"os"
)
var Logger = log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile)

60
vendor/github.com/go-redis/redis/internal/once.go generated vendored Normal file

@@ -0,0 +1,60 @@
/*
Copyright 2014 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"sync"
"sync/atomic"
)
// A Once will perform a successful action exactly once.
//
// Unlike a sync.Once, this Once's func returns an error
// and is re-armed on failure.
type Once struct {
m sync.Mutex
done uint32
}
// Do calls the function f if and only if Do has not been invoked
// without error for this instance of Once. In other words, given
// var once Once
// if once.Do(f) is called multiple times, only the first call will
// invoke f, even if f has a different value in each invocation, unless
// f returns an error. A new instance of Once is required for each
// function to execute.
//
// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
// err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
if atomic.LoadUint32(&o.done) == 1 {
return nil
}
// Slow-path.
o.m.Lock()
defer o.m.Unlock()
var err error
if o.done == 0 {
err = f()
if err == nil {
atomic.StoreUint32(&o.done, 1)
}
}
return err
}

110
vendor/github.com/go-redis/redis/internal/pool/conn.go generated vendored Normal file

@@ -0,0 +1,110 @@
package pool
import (
"context"
"net"
"sync/atomic"
"time"
"github.com/go-redis/redis/internal/proto"
)
var noDeadline = time.Time{}
type Conn struct {
netConn net.Conn
rd *proto.Reader
wr *proto.Writer
Inited bool
pooled bool
createdAt time.Time
usedAt int64 // atomic
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
netConn: netConn,
createdAt: time.Now(),
}
cn.rd = proto.NewReader(netConn)
cn.wr = proto.NewWriter(netConn)
cn.SetUsedAt(time.Now())
return cn
}
func (cn *Conn) UsedAt() time.Time {
unix := atomic.LoadInt64(&cn.usedAt)
return time.Unix(unix, 0)
}
func (cn *Conn) SetUsedAt(tm time.Time) {
atomic.StoreInt64(&cn.usedAt, tm.Unix())
}
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
cn.rd.Reset(netConn)
cn.wr.Reset(netConn)
}
func (cn *Conn) Write(b []byte) (int, error) {
return cn.netConn.Write(b)
}
func (cn *Conn) RemoteAddr() net.Addr {
return cn.netConn.RemoteAddr()
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
tm := cn.deadline(ctx, timeout)
_ = cn.netConn.SetReadDeadline(tm)
return fn(cn.rd)
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
tm := cn.deadline(ctx, timeout)
_ = cn.netConn.SetWriteDeadline(tm)
firstErr := fn(cn.wr)
err := cn.wr.Flush()
if err != nil && firstErr == nil {
firstErr = err
}
return firstErr
}
func (cn *Conn) Close() error {
return cn.netConn.Close()
}
func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
tm := time.Now()
cn.SetUsedAt(tm)
if timeout > 0 {
tm = tm.Add(timeout)
}
if ctx != nil {
deadline, ok := ctx.Deadline()
if ok {
if timeout == 0 {
return deadline
}
if deadline.Before(tm) {
return deadline
}
return tm
}
}
if timeout > 0 {
return tm
}
return noDeadline
}
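
The subtle part of Conn is deadline: it combines the per-call timeout with any context deadline, preferring whichever expires first, and returns the zero time (meaning "no deadline") when neither applies. A sketch of the same selection rules as a hypothetical free function, not part of the vendored file:

package main

import (
    "context"
    "fmt"
    "time"
)

// deadline mirrors Conn.deadline above: prefer the earlier of now+timeout
// and the context deadline; the zero time clears the socket deadline.
func deadline(ctx context.Context, timeout time.Duration) time.Time {
    tm := time.Now()
    if timeout > 0 {
        tm = tm.Add(timeout)
    }
    if ctx != nil {
        if d, ok := ctx.Deadline(); ok {
            if timeout == 0 || d.Before(tm) {
                return d
            }
            return tm
        }
    }
    if timeout > 0 {
        return tm
    }
    return time.Time{} // no deadline
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    // Context expires in 1s, per-call timeout is 3s: the context wins.
    fmt.Println(time.Until(deadline(ctx, 3*time.Second)).Round(time.Second)) // ~1s
    // No context deadline: the per-call timeout wins.
    fmt.Println(time.Until(deadline(context.Background(), 3*time.Second)).Round(time.Second)) // ~3s
}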

498
vendor/github.com/go-redis/redis/internal/pool/pool.go generated vendored Normal file

@@ -0,0 +1,498 @@
package pool
import (
"context"
"errors"
"net"
"sync"
"sync/atomic"
"time"
"github.com/go-redis/redis/internal"
)
var ErrClosed = errors.New("redis: client is closed")
var ErrPoolTimeout = errors.New("redis: connection pool timeout")
var timers = sync.Pool{
New: func() interface{} {
t := time.NewTimer(time.Hour)
t.Stop()
return t
},
}
// Stats contains pool state information and accumulated stats.
type Stats struct {
Hits uint32 // number of times a free connection was found in the pool
Misses uint32 // number of times a free connection was NOT found in the pool
Timeouts uint32 // number of times a wait timeout occurred
TotalConns uint32 // number of total connections in the pool
IdleConns uint32 // number of idle connections in the pool
StaleConns uint32 // number of stale connections removed from the pool
}
type Pooler interface {
NewConn(context.Context) (*Conn, error)
CloseConn(*Conn) error
Get(context.Context) (*Conn, error)
Put(*Conn)
Remove(*Conn)
Len() int
IdleLen() int
Stats() *Stats
Close() error
}
type Options struct {
Dialer func(c context.Context) (net.Conn, error)
OnClose func(*Conn) error
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
}
type ConnPool struct {
opt *Options
dialErrorsNum uint32 // atomic
lastDialErrorMu sync.RWMutex
lastDialError error
queue chan struct{}
connsMu sync.Mutex
conns []*Conn
idleConns []*Conn
poolSize int
idleConnsLen int
stats Stats
_closed uint32 // atomic
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
opt: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
idleConns: make([]*Conn, 0, opt.PoolSize),
}
p.checkMinIdleConns()
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
return p
}
func (p *ConnPool) checkMinIdleConns() {
if p.opt.MinIdleConns == 0 {
return
}
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
go func() {
err := p.addIdleConn()
if err != nil {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
p.connsMu.Unlock()
}
}()
}
}
func (p *ConnPool) addIdleConn() error {
cn, err := p.newConn(context.TODO(), true)
if err != nil {
return err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
p.idleConns = append(p.idleConns, cn)
p.connsMu.Unlock()
return nil
}
func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
return p._NewConn(ctx, false)
}
func (p *ConnPool) _NewConn(ctx context.Context, pooled bool) (*Conn, error) {
cn, err := p.newConn(ctx, pooled)
if err != nil {
return nil, err
}
p.connsMu.Lock()
p.conns = append(p.conns, cn)
if pooled {
// If the pool is full, remove this cn on the next Put.
if p.poolSize >= p.opt.PoolSize {
cn.pooled = false
} else {
p.poolSize++
}
}
p.connsMu.Unlock()
return cn, nil
}
func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
return nil, p.getLastDialError()
}
netConn, err := p.opt.Dialer(ctx)
if err != nil {
p.setLastDialError(err)
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
go p.tryDial()
}
return nil, err
}
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
}
func (p *ConnPool) tryDial() {
for {
if p.closed() {
return
}
conn, err := p.opt.Dialer(context.Background())
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
continue
}
atomic.StoreUint32(&p.dialErrorsNum, 0)
_ = conn.Close()
return
}
}
func (p *ConnPool) setLastDialError(err error) {
p.lastDialErrorMu.Lock()
p.lastDialError = err
p.lastDialErrorMu.Unlock()
}
func (p *ConnPool) getLastDialError() error {
p.lastDialErrorMu.RLock()
err := p.lastDialError
p.lastDialErrorMu.RUnlock()
return err
}
// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
err := p.waitTurn(ctx)
if err != nil {
return nil, err
}
for {
p.connsMu.Lock()
cn := p.popIdle()
p.connsMu.Unlock()
if cn == nil {
break
}
if p.isStaleConn(cn) {
_ = p.CloseConn(cn)
continue
}
atomic.AddUint32(&p.stats.Hits, 1)
return cn, nil
}
atomic.AddUint32(&p.stats.Misses, 1)
newcn, err := p._NewConn(ctx, true)
if err != nil {
p.freeTurn()
return nil, err
}
return newcn, nil
}
func (p *ConnPool) getTurn() {
p.queue <- struct{}{}
}
func (p *ConnPool) waitTurn(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
select {
case p.queue <- struct{}{}:
return nil
default:
}
timer := timers.Get().(*time.Timer)
timer.Reset(p.opt.PoolTimeout)
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return ctx.Err()
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
return nil
case <-timer.C:
timers.Put(timer)
atomic.AddUint32(&p.stats.Timeouts, 1)
return ErrPoolTimeout
}
}
func (p *ConnPool) freeTurn() {
<-p.queue
}
func (p *ConnPool) popIdle() *Conn {
if len(p.idleConns) == 0 {
return nil
}
idx := len(p.idleConns) - 1
cn := p.idleConns[idx]
p.idleConns = p.idleConns[:idx]
p.idleConnsLen--
p.checkMinIdleConns()
return cn
}
func (p *ConnPool) Put(cn *Conn) {
if !cn.pooled {
p.Remove(cn)
return
}
p.connsMu.Lock()
p.idleConns = append(p.idleConns, cn)
p.idleConnsLen++
p.connsMu.Unlock()
p.freeTurn()
}
func (p *ConnPool) Remove(cn *Conn) {
p.removeConnWithLock(cn)
p.freeTurn()
_ = p.closeConn(cn)
}
func (p *ConnPool) CloseConn(cn *Conn) error {
p.removeConnWithLock(cn)
return p.closeConn(cn)
}
func (p *ConnPool) removeConnWithLock(cn *Conn) {
p.connsMu.Lock()
p.removeConn(cn)
p.connsMu.Unlock()
}
func (p *ConnPool) removeConn(cn *Conn) {
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
if cn.pooled {
p.poolSize--
p.checkMinIdleConns()
}
return
}
}
}
func (p *ConnPool) closeConn(cn *Conn) error {
if p.opt.OnClose != nil {
_ = p.opt.OnClose(cn)
}
return cn.Close()
}
// Len returns the total number of connections.
func (p *ConnPool) Len() int {
p.connsMu.Lock()
n := len(p.conns)
p.connsMu.Unlock()
return n
}
// IdleLen returns the number of idle connections.
func (p *ConnPool) IdleLen() int {
p.connsMu.Lock()
n := p.idleConnsLen
p.connsMu.Unlock()
return n
}
func (p *ConnPool) Stats() *Stats {
idleLen := p.IdleLen()
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
IdleConns: uint32(idleLen),
StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
}
}
func (p *ConnPool) closed() bool {
return atomic.LoadUint32(&p._closed) == 1
}
func (p *ConnPool) Filter(fn func(*Conn) bool) error {
var firstErr error
p.connsMu.Lock()
for _, cn := range p.conns {
if fn(cn) {
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
}
p.connsMu.Unlock()
return firstErr
}
func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
var firstErr error
p.connsMu.Lock()
for _, cn := range p.conns {
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
p.conns = nil
p.poolSize = 0
p.idleConns = nil
p.idleConnsLen = 0
p.connsMu.Unlock()
return firstErr
}
func (p *ConnPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for range ticker.C {
if p.closed() {
break
}
_, err := p.ReapStaleConns()
if err != nil {
internal.Logger.Printf("ReapStaleConns failed: %s", err)
continue
}
}
}
func (p *ConnPool) ReapStaleConns() (int, error) {
var n int
for {
p.getTurn()
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
p.freeTurn()
if cn != nil {
_ = p.closeConn(cn)
n++
} else {
break
}
}
atomic.AddUint32(&p.stats.StaleConns, uint32(n))
return n, nil
}
func (p *ConnPool) reapStaleConn() *Conn {
if len(p.idleConns) == 0 {
return nil
}
cn := p.idleConns[0]
if !p.isStaleConn(cn) {
return nil
}
p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
p.idleConnsLen--
p.removeConn(cn)
return cn
}
func (p *ConnPool) isStaleConn(cn *Conn) bool {
if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
return false
}
now := time.Now()
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
return true
}
if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
return true
}
return false
}
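
The pool options above surface directly on the public client. A hedged usage sketch; it assumes a Redis server on localhost:6379 and uses the public client API rather than the internal pool:

package main

import (
    "fmt"

    "github.com/go-redis/redis"
)

func main() {
    // PoolSize bounds concurrent connections (the queue channel above),
    // MinIdleConns pre-dials idle connections via checkMinIdleConns.
    client := redis.NewClient(&redis.Options{
        Addr:         "localhost:6379",
        PoolSize:     4,
        MinIdleConns: 2,
    })
    defer client.Close()

    if err := client.Ping().Err(); err != nil {
        panic(err)
    }

    stats := client.PoolStats()
    fmt.Printf("hits=%d misses=%d timeouts=%d total=%d idle=%d\n",
        stats.Hits, stats.Misses, stats.Timeouts, stats.TotalConns, stats.IdleConns)
}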

55
vendor/github.com/go-redis/redis/internal/pool/pool_single.go generated vendored Normal file

@@ -0,0 +1,55 @@
package pool
import "context"
type SingleConnPool struct {
cn *Conn
}
var _ Pooler = (*SingleConnPool)(nil)
func NewSingleConnPool(cn *Conn) *SingleConnPool {
return &SingleConnPool{
cn: cn,
}
}
func (p *SingleConnPool) NewConn(context.Context) (*Conn, error) {
panic("not implemented")
}
func (p *SingleConnPool) CloseConn(*Conn) error {
panic("not implemented")
}
func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
return p.cn, nil
}
func (p *SingleConnPool) Put(cn *Conn) {
if p.cn != cn {
panic("p.cn != cn")
}
}
func (p *SingleConnPool) Remove(cn *Conn) {
if p.cn != cn {
panic("p.cn != cn")
}
}
func (p *SingleConnPool) Len() int {
return 1
}
func (p *SingleConnPool) IdleLen() int {
return 0
}
func (p *SingleConnPool) Stats() *Stats {
return nil
}
func (p *SingleConnPool) Close() error {
return nil
}

112
vendor/github.com/go-redis/redis/internal/pool/pool_sticky.go generated vendored Normal file

@@ -0,0 +1,112 @@
package pool
import (
"context"
"sync"
)
type StickyConnPool struct {
pool *ConnPool
reusable bool
cn *Conn
closed bool
mu sync.Mutex
}
var _ Pooler = (*StickyConnPool)(nil)
func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
return &StickyConnPool{
pool: pool,
reusable: reusable,
}
}
func (p *StickyConnPool) NewConn(context.Context) (*Conn, error) {
panic("not implemented")
}
func (p *StickyConnPool) CloseConn(*Conn) error {
panic("not implemented")
}
func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil, ErrClosed
}
if p.cn != nil {
return p.cn, nil
}
cn, err := p.pool.Get(ctx)
if err != nil {
return nil, err
}
p.cn = cn
return cn, nil
}
func (p *StickyConnPool) putUpstream() {
p.pool.Put(p.cn)
p.cn = nil
}
func (p *StickyConnPool) Put(cn *Conn) {}
func (p *StickyConnPool) removeUpstream() {
p.pool.Remove(p.cn)
p.cn = nil
}
func (p *StickyConnPool) Remove(cn *Conn) {
p.removeUpstream()
}
func (p *StickyConnPool) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.cn == nil {
return 0
}
return 1
}
func (p *StickyConnPool) IdleLen() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.cn == nil {
return 1
}
return 0
}
func (p *StickyConnPool) Stats() *Stats {
return nil
}
func (p *StickyConnPool) Close() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return ErrClosed
}
p.closed = true
if p.cn != nil {
if p.reusable {
p.putUpstream()
} else {
p.removeUpstream()
}
}
return nil
}

294
vendor/github.com/go-redis/redis/internal/proto/reader.go generated vendored Normal file

@@ -0,0 +1,294 @@
package proto
import (
"bufio"
"fmt"
"io"
"strconv"
"github.com/go-redis/redis/internal/util"
)
const (
ErrorReply = '-'
StatusReply = '+'
IntReply = ':'
StringReply = '$'
ArrayReply = '*'
)
//------------------------------------------------------------------------------
const Nil = RedisError("redis: nil")
type RedisError string
func (e RedisError) Error() string { return string(e) }
//------------------------------------------------------------------------------
type MultiBulkParse func(*Reader, int64) (interface{}, error)
type Reader struct {
rd *bufio.Reader
_buf []byte
}
func NewReader(rd io.Reader) *Reader {
return &Reader{
rd: bufio.NewReader(rd),
_buf: make([]byte, 64),
}
}
func (r *Reader) Reset(rd io.Reader) {
r.rd.Reset(rd)
}
func (r *Reader) ReadLine() ([]byte, error) {
line, isPrefix, err := r.rd.ReadLine()
if err != nil {
return nil, err
}
if isPrefix {
return nil, bufio.ErrBufferFull
}
if len(line) == 0 {
return nil, fmt.Errorf("redis: reply is empty")
}
if isNilReply(line) {
return nil, Nil
}
return line, nil
}
func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case StatusReply:
return string(line[1:]), nil
case IntReply:
return util.ParseInt(line[1:], 10, 64)
case StringReply:
return r.readStringReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
return nil, err
}
if m == nil {
err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
return nil, err
}
return m(r, n)
}
return nil, fmt.Errorf("redis: can't parse %.100q", line)
}
func (r *Reader) ReadIntReply() (int64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case ErrorReply:
return 0, ParseErrorReply(line)
case IntReply:
return util.ParseInt(line[1:], 10, 64)
default:
return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
}
}
func (r *Reader) ReadString() (string, error) {
line, err := r.ReadLine()
if err != nil {
return "", err
}
switch line[0] {
case ErrorReply:
return "", ParseErrorReply(line)
case StringReply:
return r.readStringReply(line)
case StatusReply:
return string(line[1:]), nil
case IntReply:
return string(line[1:]), nil
default:
return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
}
}
func (r *Reader) readStringReply(line []byte) (string, error) {
if isNilReply(line) {
return "", Nil
}
replyLen, err := strconv.Atoi(string(line[1:]))
if err != nil {
return "", err
}
b := make([]byte, replyLen+2)
_, err = io.ReadFull(r.rd, b)
if err != nil {
return "", err
}
return util.BytesToString(b[:replyLen]), nil
}
func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case ArrayReply:
n, err := parseArrayLen(line)
if err != nil {
return nil, err
}
return m(r, n)
default:
return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
}
}
func (r *Reader) ReadArrayLen() (int64, error) {
line, err := r.ReadLine()
if err != nil {
return 0, err
}
switch line[0] {
case ErrorReply:
return 0, ParseErrorReply(line)
case ArrayReply:
return parseArrayLen(line)
default:
return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
}
}
func (r *Reader) ReadScanReply() ([]string, uint64, error) {
n, err := r.ReadArrayLen()
if err != nil {
return nil, 0, err
}
if n != 2 {
return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
}
cursor, err := r.ReadUint()
if err != nil {
return nil, 0, err
}
n, err = r.ReadArrayLen()
if err != nil {
return nil, 0, err
}
keys := make([]string, n)
for i := int64(0); i < n; i++ {
key, err := r.ReadString()
if err != nil {
return nil, 0, err
}
keys[i] = key
}
return keys, cursor, err
}
func (r *Reader) ReadInt() (int64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseInt(b, 10, 64)
}
func (r *Reader) ReadUint() (uint64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseUint(b, 10, 64)
}
func (r *Reader) ReadFloatReply() (float64, error) {
b, err := r.readTmpBytesReply()
if err != nil {
return 0, err
}
return util.ParseFloat(b, 64)
}
func (r *Reader) readTmpBytesReply() ([]byte, error) {
line, err := r.ReadLine()
if err != nil {
return nil, err
}
switch line[0] {
case ErrorReply:
return nil, ParseErrorReply(line)
case StringReply:
return r._readTmpBytesReply(line)
case StatusReply:
return line[1:], nil
default:
return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
}
}
func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
if isNilReply(line) {
return nil, Nil
}
replyLen, err := strconv.Atoi(string(line[1:]))
if err != nil {
return nil, err
}
buf := r.buf(replyLen + 2)
_, err = io.ReadFull(r.rd, buf)
if err != nil {
return nil, err
}
return buf[:replyLen], nil
}
func (r *Reader) buf(n int) []byte {
if d := n - cap(r._buf); d > 0 {
r._buf = append(r._buf, make([]byte, d)...)
}
return r._buf[:n]
}
func isNilReply(b []byte) bool {
return len(b) == 3 &&
(b[0] == StringReply || b[0] == ArrayReply) &&
b[1] == '-' && b[2] == '1'
}
func ParseErrorReply(line []byte) error {
return RedisError(string(line[1:]))
}
func parseArrayLen(line []byte) (int64, error) {
if isNilReply(line) {
return 0, Nil
}
return util.ParseInt(line[1:], 10, 64)
}
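
The Reader parses RESP framing directly from the wire bytes. A sketch feeding raw frames from a bytes.Buffer; since proto is an internal package, this is written as a hypothetical test file that would sit alongside it:

package proto_test

import (
    "bytes"
    "fmt"

    "github.com/go-redis/redis/internal/proto"
)

// Feed raw RESP frames to the Reader: a status reply, an integer reply
// and a bulk string reply.
func Example_reader() {
    wire := bytes.NewBufferString("+OK\r\n:42\r\n$5\r\nhello\r\n")
    rd := proto.NewReader(wire)

    status, _ := rd.ReadString()  // "+OK" parses as "OK"
    n, _ := rd.ReadIntReply()     // ":42" parses as 42
    bulk, _ := rd.ReadString()    // "$5\r\nhello\r\n" parses as "hello"

    fmt.Println(status, n, bulk)
    // Output: OK 42 hello
}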

166
vendor/github.com/go-redis/redis/internal/proto/scan.go generated vendored Normal file

@@ -0,0 +1,166 @@
package proto
import (
"encoding"
"fmt"
"reflect"
"github.com/go-redis/redis/internal/util"
)
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
case nil:
return fmt.Errorf("redis: Scan(nil)")
case *string:
*v = util.BytesToString(b)
return nil
case *[]byte:
*v = b
return nil
case *int:
var err error
*v, err = util.Atoi(b)
return err
case *int8:
n, err := util.ParseInt(b, 10, 8)
if err != nil {
return err
}
*v = int8(n)
return nil
case *int16:
n, err := util.ParseInt(b, 10, 16)
if err != nil {
return err
}
*v = int16(n)
return nil
case *int32:
n, err := util.ParseInt(b, 10, 32)
if err != nil {
return err
}
*v = int32(n)
return nil
case *int64:
n, err := util.ParseInt(b, 10, 64)
if err != nil {
return err
}
*v = n
return nil
case *uint:
n, err := util.ParseUint(b, 10, 64)
if err != nil {
return err
}
*v = uint(n)
return nil
case *uint8:
n, err := util.ParseUint(b, 10, 8)
if err != nil {
return err
}
*v = uint8(n)
return nil
case *uint16:
n, err := util.ParseUint(b, 10, 16)
if err != nil {
return err
}
*v = uint16(n)
return nil
case *uint32:
n, err := util.ParseUint(b, 10, 32)
if err != nil {
return err
}
*v = uint32(n)
return nil
case *uint64:
n, err := util.ParseUint(b, 10, 64)
if err != nil {
return err
}
*v = n
return nil
case *float32:
n, err := util.ParseFloat(b, 32)
if err != nil {
return err
}
*v = float32(n)
return err
case *float64:
var err error
*v, err = util.ParseFloat(b, 64)
return err
case *bool:
*v = len(b) == 1 && b[0] == '1'
return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
default:
return fmt.Errorf(
"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
}
}
func ScanSlice(data []string, slice interface{}) error {
v := reflect.ValueOf(slice)
if !v.IsValid() {
return fmt.Errorf("redis: ScanSlice(nil)")
}
if v.Kind() != reflect.Ptr {
return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
}
v = v.Elem()
if v.Kind() != reflect.Slice {
return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
}
next := makeSliceNextElemFunc(v)
for i, s := range data {
elem := next()
if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
return err
}
}
return nil
}
func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
elemType := v.Type().Elem()
if elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
return func() reflect.Value {
if v.Len() < v.Cap() {
v.Set(v.Slice(0, v.Len()+1))
elem := v.Index(v.Len() - 1)
if elem.IsNil() {
elem.Set(reflect.New(elemType))
}
return elem.Elem()
}
elem := reflect.New(elemType)
v.Set(reflect.Append(v, elem))
return elem.Elem()
}
}
zero := reflect.Zero(elemType)
return func() reflect.Value {
if v.Len() < v.Cap() {
v.Set(v.Slice(0, v.Len()+1))
return v.Index(v.Len() - 1)
}
v.Set(reflect.Append(v, zero))
return v.Index(v.Len() - 1)
}
}
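
Scan converts one raw reply into a Go value via the type switch above; ScanSlice grows the destination slice element by element. A sketch, again as a hypothetical test file next to the internal package:

package proto_test

import (
    "fmt"

    "github.com/go-redis/redis/internal/proto"
)

func Example_scan() {
    var n int
    _ = proto.Scan([]byte("42"), &n) // integers parse via util.Atoi

    var ok bool
    _ = proto.Scan([]byte("1"), &ok) // bools are true only for "1"

    var xs []int
    // ScanSlice grows xs one element at a time via makeSliceNextElemFunc.
    _ = proto.ScanSlice([]string{"1", "2", "3"}, &xs)

    fmt.Println(n, ok, xs)
    // Output: 42 true [1 2 3]
}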

161
vendor/github.com/go-redis/redis/internal/proto/writer.go generated vendored Normal file

@@ -0,0 +1,161 @@
package proto
import (
"bufio"
"encoding"
"fmt"
"io"
"strconv"
"time"
"github.com/go-redis/redis/internal/util"
)
type Writer struct {
wr *bufio.Writer
lenBuf []byte
numBuf []byte
}
func NewWriter(wr io.Writer) *Writer {
return &Writer{
wr: bufio.NewWriter(wr),
lenBuf: make([]byte, 64),
numBuf: make([]byte, 64),
}
}
func (w *Writer) WriteArgs(args []interface{}) error {
err := w.wr.WriteByte(ArrayReply)
if err != nil {
return err
}
err = w.writeLen(len(args))
if err != nil {
return err
}
for _, arg := range args {
err := w.writeArg(arg)
if err != nil {
return err
}
}
return nil
}
func (w *Writer) writeLen(n int) error {
w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
w.lenBuf = append(w.lenBuf, '\r', '\n')
_, err := w.wr.Write(w.lenBuf)
return err
}
func (w *Writer) writeArg(v interface{}) error {
switch v := v.(type) {
case nil:
return w.string("")
case string:
return w.string(v)
case []byte:
return w.bytes(v)
case int:
return w.int(int64(v))
case int8:
return w.int(int64(v))
case int16:
return w.int(int64(v))
case int32:
return w.int(int64(v))
case int64:
return w.int(v)
case uint:
return w.uint(uint64(v))
case uint8:
return w.uint(uint64(v))
case uint16:
return w.uint(uint64(v))
case uint32:
return w.uint(uint64(v))
case uint64:
return w.uint(v)
case float32:
return w.float(float64(v))
case float64:
return w.float(v)
case bool:
if v {
return w.int(1)
}
return w.int(0)
case time.Time:
return w.string(v.Format(time.RFC3339))
case encoding.BinaryMarshaler:
b, err := v.MarshalBinary()
if err != nil {
return err
}
return w.bytes(b)
default:
return fmt.Errorf(
"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
}
}
func (w *Writer) bytes(b []byte) error {
err := w.wr.WriteByte(StringReply)
if err != nil {
return err
}
err = w.writeLen(len(b))
if err != nil {
return err
}
_, err = w.wr.Write(b)
if err != nil {
return err
}
return w.crlf()
}
func (w *Writer) string(s string) error {
return w.bytes(util.StringToBytes(s))
}
func (w *Writer) uint(n uint64) error {
w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
return w.bytes(w.numBuf)
}
func (w *Writer) int(n int64) error {
w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
return w.bytes(w.numBuf)
}
func (w *Writer) float(f float64) error {
w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
return w.bytes(w.numBuf)
}
func (w *Writer) crlf() error {
err := w.wr.WriteByte('\r')
if err != nil {
return err
}
return w.wr.WriteByte('\n')
}
func (w *Writer) Reset(wr io.Writer) {
w.wr.Reset(wr)
}
func (w *Writer) Flush() error {
return w.wr.Flush()
}
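
WriteArgs emits a RESP array in which every element, including numbers and bools, is encoded as a bulk string. A sketch showing the resulting wire bytes, as a hypothetical test file next to the internal package:

package proto_test

import (
    "bytes"
    "fmt"

    "github.com/go-redis/redis/internal/proto"
)

func Example_writer() {
    var buf bytes.Buffer
    wr := proto.NewWriter(&buf)

    // Every argument becomes a bulk string: "*3" for the array header,
    // then one "$<len>" frame per argument.
    _ = wr.WriteArgs([]interface{}{"SET", "key", 42})
    _ = wr.Flush()

    fmt.Printf("%q\n", buf.String())
    // Output: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$2\r\n42\r\n"
}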

29
vendor/github.com/go-redis/redis/internal/util.go generated vendored Normal file

@@ -0,0 +1,29 @@
package internal
import "github.com/go-redis/redis/internal/util"
func ToLower(s string) string {
if isLower(s) {
return s
}
b := make([]byte, len(s))
for i := range b {
c := s[i]
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A'
}
b[i] = c
}
return util.BytesToString(b)
}
func isLower(s string) bool {
for i := 0; i < len(s); i++ {
c := s[i]
if c >= 'A' && c <= 'Z' {
return false
}
}
return true
}

11
vendor/github.com/go-redis/redis/internal/util/safe.go generated vendored Normal file

@@ -0,0 +1,11 @@
// +build appengine

package util
func BytesToString(b []byte) string {
return string(b)
}
func StringToBytes(s string) []byte {
return []byte(s)
}

19
vendor/github.com/go-redis/redis/internal/util/strconv.go generated vendored Normal file

@@ -0,0 +1,19 @@
package util
import "strconv"
func Atoi(b []byte) (int, error) {
return strconv.Atoi(BytesToString(b))
}
func ParseInt(b []byte, base int, bitSize int) (int64, error) {
return strconv.ParseInt(BytesToString(b), base, bitSize)
}
func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
return strconv.ParseUint(BytesToString(b), base, bitSize)
}
func ParseFloat(b []byte, bitSize int) (float64, error) {
return strconv.ParseFloat(BytesToString(b), bitSize)
}

22
vendor/github.com/go-redis/redis/internal/util/unsafe.go generated vendored Normal file

@@ -0,0 +1,22 @@
// +build !appengine

package util
import (
"unsafe"
)
// BytesToString converts byte slice to string.
func BytesToString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
// StringToBytes converts string to byte slice.
func StringToBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(
&struct {
string
Cap int
}{s, len(s)},
))
}
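
The zero-copy conversions above alias the original memory, so the input byte slice must not be mutated after conversion. A small illustration of the hazard, using the safe copying conversion for contrast:

package main

import "fmt"

func main() {
    b := []byte("hello")

    // The safe conversion copies, so later writes to b do not affect s.
    s := string(b)
    b[0] = 'H'
    fmt.Println(s, string(b)) // hello Hello

    // With BytesToString above, s would instead share b's backing array,
    // and the mutation would be visible through the "immutable" string.
}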

75
vendor/github.com/go-redis/redis/iterator.go generated vendored Normal file

@@ -0,0 +1,75 @@
package redis
import (
"sync"
)
// ScanIterator is used to incrementally iterate over a collection of elements.
// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
mu sync.Mutex // protects cmd and pos
cmd *ScanCmd
pos int
}
// Err returns the last iterator error, if any.
func (it *ScanIterator) Err() error {
it.mu.Lock()
err := it.cmd.Err()
it.mu.Unlock()
return err
}
// Next advances the cursor and returns true if more values can be read.
func (it *ScanIterator) Next() bool {
it.mu.Lock()
defer it.mu.Unlock()
// Return immediately on errors.
if it.cmd.Err() != nil {
return false
}
// Advance cursor, check if we are still within range.
if it.pos < len(it.cmd.page) {
it.pos++
return true
}
for {
// Return if there is no more data to fetch.
if it.cmd.cursor == 0 {
return false
}
// Fetch next page.
if it.cmd._args[0] == "scan" {
it.cmd._args[1] = it.cmd.cursor
} else {
it.cmd._args[2] = it.cmd.cursor
}
err := it.cmd.process(it.cmd)
if err != nil {
return false
}
it.pos = 1
// Redis can occasionally return an empty page.
if len(it.cmd.page) > 0 {
return true
}
}
}
// Val returns the key/field at the current cursor position.
func (it *ScanIterator) Val() string {
var v string
it.mu.Lock()
if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
v = it.cmd.page[it.pos-1]
}
it.mu.Unlock()
return v
}
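
A ScanIterator is normally obtained from ScanCmd.Iterator() (defined in command.go, not shown in this diff). A typical loop; it assumes a reachable server, and the "user:*" pattern and count of 10 are illustrative:

package main

import (
    "fmt"

    "github.com/go-redis/redis"
)

func main() {
    client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer client.Close()

    // Next fetches SCAN cursor pages transparently; Val yields one key
    // at a time from the current page.
    iter := client.Scan(0, "user:*", 10).Iterator()
    for iter.Next() {
        fmt.Println("key:", iter.Val())
    }
    if err := iter.Err(); err != nil {
        panic(err)
    }
}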

228
vendor/github.com/go-redis/redis/options.go generated vendored Normal file

@@ -0,0 +1,228 @@
package redis
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"runtime"
"strconv"
"strings"
"time"
"github.com/go-redis/redis/internal/pool"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
type Limiter interface {
// Allow returns nil if operation is allowed or an error otherwise.
// If operation is allowed client must ReportResult of the operation
// whether it is a success or a failure.
Allow() error
// ReportResult reports the result of previously allowed operation.
// nil indicates a success, non-nil error indicates a failure.
ReportResult(result error)
}
type Options struct {
// The network type, either tcp or unix.
// Default is tcp.
Network string
// host:port address.
Addr string
// Dialer creates a new network connection and has priority over
// Network and Addr options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
// Hook that is called when a new connection is established.
OnConnect func(*Conn) error
// Optional password. Must match the password specified in the
// requirepass server configuration option.
Password string
// Database to be selected after connecting to the server.
DB int
// Maximum number of retries before giving up.
// Default is to not retry failed commands.
MaxRetries int
// Minimum backoff between each retry.
// Default is 8 milliseconds; -1 disables backoff.
MinRetryBackoff time.Duration
// Maximum backoff between each retry.
// Default is 512 milliseconds; -1 disables backoff.
MaxRetryBackoff time.Duration
// Dial timeout for establishing new connections.
// Default is 5 seconds.
DialTimeout time.Duration
// Timeout for socket reads. If reached, commands will fail
// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
// Default is 3 seconds.
ReadTimeout time.Duration
// Timeout for socket writes. If reached, commands will fail
// with a timeout instead of blocking.
// Default is ReadTimeout.
WriteTimeout time.Duration
// Maximum number of socket connections.
// Default is 10 connections per every CPU as reported by runtime.NumCPU.
PoolSize int
// Minimum number of idle connections, which is useful when establishing
// a new connection is slow.
MinIdleConns int
// Connection age at which client retires (closes) the connection.
// Default is to not close aged connections.
MaxConnAge time.Duration
// Amount of time the client waits for a connection if all connections
// are busy before returning an error.
// Default is ReadTimeout + 1 second.
PoolTimeout time.Duration
// Amount of time after which client closes idle connections.
// Should be less than server's timeout.
// Default is 5 minutes. -1 disables idle timeout check.
IdleTimeout time.Duration
// Frequency of idle checks made by idle connections reaper.
// Default is 1 minute. -1 disables idle connections reaper,
// but idle connections are still discarded by the client
// if IdleTimeout is set.
IdleCheckFrequency time.Duration
// Enables read only queries on slave nodes.
readOnly bool
// TLS Config to use. When set TLS will be negotiated.
TLSConfig *tls.Config
}
func (opt *Options) init() {
if opt.Network == "" {
opt.Network = "tcp"
}
if opt.Addr == "" {
opt.Addr = "localhost:6379"
}
if opt.Dialer == nil {
opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
netDialer := &net.Dialer{
Timeout: opt.DialTimeout,
KeepAlive: 5 * time.Minute,
}
if opt.TLSConfig == nil {
return netDialer.Dial(network, addr)
}
return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
}
}
if opt.PoolSize == 0 {
opt.PoolSize = 10 * runtime.NumCPU()
}
if opt.DialTimeout == 0 {
opt.DialTimeout = 5 * time.Second
}
switch opt.ReadTimeout {
case -1:
opt.ReadTimeout = 0
case 0:
opt.ReadTimeout = 3 * time.Second
}
switch opt.WriteTimeout {
case -1:
opt.WriteTimeout = 0
case 0:
opt.WriteTimeout = opt.ReadTimeout
}
if opt.PoolTimeout == 0 {
opt.PoolTimeout = opt.ReadTimeout + time.Second
}
if opt.IdleTimeout == 0 {
opt.IdleTimeout = 5 * time.Minute
}
if opt.IdleCheckFrequency == 0 {
opt.IdleCheckFrequency = time.Minute
}
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
case 0:
opt.MinRetryBackoff = 8 * time.Millisecond
}
switch opt.MaxRetryBackoff {
case -1:
opt.MaxRetryBackoff = 0
case 0:
opt.MaxRetryBackoff = 512 * time.Millisecond
}
}
// ParseURL parses a URL into Options that can be used to connect to Redis.
func ParseURL(redisURL string) (*Options, error) {
o := &Options{Network: "tcp"}
u, err := url.Parse(redisURL)
if err != nil {
return nil, err
}
if u.Scheme != "redis" && u.Scheme != "rediss" {
return nil, errors.New("invalid redis URL scheme: " + u.Scheme)
}
if u.User != nil {
if p, ok := u.User.Password(); ok {
o.Password = p
}
}
if len(u.Query()) > 0 {
return nil, errors.New("no options supported")
}
h, p, err := net.SplitHostPort(u.Host)
if err != nil {
h = u.Host
}
if h == "" {
h = "localhost"
}
if p == "" {
p = "6379"
}
o.Addr = net.JoinHostPort(h, p)
f := strings.FieldsFunc(u.Path, func(r rune) bool {
return r == '/'
})
switch len(f) {
case 0:
o.DB = 0
case 1:
if o.DB, err = strconv.Atoi(f[0]); err != nil {
return nil, fmt.Errorf("invalid redis database number: %q", f[0])
}
default:
return nil, errors.New("invalid redis URL path: " + u.Path)
}
if u.Scheme == "rediss" {
o.TLSConfig = &tls.Config{ServerName: h}
}
return o, nil
}
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(c context.Context) (net.Conn, error) {
return opt.Dialer(c, opt.Network, opt.Addr)
},
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
})
}
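
ParseURL is the usual way to build Options from a connection string; the rediss scheme enables TLS with ServerName set to the host. A short sketch (the credentials and host are illustrative):

package main

import (
    "fmt"

    "github.com/go-redis/redis"
)

func main() {
    opt, err := redis.ParseURL("rediss://:secret@example.com:6380/1")
    if err != nil {
        panic(err)
    }
    fmt.Println(opt.Addr, opt.DB, opt.TLSConfig != nil)
    // example.com:6380 1 true

    client := redis.NewClient(opt)
    defer client.Close()
}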

142
vendor/github.com/go-redis/redis/pipeline.go generated vendored Normal file

@@ -0,0 +1,142 @@
package redis
import (
"context"
"sync"
"github.com/go-redis/redis/internal/pool"
)
type pipelineExecer func(context.Context, []Cmder) error
// Pipeliner is a mechanism to realise the Redis Pipeline technique.
//
// Pipelining is a technique that can dramatically speed up processing by
// packing operations into batches, sending them to Redis at once and
// reading the replies in a single step.
// See https://redis.io/topics/pipelining
//
// Note that Pipeline is not a transaction, so you can get unexpected
// results with big pipelines and small read/write timeouts: the Redis
// client retransmits on timeouts, so a pipeline can be retransmitted and
// commands can be executed more than once.
// To avoid this, use read/write timeouts sized to your batch size and/or
// use TxPipeline.
type Pipeliner interface {
StatefulCmdable
Do(args ...interface{}) *Cmd
Process(cmd Cmder) error
Close() error
Discard() error
Exec() ([]Cmder, error)
ExecContext(ctx context.Context) ([]Cmder, error)
}
var _ Pipeliner = (*Pipeline)(nil)
// Pipeline implements pipelining as described in
// http://redis.io/topics/pipelining. It's safe for concurrent use
// by multiple goroutines.
type Pipeline struct {
cmdable
statefulCmdable
ctx context.Context
exec pipelineExecer
mu sync.Mutex
cmds []Cmder
closed bool
}
func (c *Pipeline) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
}
func (c *Pipeline) Do(args ...interface{}) *Cmd {
cmd := NewCmd(args...)
_ = c.Process(cmd)
return cmd
}
// Process queues the cmd for later execution.
func (c *Pipeline) Process(cmd Cmder) error {
c.mu.Lock()
c.cmds = append(c.cmds, cmd)
c.mu.Unlock()
return nil
}
// Close closes the pipeline, releasing any open resources.
func (c *Pipeline) Close() error {
c.mu.Lock()
_ = c.discard()
c.closed = true
c.mu.Unlock()
return nil
}
// Discard resets the pipeline and discards queued commands.
func (c *Pipeline) Discard() error {
c.mu.Lock()
err := c.discard()
c.mu.Unlock()
return err
}
func (c *Pipeline) discard() error {
if c.closed {
return pool.ErrClosed
}
c.cmds = c.cmds[:0]
return nil
}
// Exec executes all previously queued commands using one
// client-server roundtrip.
//
// Exec always returns the list of commands and the error of the first
// failed command, if any.
func (c *Pipeline) Exec() ([]Cmder, error) {
return c.ExecContext(c.ctx)
}
func (c *Pipeline) ExecContext(ctx context.Context) ([]Cmder, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return nil, pool.ErrClosed
}
if len(c.cmds) == 0 {
return nil, nil
}
cmds := c.cmds
c.cmds = nil
return cmds, c.exec(ctx, cmds)
}
func (c *Pipeline) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
if err := fn(c); err != nil {
return nil, err
}
cmds, err := c.Exec()
_ = c.Close()
return cmds, err
}
func (c *Pipeline) Pipeline() Pipeliner {
return c
}
func (c *Pipeline) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipelined(fn)
}
func (c *Pipeline) TxPipeline() Pipeliner {
return c
}
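
Typical pipeline usage through the public client. Process only queues commands; Exec sends them in one round trip and populates each Cmd afterwards. A hedged sketch assuming a local server, with an illustrative key name:

package main

import (
    "fmt"
    "time"

    "github.com/go-redis/redis"
)

func main() {
    client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer client.Close()

    var incr *redis.IntCmd
    // Commands queued inside the closure travel in a single round trip.
    _, err := client.Pipelined(func(pipe redis.Pipeliner) error {
        incr = pipe.Incr("pipelined_counter")
        pipe.Expire("pipelined_counter", time.Hour)
        return nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println(incr.Val()) // populated only after Exec returns
}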

592
vendor/github.com/go-redis/redis/pubsub.go generated vendored Normal file

@@ -0,0 +1,592 @@
package redis
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"time"
"github.com/go-redis/redis/internal"
"github.com/go-redis/redis/internal/pool"
"github.com/go-redis/redis/internal/proto"
)
const pingTimeout = 30 * time.Second
var errPingTimeout = errors.New("redis: ping timeout")
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
//
// PubSub automatically reconnects to Redis Server and resubscribes
// to the channels in case of network errors.
type PubSub struct {
opt *Options
newConn func([]string) (*pool.Conn, error)
closeConn func(*pool.Conn) error
mu sync.Mutex
cn *pool.Conn
channels map[string]struct{}
patterns map[string]struct{}
closed bool
exit chan struct{}
cmd *Cmd
chOnce sync.Once
msgCh chan *Message
allCh chan interface{}
ping chan struct{}
}
func (c *PubSub) String() string {
channels := mapKeys(c.channels)
channels = append(channels, mapKeys(c.patterns)...)
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
func (c *PubSub) init() {
c.exit = make(chan struct{})
}
func (c *PubSub) connWithLock() (*pool.Conn, error) {
c.mu.Lock()
cn, err := c.conn(nil)
c.mu.Unlock()
return cn, err
}
func (c *PubSub) conn(newChannels []string) (*pool.Conn, error) {
if c.closed {
return nil, pool.ErrClosed
}
if c.cn != nil {
return c.cn, nil
}
channels := mapKeys(c.channels)
channels = append(channels, newChannels...)
cn, err := c.newConn(channels)
if err != nil {
return nil, err
}
if err := c.resubscribe(cn); err != nil {
_ = c.closeConn(cn)
return nil, err
}
c.cn = cn
return cn, nil
}
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
}
func (c *PubSub) resubscribe(cn *pool.Conn) error {
var firstErr error
if len(c.channels) > 0 {
firstErr = c._subscribe(cn, "subscribe", mapKeys(c.channels))
}
if len(c.patterns) > 0 {
err := c._subscribe(cn, "psubscribe", mapKeys(c.patterns))
if err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func mapKeys(m map[string]struct{}) []string {
s := make([]string, len(m))
i := 0
for k := range m {
s[i] = k
i++
}
return s
}
func (c *PubSub) _subscribe(
cn *pool.Conn, redisCmd string, channels []string,
) error {
args := make([]interface{}, 0, 1+len(channels))
args = append(args, redisCmd)
for _, channel := range channels {
args = append(args, channel)
}
cmd := NewSliceCmd(args...)
return c.writeCmd(context.TODO(), cn, cmd)
}
func (c *PubSub) releaseConnWithLock(cn *pool.Conn, err error, allowTimeout bool) {
c.mu.Lock()
c.releaseConn(cn, err, allowTimeout)
c.mu.Unlock()
}
func (c *PubSub) releaseConn(cn *pool.Conn, err error, allowTimeout bool) {
if c.cn != cn {
return
}
if internal.IsBadConn(err, allowTimeout) {
c.reconnect(err)
}
}
func (c *PubSub) reconnect(reason error) {
_ = c.closeTheCn(reason)
_, _ = c.conn(nil)
}
func (c *PubSub) closeTheCn(reason error) error {
if c.cn == nil {
return nil
}
if !c.closed {
internal.Logger.Printf("redis: discarding bad PubSub connection: %s", reason)
}
err := c.closeConn(c.cn)
c.cn = nil
return err
}
func (c *PubSub) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return pool.ErrClosed
}
c.closed = true
close(c.exit)
return c.closeTheCn(pool.ErrClosed)
}
// Subscribe the client to the specified channels. It returns an
// empty subscription if there are no channels.
func (c *PubSub) Subscribe(channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
err := c.subscribe("subscribe", channels...)
if c.channels == nil {
c.channels = make(map[string]struct{})
}
for _, s := range channels {
c.channels[s] = struct{}{}
}
return err
}
// PSubscribe the client to the given patterns. It returns an
// empty subscription if there are no patterns.
func (c *PubSub) PSubscribe(patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
err := c.subscribe("psubscribe", patterns...)
if c.patterns == nil {
c.patterns = make(map[string]struct{})
}
for _, s := range patterns {
c.patterns[s] = struct{}{}
}
return err
}
// Unsubscribe the client from the given channels, or from all of
// them if none is given.
func (c *PubSub) Unsubscribe(channels ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, channel := range channels {
delete(c.channels, channel)
}
err := c.subscribe("unsubscribe", channels...)
return err
}
// PUnsubscribe the client from the given patterns, or from all of
// them if none is given.
func (c *PubSub) PUnsubscribe(patterns ...string) error {
c.mu.Lock()
defer c.mu.Unlock()
for _, pattern := range patterns {
delete(c.patterns, pattern)
}
err := c.subscribe("punsubscribe", patterns...)
return err
}
func (c *PubSub) subscribe(redisCmd string, channels ...string) error {
cn, err := c.conn(channels)
if err != nil {
return err
}
err = c._subscribe(cn, redisCmd, channels)
c.releaseConn(cn, err, false)
return err
}
func (c *PubSub) Ping(payload ...string) error {
args := []interface{}{"ping"}
if len(payload) == 1 {
args = append(args, payload[0])
}
cmd := NewCmd(args...)
cn, err := c.connWithLock()
if err != nil {
return err
}
err = c.writeCmd(context.TODO(), cn, cmd)
c.releaseConnWithLock(cn, err, false)
return err
}
// Subscription received after a successful subscription to a channel.
type Subscription struct {
// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
Kind string
// Channel name we have subscribed to.
Channel string
// Number of channels we are currently subscribed to.
Count int
}
func (m *Subscription) String() string {
return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
}
// Message received as a result of a PUBLISH command issued by another client.
type Message struct {
Channel string
Pattern string
Payload string
}
func (m *Message) String() string {
return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
}
// Pong received as a result of a PING command issued by another client.
type Pong struct {
Payload string
}
func (p *Pong) String() string {
if p.Payload != "" {
return fmt.Sprintf("Pong<%s>", p.Payload)
}
return "Pong"
}
func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
switch reply := reply.(type) {
case string:
return &Pong{
Payload: reply,
}, nil
case []interface{}:
switch kind := reply[0].(string); kind {
case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
return &Subscription{
Kind: kind,
Channel: reply[1].(string),
Count: int(reply[2].(int64)),
}, nil
case "message":
return &Message{
Channel: reply[1].(string),
Payload: reply[2].(string),
}, nil
case "pmessage":
return &Message{
Pattern: reply[1].(string),
Channel: reply[2].(string),
Payload: reply[3].(string),
}, nil
case "pong":
return &Pong{
Payload: reply[1].(string),
}, nil
default:
return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
}
default:
return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
}
}
// ReceiveTimeout acts like Receive but returns an error if a message
// is not received in time. This is a low-level API; in most cases
// Channel should be used instead.
func (c *PubSub) ReceiveTimeout(timeout time.Duration) (interface{}, error) {
if c.cmd == nil {
c.cmd = NewCmd()
}
cn, err := c.connWithLock()
if err != nil {
return nil, err
}
err = cn.WithReader(context.TODO(), timeout, func(rd *proto.Reader) error {
return c.cmd.readReply(rd)
})
c.releaseConnWithLock(cn, err, timeout > 0)
if err != nil {
return nil, err
}
return c.newMessage(c.cmd.Val())
}
// Receive returns a message as a Subscription, Message, Pong or error.
// See the PubSub example for details. This is a low-level API; in most
// cases Channel should be used instead.
func (c *PubSub) Receive() (interface{}, error) {
return c.ReceiveTimeout(0)
}
// ReceiveMessage returns a Message or an error, ignoring Subscription and
// Pong messages. This is a low-level API; in most cases Channel should be
// used instead.
func (c *PubSub) ReceiveMessage() (*Message, error) {
for {
msg, err := c.Receive()
if err != nil {
return nil, err
}
switch msg := msg.(type) {
case *Subscription:
// Ignore.
case *Pong:
// Ignore.
case *Message:
return msg, nil
default:
err := fmt.Errorf("redis: unknown message: %T", msg)
return nil, err
}
}
}
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// stays full for 30 seconds, the message is dropped.
// Receive* APIs cannot be used after the channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping reply is not received within 30 seconds.
func (c *PubSub) Channel() <-chan *Message {
return c.ChannelSize(100)
}
// ChannelSize is like Channel, but creates a Go channel
// with specified buffer size.
func (c *PubSub) ChannelSize(size int) <-chan *Message {
c.chOnce.Do(func() {
c.initPing()
c.initMsgChan(size)
})
if c.msgCh == nil {
err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
panic(err)
}
if cap(c.msgCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.msgCh
}
// ChannelWithSubscriptions is like Channel, but message type can be either
// *Subscription or *Message. Subscription messages can be used to detect
// reconnections.
//
// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
func (c *PubSub) ChannelWithSubscriptions(size int) <-chan interface{} {
c.chOnce.Do(func() {
c.initPing()
c.initAllChan(size)
})
if c.allCh == nil {
err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
panic(err)
}
if cap(c.allCh) != size {
err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
panic(err)
}
return c.allCh
}
func (c *PubSub) initPing() {
c.ping = make(chan struct{}, 1)
go func() {
timer := time.NewTimer(pingTimeout)
timer.Stop()
healthy := true
for {
timer.Reset(pingTimeout)
select {
case <-c.ping:
healthy = true
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
pingErr := c.Ping()
if healthy {
healthy = false
} else {
if pingErr == nil {
pingErr = errPingTimeout
}
c.mu.Lock()
c.reconnect(pingErr)
c.mu.Unlock()
}
case <-c.exit:
return
}
}
}()
}
// initMsgChan must be in sync with initAllChan.
func (c *PubSub) initMsgChan(size int) {
c.msgCh = make(chan *Message, size)
go func() {
timer := time.NewTimer(pingTimeout)
timer.Stop()
var errCount int
for {
msg, err := c.Receive()
if err != nil {
if err == pool.ErrClosed {
close(c.msgCh)
return
}
if errCount > 0 {
time.Sleep(c.retryBackoff(errCount))
}
errCount++
continue
}
errCount = 0
// Any message is as good as a ping.
select {
case c.ping <- struct{}{}:
default:
}
switch msg := msg.(type) {
case *Subscription:
// Ignore.
case *Pong:
// Ignore.
case *Message:
timer.Reset(pingTimeout)
select {
case c.msgCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
}
default:
internal.Logger.Printf("redis: unknown message type: %T", msg)
}
}
}()
}
// initAllChan must be in sync with initMsgChan.
func (c *PubSub) initAllChan(size int) {
c.allCh = make(chan interface{}, size)
go func() {
timer := time.NewTimer(pingTimeout)
timer.Stop()
var errCount int
for {
msg, err := c.Receive()
if err != nil {
if err == pool.ErrClosed {
close(c.allCh)
return
}
if errCount > 0 {
time.Sleep(c.retryBackoff(errCount))
}
errCount++
continue
}
errCount = 0
// Any message is as good as a ping.
select {
case c.ping <- struct{}{}:
default:
}
switch msg := msg.(type) {
case *Subscription:
c.sendMessage(msg, timer)
case *Pong:
// Ignore.
case *Message:
c.sendMessage(msg, timer)
default:
internal.Logger.Printf("redis: unknown message type: %T", msg)
}
}
}()
}
func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
timer.Reset(pingTimeout)
select {
case c.allCh <- msg:
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
internal.Logger.Printf(
"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
}
}
func (c *PubSub) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
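
End-to-end Pub/Sub usage through the public client. Receive waits for the subscription confirmation, so a Publish issued afterwards cannot race the subscription. A hedged sketch assuming a local server, with an illustrative channel name:

package main

import (
    "fmt"
    "time"

    "github.com/go-redis/redis"
)

func main() {
    client := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    defer client.Close()

    sub := client.Subscribe("mychannel")
    defer sub.Close()

    // Block until the *Subscription confirmation arrives.
    if _, err := sub.Receive(); err != nil {
        panic(err)
    }

    ch := sub.Channel() // buffered (100); messages dropped if full for 30s

    if err := client.Publish("mychannel", "hello").Err(); err != nil {
        panic(err)
    }

    select {
    case msg := <-ch:
        fmt.Println(msg.Channel, msg.Payload)
    case <-time.After(time.Second):
        fmt.Println("timed out")
    }
}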

690
vendor/github.com/go-redis/redis/redis.go generated vendored Normal file

@@ -0,0 +1,690 @@
package redis
import (
"context"
"fmt"
"log"
"time"
"github.com/go-redis/redis/internal"
"github.com/go-redis/redis/internal/pool"
"github.com/go-redis/redis/internal/proto"
)
// Nil is the reply Redis returns when a key does not exist.
const Nil = proto.Nil
func SetLogger(logger *log.Logger) {
internal.Logger = logger
}
//------------------------------------------------------------------------------
type Hook interface {
BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
AfterProcess(ctx context.Context, cmd Cmder) error
BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
}
type hooks struct {
hooks []Hook
}
func (hs *hooks) AddHook(hook Hook) {
hs.hooks = append(hs.hooks, hook)
}
func (hs hooks) process(
ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
ctx, err := hs.beforeProcess(ctx, cmd)
if err != nil {
return err
}
cmdErr := fn(ctx, cmd)
_, err = hs.afterProcess(ctx, cmd)
if err != nil {
return err
}
return cmdErr
}
func (hs hooks) beforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
for _, h := range hs.hooks {
var err error
ctx, err = h.BeforeProcess(ctx, cmd)
if err != nil {
return nil, err
}
}
return ctx, nil
}
func (hs hooks) afterProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
var firstErr error
for _, h := range hs.hooks {
err := h.AfterProcess(ctx, cmd)
if err != nil && firstErr == nil {
firstErr = err
}
}
return ctx, firstErr
}
func (hs hooks) processPipeline(
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
ctx, err := hs.beforeProcessPipeline(ctx, cmds)
if err != nil {
return err
}
cmdsErr := fn(ctx, cmds)
_, err = hs.afterProcessPipeline(ctx, cmds)
if err != nil {
return err
}
return cmdsErr
}
func (hs hooks) beforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
for _, h := range hs.hooks {
var err error
ctx, err = h.BeforeProcessPipeline(ctx, cmds)
if err != nil {
return nil, err
}
}
return ctx, nil
}
func (hs hooks) afterProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
var firstErr error
for _, h := range hs.hooks {
err := h.AfterProcessPipeline(ctx, cmds)
if err != nil && firstErr == nil {
firstErr = err
}
}
return ctx, firstErr
}
//------------------------------------------------------------------------------
type baseClient struct {
opt *Options
connPool pool.Pooler
limiter Limiter
onClose func() error // hook called when client is closed
}
func (c *baseClient) String() string {
return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}
func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.NewConn(ctx)
if err != nil {
return nil, err
}
err = c.initConn(ctx, cn)
if err != nil {
_ = c.connPool.CloseConn(cn)
return nil, err
}
return cn, nil
}
func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
if c.limiter != nil {
err := c.limiter.Allow()
if err != nil {
return nil, err
}
}
cn, err := c._getConn(ctx)
if err != nil {
if c.limiter != nil {
c.limiter.ReportResult(err)
}
return nil, err
}
return cn, nil
}
func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
cn, err := c.connPool.Get(ctx)
if err != nil {
return nil, err
}
err = c.initConn(ctx, cn)
if err != nil {
c.connPool.Remove(cn)
return nil, err
}
return cn, nil
}
func (c *baseClient) releaseConn(cn *pool.Conn, err error) {
if c.limiter != nil {
c.limiter.ReportResult(err)
}
if internal.IsBadConn(err, false) {
c.connPool.Remove(cn)
} else {
c.connPool.Put(cn)
}
}
func (c *baseClient) releaseConnStrict(cn *pool.Conn, err error) {
if c.limiter != nil {
c.limiter.ReportResult(err)
}
if err == nil || internal.IsRedisError(err) {
c.connPool.Put(cn)
} else {
c.connPool.Remove(cn)
}
}
func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
if cn.Inited {
return nil
}
cn.Inited = true
if c.opt.Password == "" &&
c.opt.DB == 0 &&
!c.opt.readOnly &&
c.opt.OnConnect == nil {
return nil
}
conn := newConn(ctx, c.opt, cn)
_, err := conn.Pipelined(func(pipe Pipeliner) error {
if c.opt.Password != "" {
pipe.Auth(c.opt.Password)
}
if c.opt.DB > 0 {
pipe.Select(c.opt.DB)
}
if c.opt.readOnly {
pipe.ReadOnly()
}
return nil
})
if err != nil {
return err
}
if c.opt.OnConnect != nil {
return c.opt.OnConnect(conn)
}
return nil
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
time.Sleep(c.retryBackoff(attempt))
}
cn, err := c.getConn(ctx)
if err != nil {
cmd.setErr(err)
if internal.IsRetryableError(err, true) {
continue
}
return err
}
err = cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmd)
})
if err != nil {
c.releaseConn(cn, err)
cmd.setErr(err)
if internal.IsRetryableError(err, true) {
continue
}
return err
}
err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
c.releaseConn(cn, err)
if err != nil && internal.IsRetryableError(err, cmd.readTimeout() == nil) {
continue
}
return err
}
return cmd.Err()
}
func (c *baseClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
if timeout := cmd.readTimeout(); timeout != nil {
t := *timeout
if t == 0 {
return 0
}
return t + 10*time.Second
}
return c.opt.ReadTimeout
}
// Close closes the client, releasing any open resources.
//
// It is rare to Close a Client, as the Client is meant to be
// long-lived and shared between many goroutines.
func (c *baseClient) Close() error {
var firstErr error
if c.onClose != nil {
if err := c.onClose(); err != nil {
firstErr = err
}
}
if err := c.connPool.Close(); err != nil && firstErr == nil {
firstErr = err
}
return firstErr
}
func (c *baseClient) getAddr() string {
return c.opt.Addr
}
func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
}
func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
}
type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
func (c *baseClient) generalProcessPipeline(
ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
time.Sleep(c.retryBackoff(attempt))
}
cn, err := c.getConn(ctx)
if err != nil {
setCmdsErr(cmds, err)
return err
}
canRetry, err := p(ctx, cn, cmds)
c.releaseConnStrict(cn, err)
if !canRetry || !internal.IsRetryableError(err, true) {
break
}
}
return cmdsFirstErr(cmds)
}
func (c *baseClient) pipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return writeCmd(wr, cmds...)
})
if err != nil {
setCmdsErr(cmds, err)
return true, err
}
err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
return pipelineReadCmds(rd, cmds)
})
return true, err
}
func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
for _, cmd := range cmds {
err := cmd.readReply(rd)
if err != nil && !internal.IsRedisError(err) {
return err
}
}
return nil
}
func (c *baseClient) txPipelineProcessCmds(
ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
return txPipelineWriteMulti(wr, cmds)
})
if err != nil {
setCmdsErr(cmds, err)
return true, err
}
err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
err := txPipelineReadQueued(rd, cmds)
if err != nil {
setCmdsErr(cmds, err)
return err
}
return pipelineReadCmds(rd, cmds)
})
return false, err
}
func txPipelineWriteMulti(wr *proto.Writer, cmds []Cmder) error {
multiExec := make([]Cmder, 0, len(cmds)+2)
multiExec = append(multiExec, NewStatusCmd("MULTI"))
multiExec = append(multiExec, cmds...)
multiExec = append(multiExec, NewSliceCmd("EXEC"))
return writeCmd(wr, multiExec...)
}
func txPipelineReadQueued(rd *proto.Reader, cmds []Cmder) error {
// Parse queued replies.
var statusCmd StatusCmd
err := statusCmd.readReply(rd)
if err != nil {
return err
}
for range cmds {
err = statusCmd.readReply(rd)
if err != nil && !internal.IsRedisError(err) {
return err
}
}
// Parse number of replies.
line, err := rd.ReadLine()
if err != nil {
if err == Nil {
err = TxFailedErr
}
return err
}
switch line[0] {
case proto.ErrorReply:
return proto.ParseErrorReply(line)
case proto.ArrayReply:
// ok
default:
err := fmt.Errorf("redis: expected '*', but got line %q", line)
return err
}
return nil
}
//------------------------------------------------------------------------------
type client struct {
baseClient
cmdable
hooks
}
// Client is a Redis client representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
type Client struct {
*client
ctx context.Context
}
// NewClient returns a client to the Redis Server specified by Options.
func NewClient(opt *Options) *Client {
opt.init()
c := Client{
client: &client{
baseClient: baseClient{
opt: opt,
connPool: newConnPool(opt),
},
},
ctx: context.Background(),
}
c.init()
return &c
}
func (c *Client) init() {
c.cmdable = c.Process
}
func (c *Client) Context() context.Context {
return c.ctx
}
func (c *Client) WithContext(ctx context.Context) *Client {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.ctx = ctx
clone.init()
return &clone
}
// Do creates a Cmd from the args and processes the cmd.
func (c *Client) Do(args ...interface{}) *Cmd {
return c.DoContext(c.ctx, args...)
}
func (c *Client) DoContext(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(args...)
_ = c.ProcessContext(ctx, cmd)
return cmd
}
func (c *Client) Process(cmd Cmder) error {
return c.ProcessContext(c.ctx, cmd)
}
func (c *Client) ProcessContext(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.baseClient.process)
}
func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}
func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
// Options returns read-only Options that were used to create the client.
func (c *Client) Options() *Options {
return c.opt
}
func (c *Client) SetLimiter(l Limiter) *Client {
c.limiter = l
return c
}
type PoolStats pool.Stats
// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
stats := c.connPool.Stats()
return (*PoolStats)(stats)
}
func (c *Client) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(fn)
}
func (c *Client) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
}
pipe.init()
return &pipe
}
func (c *Client) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Client) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}
func (c *Client) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt,
newConn: func(channels []string) (*pool.Conn, error) {
return c.newConn(context.TODO())
},
closeConn: c.connPool.CloseConn,
}
pubsub.init()
return pubsub
}
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create an empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
// sub := client.Subscribe(queryResp)
// iface, err := sub.Receive()
// if err != nil {
// // handle error
// }
//
// // Should be *Subscription, but others are possible if other actions have been
// // taken on sub since it was created.
// switch iface.(type) {
// case *Subscription:
// // subscribe succeeded
// case *Message:
// // received first message
// case *Pong:
// // pong received
// default:
// // handle error
// }
//
// ch := sub.Channel()
func (c *Client) Subscribe(channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.Subscribe(channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create an empty subscription.
func (c *Client) PSubscribe(channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.PSubscribe(channels...)
}
return pubsub
}
//------------------------------------------------------------------------------
type conn struct {
baseClient
cmdable
statefulCmdable
}
// Conn is like Client, but its pool contains a single connection.
type Conn struct {
*conn
ctx context.Context
}
func newConn(ctx context.Context, opt *Options, cn *pool.Conn) *Conn {
c := Conn{
conn: &conn{
baseClient: baseClient{
opt: opt,
connPool: pool.NewSingleConnPool(cn),
},
},
ctx: ctx,
}
c.cmdable = c.Process
c.statefulCmdable = c.Process
return &c
}
func (c *Conn) Process(cmd Cmder) error {
return c.ProcessContext(c.ctx, cmd)
}
func (c *Conn) ProcessContext(ctx context.Context, cmd Cmder) error {
return c.baseClient.process(ctx, cmd)
}
func (c *Conn) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(fn)
}
func (c *Conn) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
}
pipe.init()
return &pipe
}
func (c *Conn) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(fn)
}
// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Conn) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}
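// Usage sketch: a minimal example of driving the Pipeline API above, assuming
// a server at localhost:6379 (hypothetical address); error handling elided.
//
//   client := NewClient(&Options{Addr: "localhost:6379"})
//   pipe := client.Pipeline()
//   incr := pipe.Incr("counter")
//   pipe.Expire("counter", time.Hour)
//   _, err := pipe.Exec()
//   fmt.Println(incr.Val(), err)
//
// TxPipeline is used the same way, but the queued commands are wrapped in
// MULTI/EXEC and execute atomically on the server.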

148
vendor/github.com/go-redis/redis/result.go generated vendored Normal file
View File

@@ -0,0 +1,148 @@
package redis
import "time"
// NewCmdResult returns a Cmd initialised with val and err for testing
func NewCmdResult(val interface{}, err error) *Cmd {
var cmd Cmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewSliceResult returns a SliceCmd initialised with val and err for testing
func NewSliceResult(val []interface{}, err error) *SliceCmd {
var cmd SliceCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewStatusResult returns a StatusCmd initialised with val and err for testing
func NewStatusResult(val string, err error) *StatusCmd {
var cmd StatusCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewIntResult returns an IntCmd initialised with val and err for testing
func NewIntResult(val int64, err error) *IntCmd {
var cmd IntCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewDurationResult returns a DurationCmd initialised with val and err for testing
func NewDurationResult(val time.Duration, err error) *DurationCmd {
var cmd DurationCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewBoolResult returns a BoolCmd initialised with val and err for testing
func NewBoolResult(val bool, err error) *BoolCmd {
var cmd BoolCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewStringResult returns a StringCmd initialised with val and err for testing
func NewStringResult(val string, err error) *StringCmd {
var cmd StringCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewFloatResult returns a FloatCmd initialised with val and err for testing
func NewFloatResult(val float64, err error) *FloatCmd {
var cmd FloatCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing
func NewStringSliceResult(val []string, err error) *StringSliceCmd {
var cmd StringSliceCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing
func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
var cmd BoolSliceCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing
func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
var cmd StringStringMapCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing
func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
var cmd StringIntMapCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing
func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
var cmd ZSliceCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing
func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
var cmd ZWithKeyCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewScanCmdResult returns a ScanCmd initialised with val and err for testing
func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
var cmd ScanCmd
cmd.page = keys
cmd.cursor = cursor
cmd.setErr(err)
return &cmd
}
// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing
func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
var cmd ClusterSlotsCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing
func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
var cmd GeoLocationCmd
cmd.locations = val
cmd.setErr(err)
return &cmd
}
// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing
func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
var cmd CommandsInfoCmd
cmd.val = val
cmd.setErr(err)
return &cmd
}
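// Usage sketch: these helpers stub command results in unit tests without a
// live server. A minimal, hypothetical test body:
//
//   cmd := NewStringResult("PONG", nil)
//   if cmd.Val() != "PONG" || cmd.Err() != nil {
//       t.Fatalf("unexpected result: %q %v", cmd.Val(), cmd.Err())
//   }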

725
vendor/github.com/go-redis/redis/ring.go generated vendored Normal file
View File

@@ -0,0 +1,725 @@
package redis
import (
"context"
"errors"
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/go-redis/redis/internal"
"github.com/go-redis/redis/internal/consistenthash"
"github.com/go-redis/redis/internal/hashtag"
"github.com/go-redis/redis/internal/pool"
)
// Hash is type of hash function used in consistent hash.
type Hash consistenthash.Hash
var errRingShardsDown = errors.New("redis: all ring shards are down")
// RingOptions are used to configure a ring client and should be
// passed to NewRing.
type RingOptions struct {
// Map of name => host:port addresses of ring shards.
Addrs map[string]string
// Map of name => password of ring shards, to allow different shards to have
// different passwords. It will be ignored if the Password field is set.
Passwords map[string]string
// Frequency of PING commands sent to check shard availability.
// A shard is considered down after 3 consecutive failed checks.
HeartbeatFrequency time.Duration
// Hash function used in consistent hash.
// Default is crc32.ChecksumIEEE.
Hash Hash
// Number of replicas in consistent hash.
// Default is 100 replicas.
//
// A higher number of replicas gives lower deviation; that is, keys are
// distributed across nodes more evenly.
//
// The deviation for common values of nreplicas is:
// --------------------------------------------------------
// | nreplicas | standard error | 99% confidence interval |
// | 10 | 0.3152 | (0.37, 1.98) |
// | 100 | 0.0997 | (0.76, 1.28) |
// | 1000 | 0.0316 | (0.92, 1.09) |
// --------------------------------------------------------
//
// See https://arxiv.org/abs/1406.2294 for reference
HashReplicas int
// The following options are copied from the Options struct.
OnConnect func(*Conn) error
DB int
Password string
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
}
func (opt *RingOptions) init() {
if opt.HeartbeatFrequency == 0 {
opt.HeartbeatFrequency = 500 * time.Millisecond
}
if opt.HashReplicas == 0 {
opt.HashReplicas = 100
}
switch opt.MinRetryBackoff {
case -1:
opt.MinRetryBackoff = 0
case 0:
opt.MinRetryBackoff = 8 * time.Millisecond
}
switch opt.MaxRetryBackoff {
case -1:
opt.MaxRetryBackoff = 0
case 0:
opt.MaxRetryBackoff = 512 * time.Millisecond
}
}
func (opt *RingOptions) clientOptions(shard string) *Options {
return &Options{
OnConnect: opt.OnConnect,
DB: opt.DB,
Password: opt.getPassword(shard),
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
}
}
func (opt *RingOptions) getPassword(shard string) string {
if opt.Password == "" {
return opt.Passwords[shard]
}
return opt.Password
}
//------------------------------------------------------------------------------
type ringShard struct {
Client *Client
down int32
}
func (shard *ringShard) String() string {
var state string
if shard.IsUp() {
state = "up"
} else {
state = "down"
}
return fmt.Sprintf("%s is %s", shard.Client, state)
}
func (shard *ringShard) IsDown() bool {
const threshold = 3
return atomic.LoadInt32(&shard.down) >= threshold
}
func (shard *ringShard) IsUp() bool {
return !shard.IsDown()
}
// Vote votes to set shard state and returns true if state was changed.
func (shard *ringShard) Vote(up bool) bool {
if up {
changed := shard.IsDown()
atomic.StoreInt32(&shard.down, 0)
return changed
}
if shard.IsDown() {
return false
}
atomic.AddInt32(&shard.down, 1)
return shard.IsDown()
}
//------------------------------------------------------------------------------
type ringShards struct {
opt *RingOptions
mu sync.RWMutex
hash *consistenthash.Map
shards map[string]*ringShard // read only
list []*ringShard // read only
len int
closed bool
}
func newRingShards(opt *RingOptions) *ringShards {
return &ringShards{
opt: opt,
hash: newConsistentHash(opt),
shards: make(map[string]*ringShard),
}
}
func (c *ringShards) Add(name string, cl *Client) {
shard := &ringShard{Client: cl}
c.hash.Add(name)
c.shards[name] = shard
c.list = append(c.list, shard)
}
func (c *ringShards) List() []*ringShard {
c.mu.RLock()
list := c.list
c.mu.RUnlock()
return list
}
func (c *ringShards) Hash(key string) string {
c.mu.RLock()
hash := c.hash.Get(key)
c.mu.RUnlock()
return hash
}
func (c *ringShards) GetByKey(key string) (*ringShard, error) {
key = hashtag.Key(key)
c.mu.RLock()
if c.closed {
c.mu.RUnlock()
return nil, pool.ErrClosed
}
hash := c.hash.Get(key)
if hash == "" {
c.mu.RUnlock()
return nil, errRingShardsDown
}
shard := c.shards[hash]
c.mu.RUnlock()
return shard, nil
}
func (c *ringShards) GetByHash(name string) (*ringShard, error) {
if name == "" {
return c.Random()
}
c.mu.RLock()
shard := c.shards[name]
c.mu.RUnlock()
return shard, nil
}
func (c *ringShards) Random() (*ringShard, error) {
return c.GetByKey(strconv.Itoa(rand.Int()))
}
// Heartbeat monitors the state of each shard in the ring.
func (c *ringShards) Heartbeat(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for range ticker.C {
var rebalance bool
c.mu.RLock()
if c.closed {
c.mu.RUnlock()
break
}
shards := c.list
c.mu.RUnlock()
for _, shard := range shards {
err := shard.Client.Ping().Err()
if shard.Vote(err == nil || err == pool.ErrPoolTimeout) {
internal.Logger.Printf("ring shard state changed: %s", shard)
rebalance = true
}
}
if rebalance {
c.rebalance()
}
}
}
// rebalance removes dead shards from the Ring.
func (c *ringShards) rebalance() {
c.mu.RLock()
shards := c.shards
c.mu.RUnlock()
hash := newConsistentHash(c.opt)
var shardsNum int
for name, shard := range shards {
if shard.IsUp() {
hash.Add(name)
shardsNum++
}
}
c.mu.Lock()
c.hash = hash
c.len = shardsNum
c.mu.Unlock()
}
func (c *ringShards) Len() int {
c.mu.RLock()
l := c.len
c.mu.RUnlock()
return l
}
func (c *ringShards) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return nil
}
c.closed = true
var firstErr error
for _, shard := range c.shards {
if err := shard.Client.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
c.hash = nil
c.shards = nil
c.list = nil
return firstErr
}
//------------------------------------------------------------------------------
type ring struct {
cmdable
hooks
opt *RingOptions
shards *ringShards
cmdsInfoCache *cmdsInfoCache //nolint:structcheck
}
// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
//
// Ring monitors the state of each shard and removes dead shards from
// the ring. When a shard comes online it is added back to the ring. This
// gives you maximum availability and partition tolerance, but no
// consistency between different shards or even clients. Each client
// uses shards that are available to the client and does not do any
// coordination when shard state is changed.
//
// Ring should be used when you need multiple Redis servers for caching
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
*ring
ctx context.Context
}
func NewRing(opt *RingOptions) *Ring {
opt.init()
ring := Ring{
ring: &ring{
opt: opt,
shards: newRingShards(opt),
},
ctx: context.Background(),
}
ring.init()
ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
for name, addr := range opt.Addrs {
clopt := opt.clientOptions(name)
clopt.Addr = addr
ring.shards.Add(name, NewClient(clopt))
}
go ring.shards.Heartbeat(opt.HeartbeatFrequency)
return &ring
}
func (c *Ring) init() {
c.cmdable = c.Process
}
func (c *Ring) Context() context.Context {
return c.ctx
}
func (c *Ring) WithContext(ctx context.Context) *Ring {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.ctx = ctx
clone.init()
return &clone
}
// Do creates a Cmd from the args and processes the cmd.
func (c *Ring) Do(args ...interface{}) *Cmd {
return c.DoContext(c.ctx, args...)
}
func (c *Ring) DoContext(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(args...)
_ = c.ProcessContext(ctx, cmd)
return cmd
}
func (c *Ring) Process(cmd Cmder) error {
return c.ProcessContext(c.ctx, cmd)
}
func (c *Ring) ProcessContext(ctx context.Context, cmd Cmder) error {
return c.hooks.process(ctx, cmd, c.process)
}
// Options returns read-only Options that were used to create the client.
func (c *Ring) Options() *RingOptions {
return c.opt
}
func (c *Ring) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
// PoolStats returns accumulated connection pool stats.
func (c *Ring) PoolStats() *PoolStats {
shards := c.shards.List()
var acc PoolStats
for _, shard := range shards {
s := shard.Client.connPool.Stats()
acc.Hits += s.Hits
acc.Misses += s.Misses
acc.Timeouts += s.Timeouts
acc.TotalConns += s.TotalConns
acc.IdleConns += s.IdleConns
}
return &acc
}
// Len returns the current number of shards in the ring.
func (c *Ring) Len() int {
return c.shards.Len()
}
// Subscribe subscribes the client to the specified channels.
func (c *Ring) Subscribe(channels ...string) *PubSub {
if len(channels) == 0 {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
}
return shard.Client.Subscribe(channels...)
}
// PSubscribe subscribes the client to the given patterns.
func (c *Ring) PSubscribe(channels ...string) *PubSub {
if len(channels) == 0 {
panic("at least one channel is required")
}
shard, err := c.shards.GetByKey(channels[0])
if err != nil {
// TODO: return PubSub with sticky error
panic(err)
}
return shard.Client.PSubscribe(channels...)
}
// ForEachShard concurrently calls fn on each live shard in the ring.
// It returns the first error, if any.
func (c *Ring) ForEachShard(fn func(client *Client) error) error {
shards := c.shards.List()
var wg sync.WaitGroup
errCh := make(chan error, 1)
for _, shard := range shards {
if shard.IsDown() {
continue
}
wg.Add(1)
go func(shard *ringShard) {
defer wg.Done()
err := fn(shard.Client)
if err != nil {
select {
case errCh <- err:
default:
}
}
}(shard)
}
wg.Wait()
select {
case err := <-errCh:
return err
default:
return nil
}
}
func (c *Ring) cmdsInfo() (map[string]*CommandInfo, error) {
shards := c.shards.List()
var firstErr error
for _, shard := range shards {
cmdsInfo, err := shard.Client.Command().Result()
if err == nil {
return cmdsInfo, nil
}
// Keep the first real error; firstErr must start out nil for this
// check to ever fire.
if firstErr == nil {
firstErr = err
}
}
if firstErr == nil {
firstErr = errRingShardsDown
}
return nil, firstErr
}
func (c *Ring) cmdInfo(name string) *CommandInfo {
cmdsInfo, err := c.cmdsInfoCache.Get()
if err != nil {
return nil
}
info := cmdsInfo[name]
if info == nil {
internal.Logger.Printf("info for cmd=%s not found", name)
}
return info
}
func (c *Ring) cmdShard(cmd Cmder) (*ringShard, error) {
cmdInfo := c.cmdInfo(cmd.Name())
pos := cmdFirstKeyPos(cmd, cmdInfo)
if pos == 0 {
return c.shards.Random()
}
firstKey := cmd.stringArg(pos)
return c.shards.GetByKey(firstKey)
}
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
time.Sleep(c.retryBackoff(attempt))
}
shard, err := c.cmdShard(cmd)
if err != nil {
cmd.setErr(err)
return err
}
err = shard.Client.ProcessContext(ctx, cmd)
if err == nil {
return nil
}
if !internal.IsRetryableError(err, cmd.readTimeout() == nil) {
return err
}
}
return cmd.Err()
}
func (c *Ring) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(fn)
}
func (c *Ring) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processPipeline,
}
pipe.init()
return &pipe
}
func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, false)
})
}
func (c *Ring) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.TxPipeline().Pipelined(fn)
}
func (c *Ring) TxPipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}
func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
return c.generalProcessPipeline(ctx, cmds, true)
})
}
func (c *Ring) generalProcessPipeline(
ctx context.Context, cmds []Cmder, tx bool,
) error {
cmdsMap := make(map[string][]Cmder)
for _, cmd := range cmds {
cmdInfo := c.cmdInfo(cmd.Name())
hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
if hash != "" {
hash = c.shards.Hash(hashtag.Key(hash))
}
cmdsMap[hash] = append(cmdsMap[hash], cmd)
}
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
if attempt > 0 {
time.Sleep(c.retryBackoff(attempt))
}
var mu sync.Mutex
var failedCmdsMap map[string][]Cmder
var wg sync.WaitGroup
for hash, cmds := range cmdsMap {
wg.Add(1)
go func(hash string, cmds []Cmder) {
defer wg.Done()
shard, err := c.shards.GetByHash(hash)
if err != nil {
setCmdsErr(cmds, err)
return
}
cn, err := shard.Client.getConn(ctx)
if err != nil {
setCmdsErr(cmds, err)
return
}
var canRetry bool
if tx {
canRetry, err = shard.Client.txPipelineProcessCmds(ctx, cn, cmds)
} else {
canRetry, err = shard.Client.pipelineProcessCmds(ctx, cn, cmds)
}
shard.Client.releaseConnStrict(cn, err)
if canRetry && internal.IsRetryableError(err, true) {
mu.Lock()
if failedCmdsMap == nil {
failedCmdsMap = make(map[string][]Cmder)
}
failedCmdsMap[hash] = cmds
mu.Unlock()
}
}(hash, cmds)
}
wg.Wait()
if len(failedCmdsMap) == 0 {
break
}
cmdsMap = failedCmdsMap
}
return cmdsFirstErr(cmds)
}
// Close closes the ring client, releasing any open resources.
//
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
return c.shards.Close()
}
func (c *Ring) Watch(fn func(*Tx) error, keys ...string) error {
if len(keys) == 0 {
return fmt.Errorf("redis: Watch requires at least one key")
}
var shards []*ringShard
for _, key := range keys {
if key != "" {
shard, err := c.shards.GetByKey(hashtag.Key(key))
if err != nil {
return err
}
shards = append(shards, shard)
}
}
if len(shards) == 0 {
return fmt.Errorf("redis: Watch requires at least one shard")
}
if len(shards) > 1 {
for _, shard := range shards[1:] {
if shard.Client != shards[0].Client {
err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
return err
}
}
}
return shards[0].Client.Watch(fn, keys...)
}
func newConsistentHash(opt *RingOptions) *consistenthash.Map {
return consistenthash.New(opt.HashReplicas, consistenthash.Hash(opt.Hash))
}
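// Usage sketch: a two-shard ring with hypothetical addresses. Keys are routed
// to shards by consistent hashing; hash tags ({...}) co-locate related keys
// on the same shard.
//
//   ring := NewRing(&RingOptions{
//       Addrs: map[string]string{
//           "shard1": "localhost:7000",
//           "shard2": "localhost:7001",
//       },
//   })
//   err := ring.Set("user:{42}:name", "alice", 0).Err()
//   name, err := ring.Get("user:{42}:name").Result()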

62
vendor/github.com/go-redis/redis/script.go generated vendored Normal file
View File

@@ -0,0 +1,62 @@
package redis
import (
"crypto/sha1"
"encoding/hex"
"io"
"strings"
)
type scripter interface {
Eval(script string, keys []string, args ...interface{}) *Cmd
EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(hashes ...string) *BoolSliceCmd
ScriptLoad(script string) *StringCmd
}
var _ scripter = (*Client)(nil)
var _ scripter = (*Ring)(nil)
var _ scripter = (*ClusterClient)(nil)
type Script struct {
src, hash string
}
func NewScript(src string) *Script {
h := sha1.New()
_, _ = io.WriteString(h, src)
return &Script{
src: src,
hash: hex.EncodeToString(h.Sum(nil)),
}
}
func (s *Script) Hash() string {
return s.hash
}
func (s *Script) Load(c scripter) *StringCmd {
return c.ScriptLoad(s.src)
}
func (s *Script) Exists(c scripter) *BoolSliceCmd {
return c.ScriptExists(s.hash)
}
func (s *Script) Eval(c scripter, keys []string, args ...interface{}) *Cmd {
return c.Eval(s.src, keys, args...)
}
func (s *Script) EvalSha(c scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(s.hash, keys, args...)
}
// Run optimistically uses EVALSHA to run the script. If the script does not
// exist, it is retried using EVAL.
func (s *Script) Run(c scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(c, keys, args...)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
return s.Eval(c, keys, args...)
}
return r
}
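// Usage sketch: Run sends EVALSHA first and falls back to EVAL on NOSCRIPT,
// so the script body is transferred at most once per server. A minimal
// example, where client is any value satisfying scripter:
//
//   incrBy := NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
//   n, err := incrBy.Run(client, []string{"counter"}, 2).Result()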

496
vendor/github.com/go-redis/redis/sentinel.go generated vendored Normal file
View File

@@ -0,0 +1,496 @@
package redis
import (
"context"
"crypto/tls"
"errors"
"net"
"strings"
"sync"
"time"
"github.com/go-redis/redis/internal"
"github.com/go-redis/redis/internal/pool"
)
//------------------------------------------------------------------------------
// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
// The master name.
MasterName string
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
SentinelPassword string
// The following options are copied from the Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(*Conn) error
Password string
DB int
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
TLSConfig *tls.Config
}
func (opt *FailoverOptions) options() *Options {
return &Options{
Addr: "FailoverClient",
Dialer: opt.Dialer,
OnConnect: opt.OnConnect,
DB: opt.DB,
Password: opt.Password,
MaxRetries: opt.MaxRetries,
DialTimeout: opt.DialTimeout,
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: opt.IdleCheckFrequency,
TLSConfig: opt.TLSConfig,
}
}
// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
opt := failoverOpt.options()
opt.init()
failover := &sentinelFailover{
masterName: failoverOpt.MasterName,
sentinelAddrs: failoverOpt.SentinelAddrs,
password: failoverOpt.SentinelPassword,
opt: opt,
}
c := Client{
client: &client{
baseClient: baseClient{
opt: opt,
connPool: failover.Pool(),
onClose: failover.Close,
},
},
ctx: context.Background(),
}
c.init()
return &c
}
//------------------------------------------------------------------------------
type SentinelClient struct {
*baseClient
ctx context.Context
}
func NewSentinelClient(opt *Options) *SentinelClient {
opt.init()
c := &SentinelClient{
baseClient: &baseClient{
opt: opt,
connPool: newConnPool(opt),
},
ctx: context.Background(),
}
return c
}
func (c *SentinelClient) Context() context.Context {
return c.ctx
}
func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.ctx = ctx
return &clone
}
func (c *SentinelClient) Process(cmd Cmder) error {
return c.ProcessContext(c.ctx, cmd)
}
func (c *SentinelClient) ProcessContext(ctx context.Context, cmd Cmder) error {
return c.baseClient.process(ctx, cmd)
}
func (c *SentinelClient) pubSub() *PubSub {
pubsub := &PubSub{
opt: c.opt,
newConn: func(channels []string) (*pool.Conn, error) {
return c.newConn(context.TODO())
},
closeConn: c.connPool.CloseConn,
}
pubsub.init()
return pubsub
}
// Ping is used to test if a connection is still alive, or to
// measure latency.
func (c *SentinelClient) Ping() *StringCmd {
cmd := NewStringCmd("ping")
_ = c.Process(cmd)
return cmd
}
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
func (c *SentinelClient) Subscribe(channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.Subscribe(channels...)
}
return pubsub
}
// PSubscribe subscribes the client to the given patterns.
// Patterns can be omitted to create empty subscription.
func (c *SentinelClient) PSubscribe(channels ...string) *PubSub {
pubsub := c.pubSub()
if len(channels) > 0 {
_ = pubsub.PSubscribe(channels...)
}
return pubsub
}
func (c *SentinelClient) GetMasterAddrByName(name string) *StringSliceCmd {
cmd := NewStringSliceCmd("sentinel", "get-master-addr-by-name", name)
_ = c.Process(cmd)
return cmd
}
func (c *SentinelClient) Sentinels(name string) *SliceCmd {
cmd := NewSliceCmd("sentinel", "sentinels", name)
_ = c.Process(cmd)
return cmd
}
// Failover forces a failover as if the master was not reachable, and without
// asking other Sentinels for agreement.
func (c *SentinelClient) Failover(name string) *StatusCmd {
cmd := NewStatusCmd("sentinel", "failover", name)
_ = c.Process(cmd)
return cmd
}
// Reset resets all the masters with matching name. The pattern argument is a
// glob-style pattern. The reset process clears any previous state in a master
// (including a failover in progress), and removes every slave and sentinel
// already discovered and associated with the master.
func (c *SentinelClient) Reset(pattern string) *IntCmd {
cmd := NewIntCmd("sentinel", "reset", pattern)
_ = c.Process(cmd)
return cmd
}
// FlushConfig forces Sentinel to rewrite its configuration on disk, including
// the current Sentinel state.
func (c *SentinelClient) FlushConfig() *StatusCmd {
cmd := NewStatusCmd("sentinel", "flushconfig")
_ = c.Process(cmd)
return cmd
}
// Master shows the state and info of the specified master.
func (c *SentinelClient) Master(name string) *StringStringMapCmd {
cmd := NewStringStringMapCmd("sentinel", "master", name)
_ = c.Process(cmd)
return cmd
}
// Masters shows a list of monitored masters and their state.
func (c *SentinelClient) Masters() *SliceCmd {
cmd := NewSliceCmd("sentinel", "masters")
_ = c.Process(cmd)
return cmd
}
// Slaves shows a list of slaves for the specified master and their state.
func (c *SentinelClient) Slaves(name string) *SliceCmd {
cmd := NewSliceCmd("sentinel", "slaves", name)
_ = c.Process(cmd)
return cmd
}
// CkQuorum checks if the current Sentinel configuration is able to reach the
// quorum needed to failover a master, and the majority needed to authorize the
// failover. This command should be used in monitoring systems to check if a
// Sentinel deployment is OK.
func (c *SentinelClient) CkQuorum(name string) *StringCmd {
cmd := NewStringCmd("sentinel", "ckquorum", name)
_ = c.Process(cmd)
return cmd
}
// Monitor tells the Sentinel to start monitoring a new master with the specified
// name, ip, port, and quorum.
func (c *SentinelClient) Monitor(name, ip, port, quorum string) *StringCmd {
cmd := NewStringCmd("sentinel", "monitor", name, ip, port, quorum)
_ = c.Process(cmd)
return cmd
}
// Set is used to change configuration parameters of a specific master.
func (c *SentinelClient) Set(name, option, value string) *StringCmd {
cmd := NewStringCmd("sentinel", "set", name, option, value)
_ = c.Process(cmd)
return cmd
}
// Remove is used to remove the specified master: the master will no longer
// be monitored, and will be completely removed from the internal state of
// the Sentinel.
func (c *SentinelClient) Remove(name string) *StringCmd {
cmd := NewStringCmd("sentinel", "remove", name)
_ = c.Process(cmd)
return cmd
}
type sentinelFailover struct {
sentinelAddrs []string
opt *Options
password string
pool *pool.ConnPool
poolOnce sync.Once
mu sync.RWMutex
masterName string
_masterAddr string
sentinel *SentinelClient
pubsub *PubSub
}
func (c *sentinelFailover) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.sentinel != nil {
return c.closeSentinel()
}
return nil
}
func (c *sentinelFailover) Pool() *pool.ConnPool {
c.poolOnce.Do(func() {
opt := *c.opt
opt.Dialer = c.dial
c.pool = newConnPool(&opt)
})
return c.pool
}
func (c *sentinelFailover) dial(ctx context.Context, network, _ string) (net.Conn, error) {
addr, err := c.MasterAddr()
if err != nil {
return nil, err
}
if c.opt.Dialer != nil {
return c.opt.Dialer(ctx, network, addr)
}
return net.DialTimeout("tcp", addr, c.opt.DialTimeout)
}
func (c *sentinelFailover) MasterAddr() (string, error) {
addr, err := c.masterAddr()
if err != nil {
return "", err
}
c.switchMaster(addr)
return addr, nil
}
func (c *sentinelFailover) masterAddr() (string, error) {
addr := c.getMasterAddr()
if addr != "" {
return addr, nil
}
c.mu.Lock()
defer c.mu.Unlock()
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(&Options{
Addr: sentinelAddr,
Password: c.password,
MaxRetries: c.opt.MaxRetries,
DialTimeout: c.opt.DialTimeout,
ReadTimeout: c.opt.ReadTimeout,
WriteTimeout: c.opt.WriteTimeout,
PoolSize: c.opt.PoolSize,
PoolTimeout: c.opt.PoolTimeout,
IdleTimeout: c.opt.IdleTimeout,
IdleCheckFrequency: c.opt.IdleCheckFrequency,
TLSConfig: c.opt.TLSConfig,
})
masterAddr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
if err != nil {
internal.Logger.Printf("sentinel: GetMasterAddrByName master=%q failed: %s",
c.masterName, err)
_ = sentinel.Close()
continue
}
// Push working sentinel to the top.
c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
c.setSentinel(sentinel)
addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
return addr, nil
}
return "", errors.New("redis: all sentinels are unreachable")
}
func (c *sentinelFailover) getMasterAddr() string {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
if sentinel == nil {
return ""
}
addr, err := sentinel.GetMasterAddrByName(c.masterName).Result()
if err != nil {
internal.Logger.Printf("sentinel: GetMasterAddrByName name=%q failed: %s",
c.masterName, err)
c.mu.Lock()
if c.sentinel == sentinel {
_ = c.closeSentinel()
}
c.mu.Unlock()
return ""
}
return net.JoinHostPort(addr[0], addr[1])
}
func (c *sentinelFailover) switchMaster(addr string) {
c.mu.RLock()
masterAddr := c._masterAddr
c.mu.RUnlock()
if masterAddr == addr {
return
}
c.mu.Lock()
defer c.mu.Unlock()
internal.Logger.Printf("sentinel: new master=%q addr=%q",
c.masterName, addr)
_ = c.Pool().Filter(func(cn *pool.Conn) bool {
return cn.RemoteAddr().String() != addr
})
c._masterAddr = addr
}
func (c *sentinelFailover) setSentinel(sentinel *SentinelClient) {
c.discoverSentinels(sentinel)
c.sentinel = sentinel
c.pubsub = sentinel.Subscribe("+switch-master")
go c.listen(c.pubsub)
}
func (c *sentinelFailover) closeSentinel() error {
firstErr := c.pubsub.Close()
c.pubsub = nil
err := c.sentinel.Close()
if err != nil && firstErr == nil {
firstErr = err
}
c.sentinel = nil
return firstErr
}
func (c *sentinelFailover) discoverSentinels(sentinel *SentinelClient) {
sentinels, err := sentinel.Sentinels(c.masterName).Result()
if err != nil {
internal.Logger.Printf("sentinel: Sentinels master=%q failed: %s", c.masterName, err)
return
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
if key == "name" {
sentinelAddr := vals[i+1].(string)
if !contains(c.sentinelAddrs, sentinelAddr) {
internal.Logger.Printf("sentinel: discovered new sentinel=%q for master=%q",
sentinelAddr, c.masterName)
c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
}
}
}
}
}
func (c *sentinelFailover) listen(pubsub *PubSub) {
ch := pubsub.Channel()
for {
msg, ok := <-ch
if !ok {
break
}
if msg.Channel == "+switch-master" {
parts := strings.Split(msg.Payload, " ")
if parts[0] != c.masterName {
internal.Logger.Printf("sentinel: ignore addr for master=%q", parts[0])
continue
}
addr := net.JoinHostPort(parts[3], parts[4])
c.switchMaster(addr)
}
}
}
func contains(slice []string, str string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}
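// Usage sketch: a failover client with hypothetical addresses. MasterName
// must match the name the Sentinels monitor; after a +switch-master event the
// pool is filtered so new commands go to the new master.
//
//   client := NewFailoverClient(&FailoverOptions{
//       MasterName:    "mymaster",
//       SentinelAddrs: []string{"localhost:26379", "localhost:26380"},
//   })
//   err := client.Ping().Err()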

142
vendor/github.com/go-redis/redis/tx.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
package redis
import (
"context"
"github.com/go-redis/redis/internal/pool"
"github.com/go-redis/redis/internal/proto"
)
// TxFailedErr is returned when a Redis transaction fails.
const TxFailedErr = proto.RedisError("redis: transaction failed")
// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets the list of watched keys.
// If you don't need WATCH, it is better to use Pipeline.
type Tx struct {
cmdable
statefulCmdable
baseClient
ctx context.Context
}
func (c *Client) newTx() *Tx {
tx := Tx{
baseClient: baseClient{
opt: c.opt,
connPool: pool.NewStickyConnPool(c.connPool.(*pool.ConnPool), true),
},
ctx: c.ctx,
}
tx.init()
return &tx
}
func (c *Tx) init() {
c.cmdable = c.Process
c.statefulCmdable = c.Process
}
func (c *Tx) Context() context.Context {
return c.ctx
}
func (c *Tx) WithContext(ctx context.Context) *Tx {
if ctx == nil {
panic("nil context")
}
clone := *c
clone.ctx = ctx
return &clone
}
func (c *Tx) Process(cmd Cmder) error {
return c.ProcessContext(c.ctx, cmd)
}
func (c *Tx) ProcessContext(ctx context.Context, cmd Cmder) error {
return c.baseClient.process(ctx, cmd)
}
// Watch prepares a transaction and marks the keys to be watched
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(fn func(*Tx) error, keys ...string) error {
tx := c.newTx()
if len(keys) > 0 {
if err := tx.Watch(keys...).Err(); err != nil {
_ = tx.Close()
return err
}
}
err := fn(tx)
_ = tx.Close()
return err
}
// Close closes the transaction, releasing any open resources.
func (c *Tx) Close() error {
_ = c.Unwatch().Err()
return c.baseClient.Close()
}
// Watch marks the keys to be watched for conditional execution
// of a transaction.
func (c *Tx) Watch(keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
args[0] = "watch"
for i, key := range keys {
args[1+i] = key
}
cmd := NewStatusCmd(args...)
_ = c.Process(cmd)
return cmd
}
// Unwatch flushes all the previously watched keys for a transaction.
func (c *Tx) Unwatch(keys ...string) *StatusCmd {
args := make([]interface{}, 1+len(keys))
args[0] = "unwatch"
for i, key := range keys {
args[1+i] = key
}
cmd := NewStatusCmd(args...)
_ = c.Process(cmd)
return cmd
}
// Pipeline creates a new pipeline. It is more convenient to use Pipelined.
func (c *Tx) Pipeline() Pipeliner {
pipe := Pipeline{
ctx: c.ctx,
exec: c.processTxPipeline,
}
pipe.init()
return &pipe
}
// Pipelined executes commands queued in the fn in a transaction.
//
// When using WATCH, EXEC will execute commands only if the watched keys
// were not modified, allowing for a check-and-set mechanism.
//
// Exec always returns a list of commands. If the transaction fails,
// TxFailedErr is returned. Otherwise Exec returns the error of the first
// failed command, or nil.
func (c *Tx) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipeline().Pipelined(fn)
}
// TxPipelined is an alias for Pipelined.
func (c *Tx) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
return c.Pipelined(fn)
}
// TxPipeline is an alias for Pipeline.
func (c *Tx) TxPipeline() Pipeliner {
return c.Pipeline()
}
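// Usage sketch: the check-and-set pattern built on Watch. The watched key is
// read, modified, and written back inside MULTI/EXEC; if another client
// changes the key first, Exec fails with TxFailedErr and the whole function
// can be retried. A minimal, hypothetical example:
//
//   err := client.Watch(func(tx *Tx) error {
//       n, err := tx.Get("key").Int64()
//       if err != nil && err != Nil {
//           return err
//       }
//       _, err = tx.Pipelined(func(pipe Pipeliner) error {
//           pipe.Set("key", n+1, 0)
//           return nil
//       })
//       return err
//   }, "key")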

191
vendor/github.com/go-redis/redis/universal.go generated vendored Normal file
View File

@@ -0,0 +1,191 @@
package redis
import (
"context"
"crypto/tls"
"net"
"time"
)
// UniversalOptions holds the information required by UniversalClient to
// establish connections.
type UniversalOptions struct {
// Either a single address or a seed list of host:port addresses
// of cluster/sentinel nodes.
Addrs []string
// Database to be selected after connecting to the server.
// Only single-node and failover clients.
DB int
// Common options.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
OnConnect func(*Conn) error
Password string
MaxRetries int
MinRetryBackoff time.Duration
MaxRetryBackoff time.Duration
DialTimeout time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
TLSConfig *tls.Config
// Only cluster clients.
MaxRedirects int
ReadOnly bool
RouteByLatency bool
RouteRandomly bool
// The sentinel master name.
// Only failover clients.
MasterName string
}
func (o *UniversalOptions) cluster() *ClusterOptions {
if len(o.Addrs) == 0 {
o.Addrs = []string{"127.0.0.1:6379"}
}
return &ClusterOptions{
Addrs: o.Addrs,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
Password: o.Password,
MaxRedirects: o.MaxRedirects,
ReadOnly: o.ReadOnly,
RouteByLatency: o.RouteByLatency,
RouteRandomly: o.RouteRandomly,
MaxRetries: o.MaxRetries,
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
TLSConfig: o.TLSConfig,
}
}
func (o *UniversalOptions) failover() *FailoverOptions {
if len(o.Addrs) == 0 {
o.Addrs = []string{"127.0.0.1:26379"}
}
return &FailoverOptions{
SentinelAddrs: o.Addrs,
MasterName: o.MasterName,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
Password: o.Password,
MaxRetries: o.MaxRetries,
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
TLSConfig: o.TLSConfig,
}
}
func (o *UniversalOptions) simple() *Options {
addr := "127.0.0.1:6379"
if len(o.Addrs) > 0 {
addr = o.Addrs[0]
}
return &Options{
Addr: addr,
Dialer: o.Dialer,
OnConnect: o.OnConnect,
DB: o.DB,
Password: o.Password,
MaxRetries: o.MaxRetries,
MinRetryBackoff: o.MinRetryBackoff,
MaxRetryBackoff: o.MaxRetryBackoff,
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
PoolTimeout: o.PoolTimeout,
IdleTimeout: o.IdleTimeout,
IdleCheckFrequency: o.IdleCheckFrequency,
TLSConfig: o.TLSConfig,
}
}
// --------------------------------------------------------------------
// UniversalClient is an abstract client which, based on the provided options,
// can connect to a cluster, a sentinel-backed failover instance, or a simple
// single-instance server. This can be useful for testing cluster-specific
// applications locally.
type UniversalClient interface {
Cmdable
Context() context.Context
AddHook(Hook)
Watch(fn func(*Tx) error, keys ...string) error
Do(args ...interface{}) *Cmd
DoContext(ctx context.Context, args ...interface{}) *Cmd
Process(cmd Cmder) error
ProcessContext(ctx context.Context, cmd Cmder) error
Subscribe(channels ...string) *PubSub
PSubscribe(channels ...string) *PubSub
Close() error
}
var _ UniversalClient = (*Client)(nil)
var _ UniversalClient = (*ClusterClient)(nil)
var _ UniversalClient = (*Ring)(nil)
// NewUniversalClient returns a new multi client. The type of the returned
// client depends on the following three conditions:
//
// 1. If a MasterName is passed, a sentinel-backed FailoverClient is returned.
// 2. If the number of Addrs is two or more, a ClusterClient is returned.
// 3. Otherwise, a single-node redis Client is returned.
func NewUniversalClient(opts *UniversalOptions) UniversalClient {
if opts.MasterName != "" {
return NewFailoverClient(opts.failover())
} else if len(opts.Addrs) > 1 {
return NewClusterClient(opts.cluster())
}
return NewClient(opts.simple())
}
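// Usage sketch: the same code can target a single node, a sentinel-backed
// failover setup, or a cluster just by changing the options. Addresses are
// hypothetical.
//
//   // Single-node client (one address, no MasterName):
//   c := NewUniversalClient(&UniversalOptions{Addrs: []string{"localhost:6379"}})
//   // Failover client (MasterName set):
//   c = NewUniversalClient(&UniversalOptions{
//       MasterName: "mymaster",
//       Addrs:      []string{"localhost:26379"},
//   })
//   // Cluster client (two or more addresses):
//   c = NewUniversalClient(&UniversalOptions{
//       Addrs: []string{"localhost:7000", "localhost:7001"},
//   })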

27
vendor/github.com/golang/snappy/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

237
vendor/github.com/golang/snappy/decode.go generated vendored Normal file
View File

@@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
}
return nil, ErrCorrupt
}
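// Usage sketch: round-tripping a block. Encode lives in this package's
// encode.go (not shown here); passing a nil dst lets Decode allocate the
// output slice.
//
//   compressed := Encode(nil, []byte("hello, snappy"))
//   plain, err := Decode(nil, compressed)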
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
}
}
}
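// Usage sketch: streaming decompression with the framing format. NewWriter
// lives in this package's encode.go (not shown here); each Write is emitted
// as a complete chunk, so no explicit flush is needed.
//
//   var buf bytes.Buffer
//   w := NewWriter(&buf)
//   _, _ = w.Write([]byte("hello, snappy stream"))
//   r := NewReader(&buf)
//   plain, err := ioutil.ReadAll(r)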

14
vendor/github.com/golang/snappy/decode_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

490
vendor/github.com/golang/snappy/decode_amd64.s generated vendored Normal file
View File

@@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
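// A minimal pure-Go sketch of the pattern-expansion technique used by
// slowForwardCopy above, assuming dst has at least 10 bytes of slack past
// d+length and that d >= offset > 0 (an illustration of the idea, not the
// library's code):
//
//	func forwardCopySketch(dst []byte, d, offset, length int) {
//		for offset < 8 {
//			// Copy one 8-byte chunk; like the MOVQ pair above, this
//			// deliberately overruns, and doubles the repeated pattern.
//			copy(dst[d:d+8], dst[d-offset:d-offset+8])
//			length -= offset
//			d += offset
//			offset += offset // d-offset is unchanged
//		}
//		for length > 0 { // now offset >= 8: plain 8-byte-at-a-time copy
//			copy(dst[d:d+8], dst[d-offset:d-offset+8])
//			d, length = d+8, length-8
//		}
//	}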

101
vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file

@@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}
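// A worked example of the tags handled above, assuming the varint length
// prefix has already been stripped (as decode requires):
//
//	src := []byte{0x10, 'h', 'e', 'l', 'l', 'o', 0x05, 0x05}
//
// decodes to "hellohello". 0x10 is a literal tag (0x10&0x03 == 0) with
// x = 0x10>>2 = 4, so the next 4+1 = 5 bytes are the literal "hello".
// 0x05 is a tagCopy1 (0x05&0x03 == 1) with length = 4 + (0x05>>2)&7 = 5
// and offset = (0x05&0xe0)<<3 | 0x05 = 5, copying "hello" from 5 bytes back.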

285
vendor/github.com/golang/snappy/encode.go generated vendored Normal file

@@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
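// A minimal round-trip sketch from a client package (error handling
// elided for brevity):
//
//	src := []byte("hello hello hello hello")
//	enc := snappy.Encode(nil, src) // a nil dst makes Encode allocate
//	dec, _ := snappy.Decode(nil, enc)
//	// string(dec) == string(src), and len(enc) < len(src) for this input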
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
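// A worked check of the bound above: for srcLen = maxBlockSize = 65536,
// n = 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, which matches the
// hard-coded maxEncodedLenOfMaxBlockSize constant in snappy.go:
//
//	MaxEncodedLen(65536) // == 76490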
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
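// A minimal usage sketch for the buffered path; calling Close (or Flush)
// is what guarantees buffered bytes reach the underlying writer:
//
//	var buf bytes.Buffer
//	w := snappy.NewBufferedWriter(&buf)
//	if _, err := w.Write([]byte("payload")); err != nil {
//		// handle err
//	}
//	if err := w.Close(); err != nil {
//		// handle err
//	}
//	// buf now holds the framed, compressed stream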
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}

29
vendor/github.com/golang/snappy/encode_amd64.go generated vendored Normal file

@@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)

730
vendor/github.com/golang/snappy/encode_amd64.s generated vendored Normal file

@@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET

238
vendor/github.com/golang/snappy/encode_other.go generated vendored Normal file

@@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
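// A worked instance of the 64±4 reasoning above: emitCopy(dst, 10, 67)
// skips the length >= 68 loop, emits a length-60 tagCopy2 (3 bytes), and
// is left with length 7, which fits a 2-byte tagCopy1 (7 < 12, 10 < 2048):
// 5 bytes total. Splitting as 64+3 instead would cost 6 bytes, because a
// length-3 copy is below tagCopy1's minimum of 4 and needs a 3-byte tagCopy2.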
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
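// For example (in-package, since extendMatch is unexported):
//
//	extendMatch([]byte("abcabcabc"), 0, 3) // == 9: src[0:6] == src[3:9] == "abcabc"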
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

87
vendor/github.com/golang/snappy/snappy.go generated vendored Normal file

@@ -0,0 +1,87 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer issued by most
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
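// Two worked tag bytes under the scheme above: 0xf4 has l = 0xf4&0x03 = 0
// (a literal tag) and m = 0xf4>>2 = 61, so the next m-59 = 2 bytes hold n
// and 1+n literal bytes follow; this is the three-byte literal header that
// emitLiteral writes as 61<<2 | tagLiteral. 0xee has l = 2 (tagCopy2) and
// m = 59, i.e. a copy of length 1+m = 60, the "length 60 copy" emitted by
// emitCopy.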
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}

21
vendor/github.com/pelletier/go-toml/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

23
vendor/github.com/pelletier/go-toml/doc.go generated vendored Normal file

@@ -0,0 +1,23 @@
// Package toml is a TOML parser and manipulation library.
//
// This version supports the specification as described in
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
//
// Marshaling
//
// Go-toml can marshal and unmarshal TOML documents from and to data
// structures.
//
// TOML document as a tree
//
// Go-toml can operate on a TOML document as a tree. Use one of the Load*
// functions to parse TOML data and obtain a Tree instance, then one of its
// methods to manipulate the tree.
//
// JSONPath-like queries
//
// The package github.com/pelletier/go-toml/query implements a system
// similar to JSONPath to quickly retrieve elements of a TOML document using a
// single expression. See the package documentation for more information.
//
package toml
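// A minimal sketch of the tree workflow described above (the [server]
// document and its keys are illustrative):
//
//	tree, err := toml.Load(`[server]
//	host = "localhost"
//	port = 8080`)
//	if err != nil {
//		// handle err
//	}
//	host := tree.Get("server.host").(string) // "localhost"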

94
vendor/github.com/pelletier/go-toml/keysparsing.go generated vendored Normal file

@@ -0,0 +1,94 @@
// Key parsing, handling both bare and quoted keys.
package toml
import (
"bytes"
"errors"
"fmt"
"unicode"
)
func parseKey(key string) ([]string, error) {
groups := []string{}
var buffer bytes.Buffer
inQuotes := false
wasInQuotes := false
escapeNext := false
ignoreSpace := true
expectDot := false
for _, char := range key {
if ignoreSpace {
if char == ' ' {
continue
}
ignoreSpace = false
}
if escapeNext {
buffer.WriteRune(char)
escapeNext = false
continue
}
switch char {
case '\\':
escapeNext = true
continue
case '"':
if inQuotes {
groups = append(groups, buffer.String())
buffer.Reset()
wasInQuotes = true
}
inQuotes = !inQuotes
expectDot = false
case '.':
if inQuotes {
buffer.WriteRune(char)
} else {
if !wasInQuotes {
if buffer.Len() == 0 {
return nil, errors.New("empty table key")
}
groups = append(groups, buffer.String())
buffer.Reset()
}
ignoreSpace = true
expectDot = false
wasInQuotes = false
}
case ' ':
if inQuotes {
buffer.WriteRune(char)
} else {
expectDot = true
}
default:
if !inQuotes && !isValidBareChar(char) {
return nil, fmt.Errorf("invalid bare character: %c", char)
}
if !inQuotes && expectDot {
return nil, errors.New("invalid key: unexpected character after whitespace")
}
buffer.WriteRune(char)
expectDot = false
}
}
if inQuotes {
return nil, errors.New("mismatched quotes")
}
if escapeNext {
return nil, errors.New("unfinished escape sequence")
}
if buffer.Len() > 0 {
groups = append(groups, buffer.String())
}
if len(groups) == 0 {
return nil, errors.New("empty key")
}
return groups, nil
}
func isValidBareChar(r rune) bool {
return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
}
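// Illustrative behavior of parseKey (unexported, shown in-package): bare
// segments split on dots, while a quoted segment keeps its dot intact.
//
//	parseKey(`a."b.c".d`) // -> []string{"a", "b.c", "d"}, nil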

651
vendor/github.com/pelletier/go-toml/lexer.go generated vendored Normal file

@@ -0,0 +1,651 @@
// TOML lexer.
//
// Written using the principles developed by Rob Pike in
// http://www.youtube.com/watch?v=HxaD_trXwRE
package toml
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
var dateRegexp *regexp.Regexp
// Define state functions
type tomlLexStateFn func() tomlLexStateFn
// Define lexer
type tomlLexer struct {
inputIdx int
input []rune // Textual source
currentTokenStart int
currentTokenStop int
tokens []token
depth int
line int
col int
endbufferLine int
endbufferCol int
}
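// The lexer follows the state-function pattern from the talk referenced
// above: each state consumes input and returns the next state, and a
// driver loops until a state returns nil. A sketch of such a driver (the
// assumed shape, not necessarily the exact code this package uses):
//
//	func (l *tomlLexer) run() {
//		for state := l.lexVoid; state != nil; {
//			state = state()
//		}
//	}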
// Basic read operations on input
func (l *tomlLexer) read() rune {
r := l.peek()
if r == '\n' {
l.endbufferLine++
l.endbufferCol = 1
} else {
l.endbufferCol++
}
l.inputIdx++
return r
}
func (l *tomlLexer) next() rune {
r := l.read()
if r != eof {
l.currentTokenStop++
}
return r
}
func (l *tomlLexer) ignore() {
l.currentTokenStart = l.currentTokenStop
l.line = l.endbufferLine
l.col = l.endbufferCol
}
func (l *tomlLexer) skip() {
l.next()
l.ignore()
}
func (l *tomlLexer) fastForward(n int) {
for i := 0; i < n; i++ {
l.next()
}
}
func (l *tomlLexer) emitWithValue(t tokenType, value string) {
l.tokens = append(l.tokens, token{
Position: Position{l.line, l.col},
typ: t,
val: value,
})
l.ignore()
}
func (l *tomlLexer) emit(t tokenType) {
l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
}
func (l *tomlLexer) peek() rune {
if l.inputIdx >= len(l.input) {
return eof
}
return l.input[l.inputIdx]
}
func (l *tomlLexer) peekString(size int) string {
maxIdx := len(l.input)
upperIdx := l.inputIdx + size // FIXME: potential overflow
if upperIdx > maxIdx {
upperIdx = maxIdx
}
return string(l.input[l.inputIdx:upperIdx])
}
func (l *tomlLexer) follow(next string) bool {
return next == l.peekString(len(next))
}
// Error management
func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
l.tokens = append(l.tokens, token{
Position: Position{l.line, l.col},
typ: tokenError,
val: fmt.Sprintf(format, args...),
})
return nil
}
// State functions
func (l *tomlLexer) lexVoid() tomlLexStateFn {
for {
next := l.peek()
switch next {
case '[':
return l.lexTableKey
case '#':
return l.lexComment(l.lexVoid)
case '=':
return l.lexEqual
case '\r':
fallthrough
case '\n':
l.skip()
continue
}
if isSpace(next) {
l.skip()
}
if l.depth > 0 {
return l.lexRvalue
}
if isKeyStartChar(next) {
return l.lexKey
}
if next == eof {
l.next()
break
}
}
l.emit(tokenEOF)
return nil
}
func (l *tomlLexer) lexRvalue() tomlLexStateFn {
for {
next := l.peek()
switch next {
case '.':
return l.errorf("cannot start float with a dot")
case '=':
return l.lexEqual
case '[':
l.depth++
return l.lexLeftBracket
case ']':
l.depth--
return l.lexRightBracket
case '{':
return l.lexLeftCurlyBrace
case '}':
return l.lexRightCurlyBrace
case '#':
return l.lexComment(l.lexRvalue)
case '"':
return l.lexString
case '\'':
return l.lexLiteralString
case ',':
return l.lexComma
case '\r':
fallthrough
case '\n':
l.skip()
if l.depth == 0 {
return l.lexVoid
}
return l.lexRvalue
case '_':
return l.errorf("cannot start number with underscore")
}
if l.follow("true") {
return l.lexTrue
}
if l.follow("false") {
return l.lexFalse
}
if isSpace(next) {
l.skip()
continue
}
if next == eof {
l.next()
break
}
possibleDate := l.peekString(35)
dateMatch := dateRegexp.FindString(possibleDate)
if dateMatch != "" {
l.fastForward(len(dateMatch))
return l.lexDate
}
if next == '+' || next == '-' || isDigit(next) {
return l.lexNumber
}
if isAlphanumeric(next) {
return l.lexKey
}
return l.errorf("no value can start with %c", next)
}
l.emit(tokenEOF)
return nil
}
func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
l.next()
l.emit(tokenLeftCurlyBrace)
return l.lexRvalue
}
func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
l.next()
l.emit(tokenRightCurlyBrace)
return l.lexRvalue
}
func (l *tomlLexer) lexDate() tomlLexStateFn {
l.emit(tokenDate)
return l.lexRvalue
}
func (l *tomlLexer) lexTrue() tomlLexStateFn {
l.fastForward(4)
l.emit(tokenTrue)
return l.lexRvalue
}
func (l *tomlLexer) lexFalse() tomlLexStateFn {
l.fastForward(5)
l.emit(tokenFalse)
return l.lexRvalue
}
func (l *tomlLexer) lexEqual() tomlLexStateFn {
l.next()
l.emit(tokenEqual)
return l.lexRvalue
}
func (l *tomlLexer) lexComma() tomlLexStateFn {
l.next()
l.emit(tokenComma)
return l.lexRvalue
}
func (l *tomlLexer) lexKey() tomlLexStateFn {
growingString := ""
for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
if r == '"' {
l.next()
str, err := l.lexStringAsString(`"`, false, true)
if err != nil {
return l.errorf(err.Error())
}
growingString += `"` + str + `"`
l.next()
continue
} else if r == '\n' {
return l.errorf("keys cannot contain new lines")
} else if isSpace(r) {
break
} else if !isValidBareChar(r) {
return l.errorf("keys cannot contain %c character", r)
}
growingString += string(r)
l.next()
}
l.emitWithValue(tokenKey, growingString)
return l.lexVoid
}
func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
return func() tomlLexStateFn {
for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
if next == '\r' && l.follow("\r\n") {
break
}
l.next()
}
l.ignore()
return previousState
}
}
func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
l.next()
l.emit(tokenLeftBracket)
return l.lexRvalue
}
func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
growingString := ""
if discardLeadingNewLine {
if l.follow("\r\n") {
l.skip()
l.skip()
} else if l.peek() == '\n' {
l.skip()
}
}
// find end of string
for {
if l.follow(terminator) {
return growingString, nil
}
next := l.peek()
if next == eof {
break
}
growingString += string(l.next())
}
return "", errors.New("unclosed string")
}
func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
l.skip()
// handle special case for triple-quote
terminator := "'"
discardLeadingNewLine := false
if l.follow("''") {
l.skip()
l.skip()
terminator = "'''"
discardLeadingNewLine = true
}
str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
if err != nil {
return l.errorf(err.Error())
}
l.emitWithValue(tokenString, str)
l.fastForward(len(terminator))
l.ignore()
return l.lexRvalue
}
// Lex a string and return the results as a string.
// Terminator is the substring indicating the end of the token.
// The resulting string does not include the terminator.
func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
growingString := ""
if discardLeadingNewLine {
if l.follow("\r\n") {
l.skip()
l.skip()
} else if l.peek() == '\n' {
l.skip()
}
}
for {
if l.follow(terminator) {
return growingString, nil
}
if l.follow("\\") {
l.next()
switch l.peek() {
case '\r':
fallthrough
case '\n':
fallthrough
case '\t':
fallthrough
case ' ':
// skip all whitespace chars following backslash
for strings.ContainsRune("\r\n\t ", l.peek()) {
l.next()
}
case '"':
growingString += "\""
l.next()
case 'n':
growingString += "\n"
l.next()
case 'b':
growingString += "\b"
l.next()
case 'f':
growingString += "\f"
l.next()
case '/':
growingString += "/"
l.next()
case 't':
growingString += "\t"
l.next()
case 'r':
growingString += "\r"
l.next()
case '\\':
growingString += "\\"
l.next()
case 'u':
l.next()
code := ""
for i := 0; i < 4; i++ {
c := l.peek()
if !isHexDigit(c) {
return "", errors.New("unfinished unicode escape")
}
l.next()
code = code + string(c)
}
intcode, err := strconv.ParseInt(code, 16, 32)
if err != nil {
return "", errors.New("invalid unicode escape: \\u" + code)
}
growingString += string(rune(intcode))
case 'U':
l.next()
code := ""
for i := 0; i < 8; i++ {
c := l.peek()
if !isHexDigit(c) {
return "", errors.New("unfinished unicode escape")
}
l.next()
code = code + string(c)
}
intcode, err := strconv.ParseInt(code, 16, 64)
if err != nil {
return "", errors.New("invalid unicode escape: \\U" + code)
}
growingString += string(rune(intcode))
default:
return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
}
} else {
r := l.peek()
if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
return "", fmt.Errorf("unescaped control character %U", r)
}
l.next()
growingString += string(r)
}
if l.peek() == eof {
break
}
}
return "", errors.New("unclosed string")
}
func (l *tomlLexer) lexString() tomlLexStateFn {
l.skip()
// handle special case for triple-quote
terminator := `"`
discardLeadingNewLine := false
acceptNewLines := false
if l.follow(`""`) {
l.skip()
l.skip()
terminator = `"""`
discardLeadingNewLine = true
acceptNewLines = true
}
str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
if err != nil {
return l.errorf("%s", err.Error())
}
l.emitWithValue(tokenString, str)
l.fastForward(len(terminator))
l.ignore()
return l.lexRvalue
}
func (l *tomlLexer) lexTableKey() tomlLexStateFn {
l.next()
if l.peek() == '[' {
// token '[[' signifies an array of tables
l.next()
l.emit(tokenDoubleLeftBracket)
return l.lexInsideTableArrayKey
}
// vanilla table key
l.emit(tokenLeftBracket)
return l.lexInsideTableKey
}
func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
if l.currentTokenStop > l.currentTokenStart {
l.emit(tokenKeyGroupArray)
}
l.next()
if l.peek() != ']' {
break
}
l.next()
l.emit(tokenDoubleRightBracket)
return l.lexVoid
case '[':
return l.errorf("table array key cannot contain ']'")
default:
l.next()
}
}
return l.errorf("unclosed table array key")
}
func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
if l.currentTokenStop > l.currentTokenStart {
l.emit(tokenKeyGroup)
}
l.next()
l.emit(tokenRightBracket)
return l.lexVoid
case '[':
return l.errorf("table key cannot contain ']'")
default:
l.next()
}
}
return l.errorf("unclosed table key")
}
func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
l.next()
l.emit(tokenRightBracket)
return l.lexRvalue
}
func (l *tomlLexer) lexNumber() tomlLexStateFn {
r := l.peek()
if r == '+' || r == '-' {
l.next()
}
pointSeen := false
expSeen := false
digitSeen := false
for {
next := l.peek()
if next == '.' {
if pointSeen {
return l.errorf("cannot have two dots in one float")
}
l.next()
if !isDigit(l.peek()) {
return l.errorf("float cannot end with a dot")
}
pointSeen = true
} else if next == 'e' || next == 'E' {
expSeen = true
l.next()
r := l.peek()
if r == '+' || r == '-' {
l.next()
}
} else if isDigit(next) {
digitSeen = true
l.next()
} else if next == '_' {
l.next()
} else {
break
}
if pointSeen && !digitSeen {
return l.errorf("cannot start float with a dot")
}
}
if !digitSeen {
return l.errorf("no digit in that number")
}
if pointSeen || expSeen {
l.emit(tokenFloat)
} else {
l.emit(tokenInteger)
}
return l.lexRvalue
}
func (l *tomlLexer) run() {
for state := l.lexVoid; state != nil; {
state = state()
}
}
func init() {
dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
}
// Entry point
func lexToml(inputBytes []byte) []token {
runes := bytes.Runes(inputBytes)
l := &tomlLexer{
input: runes,
tokens: make([]token, 0, 256),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
l.run()
return l.tokens
}
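The lexer above is a textbook instance of the state-function pattern: every lex* method returns the next state, and run loops until a state returns nil. A minimal standalone sketch of the same pattern, with illustrative names that are not part of go-toml:

package main

import "fmt"

type stateFn func(*lexer) stateFn

type lexer struct {
    input string
    pos   int
    out   []string
}

// lexStart emits one "token" per character and returns itself as the
// next state; returning nil stops the loop in run.
func lexStart(l *lexer) stateFn {
    if l.pos >= len(l.input) {
        return nil
    }
    l.out = append(l.out, string(l.input[l.pos]))
    l.pos++
    return lexStart
}

func (l *lexer) run() {
    for state := lexStart; state != nil; {
        state = state(l)
    }
}

func main() {
    l := &lexer{input: "a=1"}
    l.run()
    fmt.Println(l.out) // [a = 1]
}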

489
vendor/github.com/pelletier/go-toml/marshal.go generated vendored Normal file
View File

@@ -0,0 +1,489 @@
package toml
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"time"
)
type tomlOpts struct {
name string
include bool
omitempty bool
}
var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
// Check if the given marshal type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
return isPrimitive(mtype.Elem())
case reflect.Bool:
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Struct:
return mtype == timeType || isCustomMarshaler(mtype)
default:
return false
}
}
// Check if the given marshal type maps to a Tree slice
func isTreeSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Slice:
return !isOtherSlice(mtype)
default:
return false
}
}
// Check if the given marshal type maps to a non-Tree slice
func isOtherSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
return isOtherSlice(mtype.Elem())
case reflect.Slice:
return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
default:
return false
}
}
// Check if the given marshal type maps to a Tree
func isTree(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Map:
return true
case reflect.Struct:
return !isPrimitive(mtype)
default:
return false
}
}
func isCustomMarshaler(mtype reflect.Type) bool {
return mtype.Implements(marshalerType)
}
func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
return mval.Interface().(Marshaler).MarshalTOML()
}
// Marshaler is the interface implemented by types that
// can marshal themselves into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
/*
Marshal returns the TOML encoding of v. Behavior is similar to the Go json
encoder, except that there is no concept of a Marshaler interface or MarshalTOML
function for sub-structs, and currently only definite types can be marshaled
(i.e. no `interface{}`).
Note that pointers are automatically assigned the "omitempty" option, as TOML
explicitly does not handle null values (saying instead the label should be
dropped).
Tree structural types and corresponding marshal types:
*Tree (*)struct, (*)map[string]interface{}
[]*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
[]interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
interface{} (*)primitive
Tree primitive types and corresponding marshal types:
uint64 uint, uint8-uint64, pointers to same
int64 int, int8-int64, pointers to same
float64 float32, float64, pointers to same
string string, pointers to same
bool bool, pointers to same
time.Time time.Time{}, pointers to same
*/
func Marshal(v interface{}) ([]byte, error) {
mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Struct {
return []byte{}, errors.New("Only a struct can be marshaled to TOML")
}
sval := reflect.ValueOf(v)
if isCustomMarshaler(mtype) {
return callCustomMarshaler(sval)
}
t, err := valueToTree(mtype, sval)
if err != nil {
return []byte{}, err
}
s, err := t.ToTomlString()
return []byte(s), err
}
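A minimal usage sketch for Marshal, assuming the import path shown in this file's header; the Config type and its tags are hypothetical, and the tags follow the name,omitempty convention parsed by tomlOptions later in this file:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

// Config is a hypothetical type for illustration.
type Config struct {
    Host string  `toml:"host"`
    Port int     `toml:"port"`
    Note *string `toml:"note"` // pointer fields are implicitly omitempty
}

func main() {
    out, err := toml.Marshal(Config{Host: "localhost", Port: 5432})
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
    // host = "localhost"
    // port = 5432
}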
// Convert given marshal struct or map value to toml tree
func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
if mtype.Kind() == reflect.Ptr {
return valueToTree(mtype.Elem(), mval.Elem())
}
tval := newTree()
switch mtype.Kind() {
case reflect.Struct:
for i := 0; i < mtype.NumField(); i++ {
mtypef, mvalf := mtype.Field(i), mval.Field(i)
opts := tomlOptions(mtypef)
if opts.include && (!opts.omitempty || !isZero(mvalf)) {
val, err := valueToToml(mtypef.Type, mvalf)
if err != nil {
return nil, err
}
tval.Set(opts.name, val)
}
}
case reflect.Map:
for _, key := range mval.MapKeys() {
mvalf := mval.MapIndex(key)
val, err := valueToToml(mtype.Elem(), mvalf)
if err != nil {
return nil, err
}
tval.Set(key.String(), val)
}
}
return tval, nil
}
// Convert given marshal slice to slice of Toml trees
func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
tval := make([]*Tree, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ {
val, err := valueToTree(mtype.Elem(), mval.Index(i))
if err != nil {
return nil, err
}
tval[i] = val
}
return tval, nil
}
// Convert given marshal slice to slice of toml values
func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
tval := make([]interface{}, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ {
val, err := valueToToml(mtype.Elem(), mval.Index(i))
if err != nil {
return nil, err
}
tval[i] = val
}
return tval, nil
}
// Convert given marshal value to toml value
func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
if mtype.Kind() == reflect.Ptr {
return valueToToml(mtype.Elem(), mval.Elem())
}
switch {
case isCustomMarshaler(mtype):
return callCustomMarshaler(mval)
case isTree(mtype):
return valueToTree(mtype, mval)
case isTreeSlice(mtype):
return valueToTreeSlice(mtype, mval)
case isOtherSlice(mtype):
return valueToOtherSlice(mtype, mval)
default:
switch mtype.Kind() {
case reflect.Bool:
return mval.Bool(), nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return mval.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return mval.Uint(), nil
case reflect.Float32, reflect.Float64:
return mval.Float(), nil
case reflect.String:
return mval.String(), nil
case reflect.Struct:
return mval.Interface().(time.Time), nil
default:
return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
}
}
}
// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
// sub-structs, and only definite types can be unmarshaled.
func (t *Tree) Unmarshal(v interface{}) error {
mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
return errors.New("Only a pointer to struct can be unmarshaled from TOML")
}
sval, err := valueFromTree(mtype.Elem(), t)
if err != nil {
return err
}
reflect.ValueOf(v).Elem().Set(sval)
return nil
}
// Unmarshal parses the TOML-encoded data and stores the result in the value
// pointed to by v. Behavior is similar to the Go json decoder, except that there
// is no concept of an Unmarshaler interface or UnmarshalTOML function for
// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
// `interface{}`).
//
// See Marshal() documentation for types mapping table.
func Unmarshal(data []byte, v interface{}) error {
t, err := LoadReader(bytes.NewReader(data))
if err != nil {
return err
}
return t.Unmarshal(v)
}
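And the reverse direction, a sketch with another hypothetical struct; note that Unmarshal requires a pointer to a struct:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

// Server is a hypothetical type for illustration.
type Server struct {
    Host string `toml:"host"`
    Port int    `toml:"port"`
}

func main() {
    doc := []byte("host = \"example.com\"\nport = 8080\n")
    var s Server
    if err := toml.Unmarshal(doc, &s); err != nil {
        panic(err)
    }
    fmt.Println(s.Host, s.Port) // example.com 8080
}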
// Convert toml tree to marshal struct or map, using marshal type
func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval)
}
var mval reflect.Value
switch mtype.Kind() {
case reflect.Struct:
mval = reflect.New(mtype).Elem()
for i := 0; i < mtype.NumField(); i++ {
mtypef := mtype.Field(i)
opts := tomlOptions(mtypef)
if opts.include {
baseKey := opts.name
keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
for _, key := range keysToTry {
exists := tval.Has(key)
if !exists {
continue
}
val := tval.Get(key)
mvalf, err := valueFromToml(mtypef.Type, val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.Field(i).Set(mvalf)
break
}
}
}
case reflect.Map:
mval = reflect.MakeMap(mtype)
for _, key := range tval.Keys() {
val := tval.Get(key)
mvalf, err := valueFromToml(mtype.Elem(), val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.SetMapIndex(reflect.ValueOf(key), mvalf)
}
}
return mval, nil
}
// Convert toml value to marshal struct/map slice, using marshal type
func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ {
val, err := valueFromTree(mtype.Elem(), tval[i])
if err != nil {
return mval, err
}
mval.Index(i).Set(val)
}
return mval, nil
}
// Convert toml value to marshal primitive slice, using marshal type
func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ {
val, err := valueFromToml(mtype.Elem(), tval[i])
if err != nil {
return mval, err
}
mval.Index(i).Set(val)
}
return mval, nil
}
// Convert toml value to marshal value, using marshal type
func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval)
}
switch {
case isTree(mtype):
return valueFromTree(mtype, tval.(*Tree))
case isTreeSlice(mtype):
return valueFromTreeSlice(mtype, tval.([]*Tree))
case isOtherSlice(mtype):
return valueFromOtherSlice(mtype, tval.([]interface{}))
default:
switch mtype.Kind() {
case reflect.Bool:
val, ok := tval.(bool)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Int:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int(val)), nil
case reflect.Int8:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int8(val)), nil
case reflect.Int16:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int16(val)), nil
case reflect.Int32:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(int32(val)), nil
case reflect.Int64:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Uint:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint(val)), nil
case reflect.Uint8:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint8(val)), nil
case reflect.Uint16:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint16(val)), nil
case reflect.Uint32:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint32(val)), nil
case reflect.Uint64:
val, ok := tval.(int64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
}
return reflect.ValueOf(uint64(val)), nil
case reflect.Float32:
val, ok := tval.(float64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
}
return reflect.ValueOf(float32(val)), nil
case reflect.Float64:
val, ok := tval.(float64)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.String:
val, ok := tval.(string)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
}
return reflect.ValueOf(val), nil
case reflect.Struct:
val, ok := tval.(time.Time)
if !ok {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
}
return reflect.ValueOf(val), nil
default:
return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind())
}
}
}
func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
val, err := valueFromToml(mtype.Elem(), tval)
if err != nil {
return reflect.ValueOf(nil), err
}
mval := reflect.New(mtype.Elem())
mval.Elem().Set(val)
return mval, nil
}
func tomlOptions(vf reflect.StructField) tomlOpts {
tag := vf.Tag.Get("toml")
parse := strings.Split(tag, ",")
result := tomlOpts{vf.Name, true, false}
if parse[0] != "" {
if parse[0] == "-" && len(parse) == 1 {
result.include = false
} else {
result.name = strings.Trim(parse[0], " ")
}
}
if vf.PkgPath != "" {
result.include = false
}
if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
result.omitempty = true
}
if vf.Type.Kind() == reflect.Ptr {
result.omitempty = true
}
return result
}
func isZero(val reflect.Value) bool {
switch val.Type().Kind() {
case reflect.Map:
fallthrough
case reflect.Array:
fallthrough
case reflect.Slice:
return val.Len() == 0
default:
return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
}
}
func formatError(err error, pos Position) error {
if err.Error()[0] == '(' { // Error already contains position information
return err
}
return fmt.Errorf("%s: %s", pos, err)
}

383
vendor/github.com/pelletier/go-toml/parser.go generated vendored Normal file
View File

@@ -0,0 +1,383 @@
// TOML Parser.
package toml
import (
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
type tomlParser struct {
flowIdx int
flow []token
tree *Tree
currentTable []string
seenTableKeys []string
}
type tomlParserStateFn func() tomlParserStateFn
// Formats and panics an error message based on a token
func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
}
func (p *tomlParser) run() {
for state := p.parseStart; state != nil; {
state = state()
}
}
func (p *tomlParser) peek() *token {
if p.flowIdx >= len(p.flow) {
return nil
}
return &p.flow[p.flowIdx]
}
func (p *tomlParser) assume(typ tokenType) {
tok := p.getToken()
if tok == nil {
p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
}
if tok.typ != typ {
p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
}
}
func (p *tomlParser) getToken() *token {
tok := p.peek()
if tok == nil {
return nil
}
p.flowIdx++
return tok
}
func (p *tomlParser) parseStart() tomlParserStateFn {
tok := p.peek()
// end of stream, parsing is finished
if tok == nil {
return nil
}
switch tok.typ {
case tokenDoubleLeftBracket:
return p.parseGroupArray
case tokenLeftBracket:
return p.parseGroup
case tokenKey:
return p.parseAssign
case tokenEOF:
return nil
default:
p.raiseError(tok, "unexpected token")
}
return nil
}
func (p *tomlParser) parseGroupArray() tomlParserStateFn {
startToken := p.getToken() // discard the [[
key := p.getToken()
if key.typ != tokenKeyGroupArray {
p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
}
// get or create table array element at the indicated part in the path
keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid table array key: %s", err)
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys)
var array []*Tree
if destTree == nil {
array = make([]*Tree, 0)
} else if target, ok := destTree.([]*Tree); ok && target != nil {
array = destTree.([]*Tree)
} else {
p.raiseError(key, "key %s is already assigned and not of type table array", key)
}
p.currentTable = keys
// add a new tree to the end of the table array
newTree := newTree()
newTree.position = startToken.Position
array = append(array, newTree)
p.tree.SetPath(p.currentTable, array)
// remove all keys that were children of this table array
prefix := key.val + "."
found := false
for ii := 0; ii < len(p.seenTableKeys); {
tableKey := p.seenTableKeys[ii]
if strings.HasPrefix(tableKey, prefix) {
p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
} else {
found = (tableKey == key.val)
ii++
}
}
// keep this key name from use by other kinds of assignments
if !found {
p.seenTableKeys = append(p.seenTableKeys, key.val)
}
// move to next parser state
p.assume(tokenDoubleRightBracket)
return p.parseStart
}
func (p *tomlParser) parseGroup() tomlParserStateFn {
startToken := p.getToken() // discard the [
key := p.getToken()
if key.typ != tokenKeyGroup {
p.raiseError(key, "unexpected token %s, was expecting a table key", key)
}
for _, item := range p.seenTableKeys {
if item == key.val {
p.raiseError(key, "duplicated tables")
}
}
p.seenTableKeys = append(p.seenTableKeys, key.val)
keys, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "invalid table array key: %s", err)
}
if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
p.raiseError(key, "%s", err)
}
p.assume(tokenRightBracket)
p.currentTable = keys
return p.parseStart
}
func (p *tomlParser) parseAssign() tomlParserStateFn {
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
var tableKey []string
if len(p.currentTable) > 0 {
tableKey = p.currentTable
} else {
tableKey = []string{}
}
// find the table to assign, looking out for arrays of tables
var targetNode *Tree
switch node := p.tree.GetPath(tableKey).(type) {
case []*Tree:
targetNode = node[len(node)-1]
case *Tree:
targetNode = node
default:
p.raiseError(key, "Unknown table type for path: %s",
strings.Join(tableKey, "."))
}
// assign value to the found table
keyVals, err := parseKey(key.val)
if err != nil {
p.raiseError(key, "%s", err)
}
if len(keyVals) != 1 {
p.raiseError(key, "Invalid key")
}
keyVal := keyVals[0]
localKey := []string{keyVal}
finalKey := append(tableKey, keyVal)
if targetNode.GetPath(localKey) != nil {
p.raiseError(key, "The following key was defined twice: %s",
strings.Join(finalKey, "."))
}
var toInsert interface{}
switch value.(type) {
case *Tree, []*Tree:
toInsert = value
default:
toInsert = &tomlValue{value, key.Position}
}
targetNode.values[keyVal] = toInsert
return p.parseStart
}
var numberUnderscoreInvalidRegexp *regexp.Regexp
func cleanupNumberToken(value string) (string, error) {
if numberUnderscoreInvalidRegexp.MatchString(value) {
return "", errors.New("invalid use of _ in number")
}
cleanedVal := strings.Replace(value, "_", "", -1)
return cleanedVal, nil
}
func (p *tomlParser) parseRvalue() interface{} {
tok := p.getToken()
if tok == nil || tok.typ == tokenEOF {
p.raiseError(tok, "expecting a value")
}
switch tok.typ {
case tokenString:
return tok.val
case tokenTrue:
return true
case tokenFalse:
return false
case tokenInteger:
cleanedVal, err := cleanupNumberToken(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err := strconv.ParseInt(cleanedVal, 10, 64)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenFloat:
cleanedVal, err := cleanupNumberToken(tok.val)
if err != nil {
p.raiseError(tok, "%s", err)
}
val, err := strconv.ParseFloat(cleanedVal, 64)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenDate:
val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenLeftBracket:
return p.parseArray()
case tokenLeftCurlyBrace:
return p.parseInlineTable()
case tokenEqual:
p.raiseError(tok, "cannot have multiple equals for the same key")
case tokenError:
p.raiseError(tok, "%s", tok)
}
p.raiseError(tok, "never reached")
return nil
}
func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma
}
func (p *tomlParser) parseInlineTable() *Tree {
tree := newTree()
var previous *token
Loop:
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated inline table")
}
switch follow.typ {
case tokenRightCurlyBrace:
p.getToken()
break Loop
case tokenKey:
if !tokenIsComma(previous) && previous != nil {
p.raiseError(follow, "comma expected between fields in inline table")
}
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
tree.Set(key.val, value)
case tokenComma:
if previous == nil {
p.raiseError(follow, "inline table cannot start with a comma")
}
if tokenIsComma(previous) {
p.raiseError(follow, "need field between two commas in inline table")
}
p.getToken()
default:
p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String())
}
previous = follow
}
if tokenIsComma(previous) {
p.raiseError(previous, "trailing comma at the end of inline table")
}
return tree
}
func (p *tomlParser) parseArray() interface{} {
var array []interface{}
arrayType := reflect.TypeOf(nil)
for {
follow := p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated array")
}
if follow.typ == tokenRightBracket {
p.getToken()
break
}
val := p.parseRvalue()
if arrayType == nil {
arrayType = reflect.TypeOf(val)
}
if reflect.TypeOf(val) != arrayType {
p.raiseError(follow, "mixed types in array")
}
array = append(array, val)
follow = p.peek()
if follow == nil || follow.typ == tokenEOF {
p.raiseError(follow, "unterminated array")
}
if follow.typ != tokenRightBracket && follow.typ != tokenComma {
p.raiseError(follow, "missing comma")
}
if follow.typ == tokenComma {
p.getToken()
}
}
// An array of Trees is actually an array of inline
// tables, which is a shorthand for a table array. If the
// array was not converted from []interface{} to []*Tree,
// the two notations would not be equivalent.
if arrayType == reflect.TypeOf(newTree()) {
tomlArray := make([]*Tree, len(array))
for i, v := range array {
tomlArray[i] = v.(*Tree)
}
return tomlArray
}
return array
}
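As the comment above notes, an array of inline tables is normalized to []*Tree, so it loads to the same shape as the [[table]] notation. A quick sketch:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    inline, err := toml.Load(`points = [{x = 1}, {x = 2}]`)
    if err != nil {
        panic(err)
    }
    tables, err := toml.Load("[[points]]\nx = 1\n[[points]]\nx = 2\n")
    if err != nil {
        panic(err)
    }
    // both documents store a []*Tree under the "points" key
    fmt.Println(len(inline.Get("points").([]*toml.Tree))) // 2
    fmt.Println(len(tables.Get("points").([]*toml.Tree))) // 2
}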
func parseToml(flow []token) *Tree {
result := newTree()
result.position = Position{1, 1}
parser := &tomlParser{
flowIdx: 0,
flow: flow,
tree: result,
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
}
parser.run()
return result
}
func init() {
numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d]|_$|^_)`)
}

29
vendor/github.com/pelletier/go-toml/position.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Position support for go-toml
package toml
import (
"fmt"
)
// Position of a document element within a TOML document.
//
// Line and Col are both 1-indexed positions for the element's line number and
// column number, respectively. Values of zero or less will cause Invalid()
// to return true.
type Position struct {
Line int // line within the document
Col int // column within the line
}
// String representation of the position.
// Displays 1-indexed line and column numbers.
func (p Position) String() string {
return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
}
// Invalid returns whether the position is invalid, i.e. whether Line or Col
// is zero or negative.
func (p Position) Invalid() bool {
return p.Line <= 0 || p.Col <= 0
}
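A tiny sketch of how Position behaves; the parser embeds these values in its error messages:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    p := toml.Position{Line: 3, Col: 14}
    fmt.Println(p, p.Invalid())                             // (3, 14) false
    fmt.Println(toml.Position{}, toml.Position{}.Invalid()) // (0, 0) true
}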

140
vendor/github.com/pelletier/go-toml/token.go generated vendored Normal file
View File

@@ -0,0 +1,140 @@
package toml
import (
"fmt"
"strconv"
"unicode"
)
// Define tokens
type tokenType int
const (
eof = -(iota + 1)
)
const (
tokenError tokenType = iota
tokenEOF
tokenComment
tokenKey
tokenString
tokenInteger
tokenTrue
tokenFalse
tokenFloat
tokenEqual
tokenLeftBracket
tokenRightBracket
tokenLeftCurlyBrace
tokenRightCurlyBrace
tokenLeftParen
tokenRightParen
tokenDoubleLeftBracket
tokenDoubleRightBracket
tokenDate
tokenKeyGroup
tokenKeyGroupArray
tokenComma
tokenColon
tokenDollar
tokenStar
tokenQuestion
tokenDot
tokenDotDot
tokenEOL
)
var tokenTypeNames = []string{
"Error",
"EOF",
"Comment",
"Key",
"String",
"Integer",
"True",
"False",
"Float",
"=",
"[",
"]",
"{",
"}",
"(",
")",
"]]",
"[[",
"Date",
"KeyGroup",
"KeyGroupArray",
",",
":",
"$",
"*",
"?",
".",
"..",
"EOL",
}
type token struct {
Position
typ tokenType
val string
}
func (tt tokenType) String() string {
idx := int(tt)
if idx < len(tokenTypeNames) {
return tokenTypeNames[idx]
}
return "Unknown"
}
func (t token) Int() int {
if result, err := strconv.Atoi(t.val); err != nil {
panic(err)
} else {
return result
}
}
func (t token) String() string {
switch t.typ {
case tokenEOF:
return "EOF"
case tokenError:
return t.val
}
return fmt.Sprintf("%q", t.val)
}
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
func isAlphanumeric(r rune) bool {
return unicode.IsLetter(r) || r == '_'
}
func isKeyChar(r rune) bool {
// Keys start with the first character that isn't whitespace or [ and end
// with the last non-whitespace character before the equals sign. Keys
// cannot contain a # character."
return !(r == '\r' || r == '\n' || r == eof || r == '=')
}
func isKeyStartChar(r rune) bool {
return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
}
func isDigit(r rune) bool {
return unicode.IsNumber(r)
}
func isHexDigit(r rune) bool {
return isDigit(r) ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}

292
vendor/github.com/pelletier/go-toml/toml.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
package toml
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strings"
)
type tomlValue struct {
value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
position Position
}
// Tree is the result of the parsing of a TOML file.
type Tree struct {
values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
position Position
}
func newTree() *Tree {
return &Tree{
values: make(map[string]interface{}),
position: Position{},
}
}
// TreeFromMap initializes a new Tree object using the given map.
func TreeFromMap(m map[string]interface{}) (*Tree, error) {
result, err := toTree(m)
if err != nil {
return nil, err
}
return result.(*Tree), nil
}
// Position returns the position of the tree.
func (t *Tree) Position() Position {
return t.position
}
// Has returns a boolean indicating if the given key exists.
func (t *Tree) Has(key string) bool {
if key == "" {
return false
}
return t.HasPath(strings.Split(key, "."))
}
// HasPath returns true if the given path of keys exists, false otherwise.
func (t *Tree) HasPath(keys []string) bool {
return t.GetPath(keys) != nil
}
// Keys returns the keys of the toplevel tree (does not recurse).
func (t *Tree) Keys() []string {
keys := make([]string, len(t.values))
i := 0
for k := range t.values {
keys[i] = k
i++
}
return keys
}
// Get the value at key in the Tree.
// Key is a dot-separated path (e.g. a.b.c).
// Returns nil if the path does not exist in the tree.
// If key is empty, the current tree is returned.
func (t *Tree) Get(key string) interface{} {
if key == "" {
return t
}
comps, err := parseKey(key)
if err != nil {
return nil
}
return t.GetPath(comps)
}
// GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
func (t *Tree) GetPath(keys []string) interface{} {
if len(keys) == 0 {
return t
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return nil
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return nil
}
subtree = node[len(node)-1]
default:
return nil // cannot navigate through other node types
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.value
default:
return node
}
}
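A short sketch of the two access styles; Get takes a dotted path while GetPath takes the pre-split form:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    t, err := toml.Load("[database]\nhost = \"localhost\"")
    if err != nil {
        panic(err)
    }
    fmt.Println(t.Get("database.host"))                  // localhost
    fmt.Println(t.GetPath([]string{"database", "host"})) // localhost
    fmt.Println(t.Get("database.missing"))               // <nil>
}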
// GetPosition returns the position of the given key.
func (t *Tree) GetPosition(key string) Position {
if key == "" {
return t.position
}
return t.GetPositionPath(strings.Split(key, "."))
}
// GetPositionPath returns the position of the element in the tree indicated by 'keys'.
// If keys is of length zero, the position of the current tree is returned.
func (t *Tree) GetPositionPath(keys []string) Position {
if len(keys) == 0 {
return t.position
}
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
value, exists := subtree.values[intermediateKey]
if !exists {
return Position{0, 0}
}
switch node := value.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
}
subtree = node[len(node)-1]
default:
return Position{0, 0}
}
}
// branch based on final node type
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.position
case *Tree:
return node.position
case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
}
return node[len(node)-1].position
default:
return Position{0, 0}
}
}
// GetDefault works like Get but with a default value
func (t *Tree) GetDefault(key string, def interface{}) interface{} {
val := t.Get(key)
if val == nil {
return def
}
return val
}
// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) Set(key string, value interface{}) {
t.SetPath(strings.Split(key, "."), value)
}
// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
// Creates all necessary intermediate trees, if needed.
func (t *Tree) SetPath(keys []string, value interface{}) {
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
nextTree = newTree()
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
case *Tree:
subtree = node
case []*Tree:
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
subtree.values[intermediateKey] = append(node, newTree())
}
subtree = node[len(node)-1]
}
}
var toInsert interface{}
switch value.(type) {
case *Tree:
toInsert = value
case []*Tree:
toInsert = value
case *tomlValue:
toInsert = value
default:
toInsert = &tomlValue{value: value}
}
subtree.values[keys[len(keys)-1]] = toInsert
}
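Set creates any intermediate trees on the way to the final key, as this sketch shows:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    t, err := toml.TreeFromMap(map[string]interface{}{})
    if err != nil {
        panic(err)
    }
    t.Set("server.http.port", int64(8080)) // creates the server and http subtrees
    fmt.Println(t.Get("server.http.port")) // 8080
}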
// createSubTree takes a tree and a key and creates the necessary intermediate
// subtrees to create a subtree at that point. In-place.
//
// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
// and tree[a][b][c]
//
// Returns nil on success, error object on failure
func (t *Tree) createSubTree(keys []string, pos Position) error {
subtree := t
for _, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
tree := newTree()
tree.position = pos
subtree.values[intermediateKey] = tree
nextTree = tree
}
switch node := nextTree.(type) {
case []*Tree:
subtree = node[len(node)-1]
case *Tree:
subtree = node
default:
return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
}
}
return nil
}
// LoadBytes creates a Tree from a []byte.
func LoadBytes(b []byte) (tree *Tree, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
err = errors.New(r.(string))
}
}()
tree = parseToml(lexToml(b))
return
}
// LoadReader creates a Tree from any io.Reader.
func LoadReader(reader io.Reader) (tree *Tree, err error) {
inputBytes, err := ioutil.ReadAll(reader)
if err != nil {
return
}
tree, err = LoadBytes(inputBytes)
return
}
// Load creates a Tree from a string.
func Load(content string) (tree *Tree, err error) {
return LoadBytes([]byte(content))
}
// LoadFile creates a Tree from a file.
func LoadFile(path string) (tree *Tree, err error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
return LoadReader(file)
}
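The loaders layer on each other: LoadFile opens the file and defers to LoadReader, which reads everything into LoadBytes. A sketch of the string entry point together with GetDefault:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    t, err := toml.Load(`title = "example"`)
    if err != nil {
        panic(err)
    }
    fmt.Println(t.Get("title"))                    // example
    fmt.Println(t.GetDefault("author", "unknown")) // unknown
}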

142
vendor/github.com/pelletier/go-toml/tomltree_create.go generated vendored Normal file
View File

@@ -0,0 +1,142 @@
package toml
import (
"fmt"
"reflect"
"time"
)
var kindToType = [reflect.String + 1]reflect.Type{
reflect.Bool: reflect.TypeOf(true),
reflect.String: reflect.TypeOf(""),
reflect.Float32: reflect.TypeOf(float64(1)),
reflect.Float64: reflect.TypeOf(float64(1)),
reflect.Int: reflect.TypeOf(int64(1)),
reflect.Int8: reflect.TypeOf(int64(1)),
reflect.Int16: reflect.TypeOf(int64(1)),
reflect.Int32: reflect.TypeOf(int64(1)),
reflect.Int64: reflect.TypeOf(int64(1)),
reflect.Uint: reflect.TypeOf(uint64(1)),
reflect.Uint8: reflect.TypeOf(uint64(1)),
reflect.Uint16: reflect.TypeOf(uint64(1)),
reflect.Uint32: reflect.TypeOf(uint64(1)),
reflect.Uint64: reflect.TypeOf(uint64(1)),
}
// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
// supported values:
// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
func typeFor(k reflect.Kind) reflect.Type {
if k > 0 && int(k) < len(kindToType) {
return kindToType[k]
}
return nil
}
func simpleValueCoercion(object interface{}) (interface{}, error) {
switch original := object.(type) {
case string, bool, int64, uint64, float64, time.Time:
return original, nil
case int:
return int64(original), nil
case int8:
return int64(original), nil
case int16:
return int64(original), nil
case int32:
return int64(original), nil
case uint:
return uint64(original), nil
case uint8:
return uint64(original), nil
case uint16:
return uint64(original), nil
case uint32:
return uint64(original), nil
case float32:
return float64(original), nil
case fmt.Stringer:
return original.String(), nil
default:
return nil, fmt.Errorf("cannot convert type %T to Tree", object)
}
}
func sliceToTree(object interface{}) (interface{}, error) {
// arrays are a bit tricky, since they can represent either a
// collection of simple values, which is represented by one
// *tomlValue, or an array of tables, which is represented by an
// array of *Tree.
// This assumes the function is called from toTree only when value.Kind() is Array or Slice.
value := reflect.ValueOf(object)
insideType := value.Type().Elem()
length := value.Len()
if length > 0 {
insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
}
if insideType.Kind() == reflect.Map {
// this is considered as an array of tables
tablesArray := make([]*Tree, 0, length)
for i := 0; i < length; i++ {
table := value.Index(i)
tree, err := toTree(table.Interface())
if err != nil {
return nil, err
}
tablesArray = append(tablesArray, tree.(*Tree))
}
return tablesArray, nil
}
sliceType := typeFor(insideType.Kind())
if sliceType == nil {
sliceType = insideType
}
arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
for i := 0; i < length; i++ {
val := value.Index(i).Interface()
simpleValue, err := simpleValueCoercion(val)
if err != nil {
return nil, err
}
arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
}
return &tomlValue{arrayValue.Interface(), Position{}}, nil
}
func toTree(object interface{}) (interface{}, error) {
value := reflect.ValueOf(object)
if value.Kind() == reflect.Map {
values := map[string]interface{}{}
keys := value.MapKeys()
for _, key := range keys {
if key.Kind() != reflect.String {
if _, ok := key.Interface().(string); !ok {
return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
}
}
v := value.MapIndex(key)
newValue, err := toTree(v.Interface())
if err != nil {
return nil, err
}
values[key.String()] = newValue
}
return &Tree{values, Position{}}, nil
}
if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
return sliceToTree(object)
}
simpleValue, err := simpleValueCoercion(object)
if err != nil {
return nil, err
}
return &tomlValue{simpleValue, Position{}}, nil
}

233
vendor/github.com/pelletier/go-toml/tomltree_write.go generated vendored Normal file
View File

@@ -0,0 +1,233 @@
package toml
import (
"bytes"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
// encodes a string to a TOML-compliant string value
func encodeTomlString(value string) string {
var b bytes.Buffer
for _, rr := range value {
switch rr {
case '\b':
b.WriteString(`\b`)
case '\t':
b.WriteString(`\t`)
case '\n':
b.WriteString(`\n`)
case '\f':
b.WriteString(`\f`)
case '\r':
b.WriteString(`\r`)
case '"':
b.WriteString(`\"`)
case '\\':
b.WriteString(`\\`)
default:
intRr := uint16(rr)
if intRr < 0x001F {
b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
} else {
b.WriteRune(rr)
}
}
}
return b.String()
}
func tomlValueStringRepresentation(v interface{}) (string, error) {
switch value := v.(type) {
case uint64:
return strconv.FormatUint(value, 10), nil
case int64:
return strconv.FormatInt(value, 10), nil
case float64:
// Ensure a round float does contain a decimal point. Otherwise feeding
// the output back to the parser would convert to an integer.
if math.Trunc(value) == value {
return strconv.FormatFloat(value, 'f', 1, 64), nil
}
return strconv.FormatFloat(value, 'f', -1, 64), nil
case string:
return "\"" + encodeTomlString(value) + "\"", nil
case []byte:
b, _ := v.([]byte)
return tomlValueStringRepresentation(string(b))
case bool:
if value {
return "true", nil
}
return "false", nil
case time.Time:
return value.Format(time.RFC3339), nil
case nil:
return "", nil
}
rv := reflect.ValueOf(v)
if rv.Kind() == reflect.Slice {
values := []string{}
for i := 0; i < rv.Len(); i++ {
item := rv.Index(i).Interface()
itemRepr, err := tomlValueStringRepresentation(item)
if err != nil {
return "", err
}
values = append(values, itemRepr)
}
return "[" + strings.Join(values, ",") + "]", nil
}
return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}
func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
simpleValuesKeys := make([]string, 0)
complexValuesKeys := make([]string, 0)
for k := range t.values {
v := t.values[k]
switch v.(type) {
case *Tree, []*Tree:
complexValuesKeys = append(complexValuesKeys, k)
default:
simpleValuesKeys = append(simpleValuesKeys, k)
}
}
sort.Strings(simpleValuesKeys)
sort.Strings(complexValuesKeys)
for _, k := range simpleValuesKeys {
v, ok := t.values[k].(*tomlValue)
if !ok {
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
}
repr, err := tomlValueStringRepresentation(v.value)
if err != nil {
return bytesCount, err
}
writtenBytesCount, err := writeStrings(w, indent, k, " = ", repr, "\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
}
for _, k := range complexValuesKeys {
v := t.values[k]
combinedKey := k
if keyspace != "" {
combinedKey = keyspace + "." + combinedKey
}
switch node := v.(type) {
// node has to be of those two types given how keys are sorted above
case *Tree:
writtenBytesCount, err := writeStrings(w, "\n", indent, "[", combinedKey, "]\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount)
if err != nil {
return bytesCount, err
}
case []*Tree:
for _, subTree := range node {
writtenBytesCount, err := writeStrings(w, "\n", indent, "[[", combinedKey, "]]\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
}
bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount)
if err != nil {
return bytesCount, err
}
}
}
}
return bytesCount, nil
}
func writeStrings(w io.Writer, s ...string) (int, error) {
var n int
for i := range s {
b, err := io.WriteString(w, s[i])
n += b
if err != nil {
return n, err
}
}
return n, nil
}
// WriteTo encodes the Tree as TOML and writes it to the writer w.
// It returns the number of bytes written on success, or an error if one occurred.
func (t *Tree) WriteTo(w io.Writer) (int64, error) {
return t.writeTo(w, "", "", 0)
}
// ToTomlString generates a human-readable representation of the current tree.
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToTomlString returns a non-nil error.
func (t *Tree) ToTomlString() (string, error) {
var buf bytes.Buffer
_, err := t.WriteTo(&buf)
if err != nil {
return "", err
}
return buf.String(), nil
}
// String generates a human-readable representation of the current tree.
// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
func (t *Tree) String() string {
result, _ := t.ToTomlString()
return result
}
// ToMap recursively generates a representation of the tree using Go built-in structures.
// The following types are used:
//
// * bool
// * float64
// * int64
// * string
// * uint64
// * time.Time
// * map[string]interface{} (where interface{} is any of this list)
// * []interface{} (where interface{} is any of this list)
func (t *Tree) ToMap() map[string]interface{} {
result := map[string]interface{}{}
for k, v := range t.values {
switch node := v.(type) {
case []*Tree:
var array []interface{}
for _, item := range node {
array = append(array, item.ToMap())
}
result[k] = array
case *Tree:
result[k] = node.ToMap()
case *tomlValue:
result[k] = node.value
}
}
return result
}
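ToMap is handy when handing the parsed document to code that expects plain Go structures; a sketch:

package main

import (
    "fmt"

    "github.com/pelletier/go-toml"
)

func main() {
    t, err := toml.Load("[owner]\nname = \"gopher\"")
    if err != nil {
        panic(err)
    }
    m := t.ToMap()
    owner := m["owner"].(map[string]interface{})
    fmt.Println(owner["name"]) // gopher
}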

20
vendor/github.com/siddontang/go/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2014 siddontang
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

27
vendor/github.com/siddontang/go/filelock/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,17 @@
// Copyright 2012 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package filelock
import (
"fmt"
"io"
"runtime"
)
func Lock(name string) (io.Closer, error) {
return nil, fmt.Errorf("leveldb/db: file locking is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

View File

@@ -0,0 +1,43 @@
// Copyright 2014 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package filelock
import (
"io"
"os"
"syscall"
)
// lockCloser hides all of an os.File's methods, except for Close.
type lockCloser struct {
f *os.File
}
func (l lockCloser) Close() error {
return l.f.Close()
}
func Lock(name string) (io.Closer, error) {
f, err := os.Create(name)
if err != nil {
return nil, err
}
spec := syscall.Flock_t{
Type: syscall.F_WRLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0, // 0 means to lock the entire file.
Pid: int32(os.Getpid()),
}
if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil {
f.Close()
return nil, err
}
return lockCloser{f}, nil
}

View File

@@ -0,0 +1,51 @@
// Copyright 2014 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd
package filelock
import (
"io"
"os"
"syscall"
)
// lockCloser hides all of an os.File's methods, except for Close.
type lockCloser struct {
f *os.File
}
func (l lockCloser) Close() error {
return l.f.Close()
}
func Lock(name string) (io.Closer, error) {
f, err := os.Create(name)
if err != nil {
return nil, err
}
/*
Some systems reportedly lack FcntlFlock, so use flock here.
*/
if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
f.Close()
return nil, err
}
// spec := syscall.Flock_t{
// Type: syscall.F_WRLCK,
// Whence: int16(os.SEEK_SET),
// Start: 0,
// Len: 0, // 0 means to lock the entire file.
// Pid: int32(os.Getpid()),
// }
// if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil {
// f.Close()
// return nil, err
// }
return lockCloser{f}, nil
}
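A usage sketch for this package; the lock-file path is illustrative:

package main

import (
    "log"

    "github.com/siddontang/go/filelock"
)

func main() {
    // Lock creates (or truncates) the named file and takes an exclusive,
    // non-blocking lock; Close releases it.
    closer, err := filelock.Lock("/tmp/example.lock")
    if err != nil {
        log.Fatalf("could not take lock: %v", err)
    }
    defer closer.Close()
    // ... critical section ...
}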

View File

@@ -0,0 +1,36 @@
// Copyright 2013 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package filelock
import (
"io"
"syscall"
)
// lockCloser hides all of a syscall.Handle's methods, except for Close.
type lockCloser struct {
fd syscall.Handle
}
func (l lockCloser) Close() error {
return syscall.Close(l.fd)
}
func Lock(name string) (io.Closer, error) {
p, err := syscall.UTF16PtrFromString(name)
if err != nil {
return nil, err
}
fd, err := syscall.CreateFile(p,
syscall.GENERIC_READ|syscall.GENERIC_WRITE,
0, nil, syscall.CREATE_ALWAYS,
syscall.FILE_ATTRIBUTE_NORMAL,
0,
)
if err != nil {
return nil, err
}
return lockCloser{fd: fd}, nil
}

27
vendor/github.com/siddontang/go/hack/hack.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package hack
import (
"reflect"
"unsafe"
)
// String converts a byte slice to a string without copying.
// Use at your own risk: the string shares the slice's backing array.
func String(b []byte) (s string) {
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Len
return
}
// Slice converts a string to a byte slice without copying.
// Use at your own risk: writing to the result may fault, since string data can live in read-only memory.
func Slice(s string) (b []byte) {
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pbytes.Data = pstring.Data
pbytes.Len = pstring.Len
pbytes.Cap = pstring.Len
return
}
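These conversions alias memory rather than copy it, so the usual immutability guarantees no longer hold. A sketch of both the use and the hazard:

package main

import (
    "fmt"

    "github.com/siddontang/go/hack"
)

func main() {
    b := []byte("hello")
    s := hack.String(b) // no allocation: s shares b's backing array
    b[0] = 'H'
    fmt.Println(s) // prints Hello: the "immutable" string changed underneath us

    // Never write through hack.Slice: the bytes of a string literal may
    // live in read-only memory, and a write would fault.
    rb := hack.Slice("read-only")
    fmt.Println(len(rb), cap(rb)) // 9 9
}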

39
vendor/github.com/siddontang/go/ioutil2/ioutil.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ioutil2
import (
"io"
"io/ioutil"
"os"
"path"
)
// WriteFileAtomic writes data to a temporary file and atomically renames it into place once everything succeeds.
func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
dir, name := path.Dir(filename), path.Base(filename)
f, err := ioutil.TempFile(dir, name)
if err != nil {
return err
}
n, err := f.Write(data)
f.Close()
if err == nil && n < len(data) {
err = io.ErrShortWrite
} else {
err = os.Chmod(f.Name(), perm)
}
if err != nil {
os.Remove(f.Name())
return err
}
return os.Rename(f.Name(), filename)
}
// FileExists reports whether the named file exists.
func FileExists(name string) bool {
_, err := os.Stat(name)
return !os.IsNotExist(err)
}
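A usage sketch for both helpers; the path is illustrative:

package main

import (
    "fmt"
    "log"

    "github.com/siddontang/go/ioutil2"
)

func main() {
    path := "/tmp/config.toml"
    // Readers see either the old file or the complete new one, never a torn write.
    if err := ioutil2.WriteFileAtomic(path, []byte("port = 8080\n"), 0644); err != nil {
        log.Fatal(err)
    }
    fmt.Println(ioutil2.FileExists(path)) // true
}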

View File

@@ -0,0 +1,69 @@
package ioutil2
import (
"errors"
"io"
)
var ErrExceedLimit = errors.New("write exceed limit")
func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
return &SectionWriter{w, off, off, off + n}
}
type SectionWriter struct {
w io.WriterAt
base int64
off int64
limit int64
}
func (s *SectionWriter) Write(p []byte) (n int, err error) {
if s.off >= s.limit {
return 0, ErrExceedLimit
}
if max := s.limit - s.off; int64(len(p)) > max {
return 0, ErrExceedLimit
}
n, err = s.w.WriteAt(p, s.off)
s.off += int64(n)
return
}
var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")
func (s *SectionWriter) Seek(offset int64, whence int) (int64, error) {
switch whence {
default:
return 0, errWhence
case 0:
offset += s.base
case 1:
offset += s.off
case 2:
offset += s.limit
}
if offset < s.base {
return 0, errOffset
}
s.off = offset
return offset - s.base, nil
}
func (s *SectionWriter) WriteAt(p []byte, off int64) (n int, err error) {
if off < 0 || off >= s.limit-s.base {
return 0, errOffset
}
off += s.base
if max := s.limit - off; int64(len(p)) > max {
return 0, ErrExceedLimit
}
return s.w.WriteAt(p, off)
}
// Size returns the size of the section in bytes.
func (s *SectionWriter) Size() int64 { return s.limit - s.base }
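SectionWriter is the writing counterpart of io.SectionReader: it confines writes to the byte range [off, off+n) of an io.WriterAt. A sketch against a temp file:

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    "github.com/siddontang/go/ioutil2"
)

func main() {
    f, err := ioutil.TempFile("", "section")
    if err != nil {
        panic(err)
    }
    defer os.Remove(f.Name())
    f.Truncate(16)

    // only bytes 4..11 of the file are writable through w
    w := ioutil2.NewSectionWriter(f, 4, 8)
    if _, err := w.Write([]byte("12345678")); err != nil {
        panic(err)
    }
    if _, err := w.Write([]byte("x")); err != nil {
        fmt.Println(err) // the section is full, so ErrExceedLimit is returned
    }
}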

67
vendor/github.com/siddontang/go/num/bytes.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
package num
import (
"encoding/binary"
)
// All helpers use big-endian byte order.
func BytesToUint16(b []byte) uint16 {
return binary.BigEndian.Uint16(b)
}
func Uint16ToBytes(u uint16) []byte {
buf := make([]byte, 2)
binary.BigEndian.PutUint16(buf, u)
return buf
}
func BytesToUint32(b []byte) uint32 {
return binary.BigEndian.Uint32(b)
}
func Uint32ToBytes(u uint32) []byte {
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, u)
return buf
}
func BytesToUint64(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
func Uint64ToBytes(u uint64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, u)
return buf
}
func BytesToInt16(b []byte) int16 {
return int16(binary.BigEndian.Uint16(b))
}
func Int16ToBytes(u int16) []byte {
buf := make([]byte, 2)
binary.BigEndian.PutUint16(buf, uint16(u))
return buf
}
func BytesToInt32(b []byte) int32 {
return int32(binary.BigEndian.Uint32(b))
}
func Int32ToBytes(u int32) []byte {
buf := make([]byte, 4)
binary.BigEndian.PutUint32(buf, uint32(u))
return buf
}
func BytesToInt64(b []byte) int64 {
return int64(binary.BigEndian.Uint64(b))
}
func Int64ToBytes(u int64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(u))
return buf
}
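A round-trip sketch for the big-endian helpers:

package main

import (
    "fmt"

    "github.com/siddontang/go/num"
)

func main() {
    b := num.Uint32ToBytes(0xDEADBEEF)
    fmt.Printf("% X\n", b)                          // DE AD BE EF (big-endian)
    fmt.Println(num.BytesToUint32(b) == 0xDEADBEEF) // true
}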

161
vendor/github.com/siddontang/go/num/cmp.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
package num
func MinUint(a uint, b uint) uint {
if a > b {
return b
} else {
return a
}
}
func MaxUint(a uint, b uint) uint {
if a > b {
return a
} else {
return b
}
}
func MinInt(a int, b int) int {
if a > b {
return b
} else {
return a
}
}
func MaxInt(a int, b int) int {
if a > b {
return a
} else {
return b
}
}
func MinUint8(a uint8, b uint8) uint8 {
if a > b {
return b
} else {
return a
}
}
func MaxUint8(a uint8, b uint8) uint8 {
if a > b {
return a
} else {
return b
}
}
func MinInt8(a int8, b int8) int8 {
if a > b {
return b
} else {
return a
}
}
func MaxInt8(a int8, b int8) int8 {
if a > b {
return a
} else {
return b
}
}
func MinUint16(a uint16, b uint16) uint16 {
if a > b {
return b
} else {
return a
}
}
func MaxUint16(a uint16, b uint16) uint16 {
if a > b {
return a
} else {
return b
}
}
func MinInt16(a int16, b int16) int16 {
if a > b {
return b
} else {
return a
}
}
func MaxInt16(a int16, b int16) int16 {
if a > b {
return a
} else {
return b
}
}
func MinUint32(a uint32, b uint32) uint32 {
if a > b {
return b
} else {
return a
}
}
func MaxUint32(a uint32, b uint32) uint32 {
if a > b {
return a
} else {
return b
}
}
func MinInt32(a int32, b int32) int32 {
if a > b {
return b
} else {
return a
}
}
func MaxInt32(a int32, b int32) int32 {
if a > b {
return a
} else {
return b
}
}
func MinUint64(a uint64, b uint64) uint64 {
if a > b {
return b
} else {
return a
}
}
func MaxUint64(a uint64, b uint64) uint64 {
if a > b {
return a
} else {
return b
}
}
func MinInt64(a int64, b int64) int64 {
if a > b {
return b
} else {
return a
}
}
func MaxInt64(a int64, b int64) int64 {
if a > b {
return a
} else {
return b
}
}

157
vendor/github.com/siddontang/go/num/str.go generated vendored Normal file
View File

@@ -0,0 +1,157 @@
package num
import (
"strconv"
)
func ParseUint(s string) (uint, error) {
if v, err := strconv.ParseUint(s, 10, 0); err != nil {
return 0, err
} else {
return uint(v), nil
}
}
func ParseUint8(s string) (uint8, error) {
if v, err := strconv.ParseUint(s, 10, 8); err != nil {
return 0, err
} else {
return uint8(v), nil
}
}
func ParseUint16(s string) (uint16, error) {
if v, err := strconv.ParseUint(s, 10, 16); err != nil {
return 0, err
} else {
return uint16(v), nil
}
}
func ParseUint32(s string) (uint32, error) {
if v, err := strconv.ParseUint(s, 10, 32); err != nil {
return 0, err
} else {
return uint32(v), nil
}
}
func ParseUint64(s string) (uint64, error) {
return strconv.ParseUint(s, 10, 64)
}
func ParseInt(s string) (int, error) {
if v, err := strconv.ParseInt(s, 10, 0); err != nil {
return 0, err
} else {
return int(v), nil
}
}
func ParseInt8(s string) (int8, error) {
if v, err := strconv.ParseInt(s, 10, 8); err != nil {
return 0, err
} else {
return int8(v), nil
}
}
func ParseInt16(s string) (int16, error) {
if v, err := strconv.ParseInt(s, 10, 16); err != nil {
return 0, err
} else {
return int16(v), nil
}
}
func ParseInt32(s string) (int32, error) {
if v, err := strconv.ParseInt(s, 10, 32); err != nil {
return 0, err
} else {
return int32(v), nil
}
}
func ParseInt64(s string) (int64, error) {
return strconv.ParseInt(s, 10, 64)
}
func FormatInt(v int) string {
return strconv.FormatInt(int64(v), 10)
}
func FormatInt8(v int8) string {
return strconv.FormatInt(int64(v), 10)
}
func FormatInt16(v int16) string {
return strconv.FormatInt(int64(v), 10)
}
func FormatInt32(v int32) string {
return strconv.FormatInt(int64(v), 10)
}
func FormatInt64(v int64) string {
return strconv.FormatInt(int64(v), 10)
}
func FormatUint(v uint) string {
return strconv.FormatUint(uint64(v), 10)
}
func FormatUint8(v uint8) string {
return strconv.FormatUint(uint64(v), 10)
}
func FormatUint16(v uint16) string {
return strconv.FormatUint(uint64(v), 10)
}
func FormatUint32(v uint32) string {
return strconv.FormatUint(uint64(v), 10)
}
func FormatUint64(v uint64) string {
return strconv.FormatUint(uint64(v), 10)
}
func FormatIntToSlice(v int) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
func FormatInt8ToSlice(v int8) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
func FormatInt16ToSlice(v int16) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
func FormatInt32ToSlice(v int32) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
func FormatInt64ToSlice(v int64) []byte {
return strconv.AppendInt(nil, int64(v), 10)
}
func FormatUintToSlice(v uint) []byte {
return strconv.AppendUint(nil, uint64(v), 10)
}
func FormatUint8ToSlice(v uint8) []byte {
return strconv.AppendUint(nil, uint64(v), 10)
}
func FormatUint16ToSlice(v uint16) []byte {
return strconv.AppendUint(nil, uint64(v), 10)
}
func FormatUint32ToSlice(v uint32) []byte {
return strconv.AppendUint(nil, uint64(v), 10)
}
func FormatUint64ToSlice(v uint64) []byte {
return strconv.AppendUint(nil, uint64(v), 10)
}
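// Editor's note (illustrative): the Parse and Format helpers are symmetric
// wrappers over strconv, e.g.
//
//	s := FormatInt32(-42)     // "-42"
//	v, err := ParseInt32(s)   // v == -42, err == nil
//	_, err = ParseInt8("300") // err != nil: 300 overflows int8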

27
vendor/github.com/siddontang/go/snappy/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

124
vendor/github.com/siddontang/go/snappy/decode.go generated vendored Normal file
View File

@@ -0,0 +1,124 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
)
// ErrCorrupt reports that the input is invalid.
var ErrCorrupt = errors.New("snappy: corrupt input")
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n == 0 {
return 0, 0, ErrCorrupt
}
if uint64(int(v)) != v {
return 0, 0, errors.New("snappy: decoded block is too large")
}
return int(v), n, nil
}
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if len(dst) < dLen {
dst = make([]byte, dLen)
}
var d, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint(src[s] >> 2)
switch {
case x < 60:
s += 1
case x == 60:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-1])
case x == 61:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-2]) | uint(src[s-1])<<8
case x == 62:
s += 4
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
case x == 63:
s += 5
if s > len(src) {
return nil, ErrCorrupt
}
x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
}
length = int(x + 1)
if length <= 0 {
return nil, errors.New("snappy: unsupported literal length")
}
if length > len(dst)-d || length > len(src)-s {
return nil, ErrCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if s > len(src) {
return nil, ErrCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
case tagCopy2:
s += 3
if s > len(src) {
return nil, ErrCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(src[s-2]) | int(src[s-1])<<8
case tagCopy4:
return nil, errors.New("snappy: unsupported COPY_4 tag")
}
end := d + length
if offset > d || end > len(dst) {
return nil, ErrCorrupt
}
for ; d < end; d++ {
dst[d] = dst[d-offset]
}
}
if d != dLen {
return nil, ErrCorrupt
}
return dst[:d], nil
}

174
vendor/github.com/siddontang/go/snappy/encode.go generated vendored Normal file
View File

@@ -0,0 +1,174 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
)
// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15
// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
case n < 1<<16:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
case n < 1<<24:
dst[0] = 62<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
i = 4
case int64(n) < 1<<32:
dst[0] = 63<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
dst[3] = uint8(n >> 16)
dst[4] = uint8(n >> 24)
i = 5
default:
panic("snappy: source buffer is too long")
}
if copy(dst[i:], lit) != len(lit) {
panic("snappy: destination buffer is too short")
}
return i + len(lit)
}
// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
i := 0
for length > 0 {
x := length - 4
if 0 <= x && x < 1<<3 && offset < 1<<11 {
dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
dst[i+1] = uint8(offset)
i += 2
break
}
x = length
if x > 1<<6 {
x = 1 << 6
}
dst[i+0] = uint8(x-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= x
}
return i
}
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) {
if n := MaxEncodedLen(len(src)); len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
// Return early if src is short.
if len(src) <= 4 {
if len(src) != 0 {
d += emitLiteral(dst[d:], src)
}
return dst[:d], nil
}
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
const maxTableSize = 1 << 14
shift, tableSize := uint(32-8), 1<<8
for tableSize < maxTableSize && tableSize < len(src) {
shift--
tableSize *= 2
}
var table [maxTableSize]int
// Iterate over the source bytes.
var (
s int // The iterator position.
t int // The last position with the same hash as s.
lit int // The start position of any pending literal bytes.
)
for s+3 < len(src) {
// Update the hash table.
b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
p := &table[(h*0x1e35a7bd)>>shift]
// We need to store values in [-1, inf) in table. To save
// some initialization time, (re)use the table's zero value
// and shift the values against this zero: add 1 on writes,
// subtract 1 on reads.
t, *p = *p-1, s+1
// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
s++
continue
}
// Otherwise, we have a match. First, emit any pending literal bytes.
if lit != s {
d += emitLiteral(dst[d:], src[lit:s])
}
// Extend the match to be as long as possible.
s0 := s
s, t = s+4, t+4
for s < len(src) && src[s] == src[t] {
s++
t++
}
// Emit the copied bytes.
d += emitCopy(dst[d:], s-t, s-s0)
lit = s
}
// Emit any final pending literal bytes and return.
if lit != len(src) {
d += emitLiteral(dst[d:], src[lit:])
}
return dst[:d], nil
}
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
return 32 + srcLen + srcLen/6
}
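// Editor's sketch (illustrative, not part of the vendored file): Encode and
// Decode are inverses, and MaxEncodedLen bounds the worst case; for
// srcLen = 6 the bound is 32 + 6 + 6/6 = 39 bytes.
func exampleRoundTrip() bool {
	src := []byte("hello, hello, hello, snappy")
	enc, _ := Encode(nil, src)        // nil dst is valid; Encode allocates as needed
	dec, _ := Decode(nil, enc)        // likewise for Decode
	return string(dec) == string(src) // true
}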

38
vendor/github.com/siddontang/go/snappy/snappy.go generated vendored Normal file
View File

@@ -0,0 +1,38 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
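// Editor's worked example: for a first chunk byte of 0xF0, l = 0xF0 & 0x03 = 0
// (tagLiteral) and m = 0xF0 >> 2 = 60, so the next m - 59 = 1 byte holds the
// little-endian length n, and the 1 + n bytes after that are literal bytes.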

146
vendor/github.com/siddontang/go/sync2/atomic.go generated vendored Normal file
View File

@@ -0,0 +1,146 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync2
import (
"sync"
"sync/atomic"
"time"
)
type AtomicInt32 int32
func (i *AtomicInt32) Add(n int32) int32 {
return atomic.AddInt32((*int32)(i), n)
}
func (i *AtomicInt32) Set(n int32) {
atomic.StoreInt32((*int32)(i), n)
}
func (i *AtomicInt32) Get() int32 {
return atomic.LoadInt32((*int32)(i))
}
func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
}
type AtomicUint32 uint32
func (i *AtomicUint32) Add(n uint32) uint32 {
return atomic.AddUint32((*uint32)(i), n)
}
func (i *AtomicUint32) Set(n uint32) {
atomic.StoreUint32((*uint32)(i), n)
}
func (i *AtomicUint32) Get() uint32 {
return atomic.LoadUint32((*uint32)(i))
}
func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
}
type AtomicInt64 int64
func (i *AtomicInt64) Add(n int64) int64 {
return atomic.AddInt64((*int64)(i), n)
}
func (i *AtomicInt64) Set(n int64) {
atomic.StoreInt64((*int64)(i), n)
}
func (i *AtomicInt64) Get() int64 {
return atomic.LoadInt64((*int64)(i))
}
func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
}
type AtomicUint64 uint64
func (i *AtomicUint64) Add(n uint64) uint64 {
return atomic.AddUint64((*uint64)(i), n)
}
func (i *AtomicUint64) Set(n uint64) {
atomic.StoreUint64((*uint64)(i), n)
}
func (i *AtomicUint64) Get() uint64 {
return atomic.LoadUint64((*uint64)(i))
}
func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) {
return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval)
}
type AtomicDuration int64
func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
}
func (d *AtomicDuration) Set(duration time.Duration) {
atomic.StoreInt64((*int64)(d), int64(duration))
}
func (d *AtomicDuration) Get() time.Duration {
return time.Duration(atomic.LoadInt64((*int64)(d)))
}
func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
}
// AtomicString gives you atomic-style APIs for string, but
// it's only a convenience wrapper that uses a mutex. So, it's
// not as efficient as the rest of the atomic types.
type AtomicString struct {
mu sync.Mutex
str string
}
func (s *AtomicString) Set(str string) {
s.mu.Lock()
s.str = str
s.mu.Unlock()
}
func (s *AtomicString) Get() string {
s.mu.Lock()
str := s.str
s.mu.Unlock()
return str
}
func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
s.mu.Lock()
defer s.mu.Unlock()
if s.str == oldval {
s.str = newval
return true
}
return false
}
type AtomicBool int32
func (b *AtomicBool) Set(v bool) {
if v {
atomic.StoreInt32((*int32)(b), 1)
} else {
atomic.StoreInt32((*int32)(b), 0)
}
}
func (b *AtomicBool) Get() bool {
return atomic.LoadInt32((*int32)(b)) == 1
}
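// Editor's sketch (illustrative, not part of the vendored file): the wrappers
// let a counter be shared across goroutines without an explicit mutex.
func exampleCounter() int64 {
	var n AtomicInt64
	n.Set(10)
	n.Add(5)                 // returns the new value, 15
	n.CompareAndSwap(15, 20) // swapped == true, n is now 20
	return n.Get()           // 20
}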

65
vendor/github.com/siddontang/go/sync2/semaphore.go generated vendored Normal file
View File

@@ -0,0 +1,65 @@
package sync2
import (
"sync"
"sync/atomic"
"time"
)
func NewSemaphore(initialCount int) *Semaphore {
res := &Semaphore{
counter: int64(initialCount),
}
res.cond.L = &res.lock
return res
}
type Semaphore struct {
lock sync.Mutex
cond sync.Cond
counter int64
}
func (s *Semaphore) Release() {
s.lock.Lock()
s.counter += 1
if s.counter >= 0 {
s.cond.Signal()
}
s.lock.Unlock()
}
func (s *Semaphore) Acquire() {
s.lock.Lock()
for s.counter < 1 {
s.cond.Wait()
}
s.counter -= 1
s.lock.Unlock()
}
func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool {
done := make(chan bool, 1)
// Gate used to communicate between the goroutines and decide the result.
// If the main goroutine decides first, we have timed out; otherwise we succeeded.
decided := new(int32)
go func() {
s.Acquire()
if atomic.SwapInt32(decided, 1) == 0 {
done <- true
} else {
// The result was already decided and this goroutine did not win,
s.Release()
}
}()
select {
case <-done:
return true
case <-time.NewTimer(timeout).C:
if atomic.SwapInt32(decided, 1) == 1 {
// The other thread already decided the result
return true
}
return false
}
}
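// Editor's sketch (illustrative): a Semaphore created with NewSemaphore(3)
// lets at most 3 of these workers run concurrently.
func exampleWorker(s *Semaphore, work func()) {
	s.Acquire() // blocks while all slots are held
	defer s.Release()
	work()
}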

21
vendor/github.com/siddontang/ledisdb/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 siddontang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,170 @@
# LedisDB configuration
# Server listen address
addr = "0.0.0.0:6380"
# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"
# Server http listen address, set empty to disable
http_addr = "0.0.0.0:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/datastore"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# The default number of databases is 16; the maximum is currently 10240.
databases = 16
# Log server command, set empty to disable
access_log = ""
# Set slaveof to enable replication from a master; if empty, replication is disabled.
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""
# Readonly mode; a slave server is always readonly even if readonly = false.
# In readonly mode, only replication and flushall can write.
readonly = false
# Choose which backend storage to use, now support:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"
# If not set, use data_dir/"db_name"_data
db_path = ""
# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0
# enable replication or not
use_replication = false
# Set connection buffer sizes; you can increase them as appropriate.
# Larger sizes use more memory.
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240
# If a connection receives no data for n seconds, it may be dead; close it.
# 0 disables the check
conn_keepalive_interval = 0
# Check TTL (time to live) data every n seconds.
# If set too high, expired data may not be deleted promptly.
ttl_check_interval = 1
[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432
[rocksdb]
# rocksdb has many, many configuration options;
# only a few are listed here, more may be added later.
# good luck!
# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set true: writes may be lost after a crash.
# You can set it to true if replication is enabled, since data may be recovered
# from the replication log, but that is still not easy work.
disable_wal = false
max_manifest_file_size = 20971520
[lmdb]
map_size = 524288000
nosync = true
[replication]
# Path to store replication information (write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""
# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but have better high availability.
sync = false
# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500
# If sync is true, wait for at most min(wait_max_slave_acks, (n + 1) / 2) acks to confirm syncing,
# where n is the number of slaves.
# If 0, wait for (n + 1) / 2 acks.
wait_max_slave_acks = 2
# store name: file, goleveldb
# change in runtime is very dangerous
store_name = "file"
# Expire write ahead logs after the given days
expired_log_days = 7
# for file store, if 0, use default 256MB, max is 1G
max_log_file_size = 0
# for file store, if 0, use default 50
max_log_file_num = 0
# for file store, use mmap for file read and write
use_mmap = true
# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0
# Compress the log or not
compression = false
[snapshot]
# Path to store snapshot dump file
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""
# Reserve newest max_num snapshot dump files
max_num = 1
[tls]
enabled = false
certificate = "test.crt"
key = "test.key"

316
vendor/github.com/siddontang/ledisdb/config/config.go generated vendored Normal file
View File

@@ -0,0 +1,316 @@
package config
import (
"bytes"
"errors"
"io"
"io/ioutil"
"sync"
"fmt"
"github.com/pelletier/go-toml"
"github.com/siddontang/go/ioutil2"
)
var (
ErrNoConfigFile = errors.New("Running without a config file")
)
const (
DefaultAddr string = "127.0.0.1:6380"
DefaultDBName string = "goleveldb"
DefaultDataDir string = "./var"
KB int = 1024
MB int = KB * 1024
GB int = MB * 1024
)
type LevelDBConfig struct {
Compression bool `toml:"compression"`
BlockSize int `toml:"block_size"`
WriteBufferSize int `toml:"write_buffer_size"`
CacheSize int `toml:"cache_size"`
MaxOpenFiles int `toml:"max_open_files"`
MaxFileSize int `toml:"max_file_size"`
}
type RocksDBConfig struct {
Compression int `toml:"compression"`
BlockSize int `toml:"block_size"`
WriteBufferSize int `toml:"write_buffer_size"`
CacheSize int `toml:"cache_size"`
MaxOpenFiles int `toml:"max_open_files"`
MaxWriteBufferNum int `toml:"max_write_buffer_num"`
MinWriteBufferNumberToMerge int `toml:"min_write_buffer_number_to_merge"`
NumLevels int `toml:"num_levels"`
Level0FileNumCompactionTrigger int `toml:"level0_file_num_compaction_trigger"`
Level0SlowdownWritesTrigger int `toml:"level0_slowdown_writes_trigger"`
Level0StopWritesTrigger int `toml:"level0_stop_writes_trigger"`
TargetFileSizeBase int `toml:"target_file_size_base"`
TargetFileSizeMultiplier int `toml:"target_file_size_multiplier"`
MaxBytesForLevelBase int `toml:"max_bytes_for_level_base"`
MaxBytesForLevelMultiplier int `toml:"max_bytes_for_level_multiplier"`
DisableAutoCompactions bool `toml:"disable_auto_compactions"`
UseFsync bool `toml:"use_fsync"`
MaxBackgroundCompactions int `toml:"max_background_compactions"`
MaxBackgroundFlushes int `toml:"max_background_flushes"`
EnableStatistics bool `toml:"enable_statistics"`
StatsDumpPeriodSec int `toml:"stats_dump_period_sec"`
BackgroundThreads int `toml:"background_theads"`
HighPriorityBackgroundThreads int `toml:"high_priority_background_threads"`
DisableWAL bool `toml:"disable_wal"`
MaxManifestFileSize int `toml:"max_manifest_file_size"`
}
type LMDBConfig struct {
MapSize int `toml:"map_size"`
NoSync bool `toml:"nosync"`
}
type ReplicationConfig struct {
Path string `toml:"path"`
Sync bool `toml:"sync"`
WaitSyncTime int `toml:"wait_sync_time"`
WaitMaxSlaveAcks int `toml:"wait_max_slave_acks"`
ExpiredLogDays int `toml:"expired_log_days"`
StoreName string `toml:"store_name"`
MaxLogFileSize int64 `toml:"max_log_file_size"`
MaxLogFileNum int `toml:"max_log_file_num"`
SyncLog int `toml:"sync_log"`
Compression bool `toml:"compression"`
UseMmap bool `toml:"use_mmap"`
MasterPassword string `toml:"master_password"`
}
type SnapshotConfig struct {
Path string `toml:"path"`
MaxNum int `toml:"max_num"`
}
type TLS struct {
Enabled bool `toml:"enabled"`
Certificate string `toml:"certificate"`
Key string `toml:"key"`
}
type AuthMethod func(c *Config, password string) bool
type Config struct {
m sync.RWMutex `toml:"-"`
AuthPassword string `toml:"auth_password"`
//AuthMethod custom authentication method
AuthMethod AuthMethod `toml:"-"`
FileName string `toml:"-"`
// Addr can be empty to assign a local address dynamically
Addr string `toml:"addr"`
AddrUnixSocketPerm string `toml:"addr_unixsocketperm"`
HttpAddr string `toml:"http_addr"`
SlaveOf string `toml:"slaveof"`
Readonly bool `toml:"readonly"`
DataDir string `toml:"data_dir"`
Databases int `toml:"databases"`
DBName string `toml:"db_name"`
DBPath string `toml:"db_path"`
DBSyncCommit int `toml:"db_sync_commit"`
LevelDB LevelDBConfig `toml:"leveldb"`
RocksDB RocksDBConfig `toml:"rocksdb"`
LMDB LMDBConfig `toml:"lmdb"`
AccessLog string `toml:"access_log"`
UseReplication bool `toml:"use_replication"`
Replication ReplicationConfig `toml:"replication"`
Snapshot SnapshotConfig `toml:"snapshot"`
ConnReadBufferSize int `toml:"conn_read_buffer_size"`
ConnWriteBufferSize int `toml:"conn_write_buffer_size"`
ConnKeepaliveInterval int `toml:"conn_keepalive_interval"`
TTLCheckInterval int `toml:"ttl_check_interval"`
//tls config
TLS TLS `toml:"tls"`
}
func NewConfigWithFile(fileName string) (*Config, error) {
data, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
cfg, err := NewConfigWithData(data)
if err != nil {
return nil, err
}
cfg.FileName = fileName
return cfg, nil
}
func NewConfigWithData(data []byte) (*Config, error) {
cfg := NewConfigDefault()
if err := toml.Unmarshal(data, cfg); err != nil {
return nil, fmt.Errorf("newConfigwithData: unmarashal: %s", err)
}
cfg.adjust()
return cfg, nil
}
func NewConfigDefault() *Config {
cfg := new(Config)
cfg.Addr = DefaultAddr
cfg.HttpAddr = ""
cfg.DataDir = DefaultDataDir
cfg.DBName = DefaultDBName
cfg.SlaveOf = ""
cfg.Readonly = false
// Disable Auth by default, by setting password to blank
cfg.AuthPassword = ""
// default databases number
cfg.Databases = 16
// disable access log
cfg.AccessLog = ""
cfg.LMDB.MapSize = 20 * MB
cfg.LMDB.NoSync = true
cfg.UseReplication = false
cfg.Replication.WaitSyncTime = 500
cfg.Replication.Compression = true
cfg.Replication.WaitMaxSlaveAcks = 2
cfg.Replication.SyncLog = 0
cfg.Replication.UseMmap = true
cfg.Snapshot.MaxNum = 1
cfg.RocksDB.EnableStatistics = false
cfg.RocksDB.UseFsync = false
cfg.RocksDB.DisableAutoCompactions = false
cfg.RocksDB.DisableWAL = false
cfg.adjust()
return cfg
}
func getDefault(d int, s int) int {
if s <= 0 {
return d
}
return s
}
func (cfg *Config) adjust() {
cfg.LevelDB.adjust()
cfg.RocksDB.adjust()
cfg.Replication.ExpiredLogDays = getDefault(7, cfg.Replication.ExpiredLogDays)
cfg.Replication.MaxLogFileNum = getDefault(50, cfg.Replication.MaxLogFileNum)
cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize)
cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize)
cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval)
cfg.Databases = getDefault(16, cfg.Databases)
}
func (cfg *LevelDBConfig) adjust() {
cfg.CacheSize = getDefault(4*MB, cfg.CacheSize)
cfg.BlockSize = getDefault(4*KB, cfg.BlockSize)
cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize)
cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles)
cfg.MaxFileSize = getDefault(32*MB, cfg.MaxFileSize)
}
func (cfg *RocksDBConfig) adjust() {
cfg.CacheSize = getDefault(4*MB, cfg.CacheSize)
cfg.BlockSize = getDefault(4*KB, cfg.BlockSize)
cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize)
cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles)
cfg.MaxWriteBufferNum = getDefault(2, cfg.MaxWriteBufferNum)
cfg.MinWriteBufferNumberToMerge = getDefault(1, cfg.MinWriteBufferNumberToMerge)
cfg.NumLevels = getDefault(7, cfg.NumLevels)
cfg.Level0FileNumCompactionTrigger = getDefault(4, cfg.Level0FileNumCompactionTrigger)
cfg.Level0SlowdownWritesTrigger = getDefault(16, cfg.Level0SlowdownWritesTrigger)
cfg.Level0StopWritesTrigger = getDefault(64, cfg.Level0StopWritesTrigger)
cfg.TargetFileSizeBase = getDefault(32*MB, cfg.TargetFileSizeBase)
cfg.TargetFileSizeMultiplier = getDefault(1, cfg.TargetFileSizeMultiplier)
cfg.MaxBytesForLevelBase = getDefault(32*MB, cfg.MaxBytesForLevelBase)
cfg.MaxBytesForLevelMultiplier = getDefault(1, cfg.MaxBytesForLevelMultiplier)
cfg.MaxBackgroundCompactions = getDefault(1, cfg.MaxBackgroundCompactions)
cfg.MaxBackgroundFlushes = getDefault(1, cfg.MaxBackgroundFlushes)
cfg.StatsDumpPeriodSec = getDefault(3600, cfg.StatsDumpPeriodSec)
cfg.BackgroundThreads = getDefault(2, cfg.BackgroundThreads)
cfg.HighPriorityBackgroundThreads = getDefault(1, cfg.HighPriorityBackgroundThreads)
cfg.MaxManifestFileSize = getDefault(20*MB, cfg.MaxManifestFileSize)
}
func (cfg *Config) Dump(w io.Writer) error {
data, err := toml.Marshal(*cfg)
if err != nil {
return err
}
if _, err := w.Write(data); err != nil {
return err
}
return nil
}
func (cfg *Config) DumpFile(fileName string) error {
var b bytes.Buffer
if err := cfg.Dump(&b); err != nil {
return err
}
return ioutil2.WriteFileAtomic(fileName, b.Bytes(), 0644)
}
func (cfg *Config) Rewrite() error {
if len(cfg.FileName) == 0 {
return ErrNoConfigFile
}
return cfg.DumpFile(cfg.FileName)
}
func (cfg *Config) GetReadonly() bool {
cfg.m.RLock()
b := cfg.Readonly
cfg.m.RUnlock()
return b
}
func (cfg *Config) SetReadonly(b bool) {
cfg.m.Lock()
cfg.Readonly = b
cfg.m.Unlock()
}
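// Editor's sketch (illustrative, not part of the vendored file): fields
// omitted from the TOML keep the defaults set by NewConfigDefault/adjust.
//
//	cfg, err := config.NewConfigWithData([]byte(`addr = "127.0.0.1:6399"`))
//	// cfg.Addr == "127.0.0.1:6399"; cfg.Databases == 16 (default)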

170
vendor/github.com/siddontang/ledisdb/config/config.toml generated vendored Normal file
View File

@@ -0,0 +1,170 @@
# LedisDB configuration
# Server listen address
addr = "127.0.0.1:6380"
# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"
# Server http listen address, set empty to disable
http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# The default number of databases is 16; the maximum is currently 10240.
databases = 16
# Log server command, set empty to disable
access_log = ""
# Set slaveof to enable replication from a master; if empty, replication is disabled.
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""
# Readonly mode; a slave server is always readonly even if readonly = false.
# In readonly mode, only replication and flushall can write.
readonly = false
# Choose which backend storage to use, now support:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"
# If not set, use data_dir/"db_name"_data
db_path = ""
# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0
# enable replication or not
use_replication = false
# Set connection buffer sizes; you can increase them as appropriate.
# Larger sizes use more memory.
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240
# If a connection receives no data for n seconds, it may be dead; close it.
# 0 disables the check
conn_keepalive_interval = 0
# Check TTL (time to live) data every n seconds.
# If set too high, expired data may not be deleted promptly.
ttl_check_interval = 1
[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432
[rocksdb]
# rocksdb has many, many configuration options;
# only a few are listed here, more may be added later.
# good luck!
# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set true: writes may be lost after a crash.
# You can set it to true if replication is enabled, since data may be recovered
# from the replication log, but that is still not easy work.
disable_wal = false
max_manifest_file_size = 20971520
[lmdb]
map_size = 524288000
nosync = true
[replication]
# Path to store replication information (write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""
# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but have better high availability.
sync = false
# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500
# If sync is true, wait for at most min(wait_max_slave_acks, (n + 1) / 2) acks to confirm syncing,
# where n is the number of slaves.
# If 0, wait for (n + 1) / 2 acks.
wait_max_slave_acks = 2
# store name: file, goleveldb
# change in runtime is very dangerous
store_name = "file"
# Expire write ahead logs after the given days
expired_log_days = 7
# for file store, if 0, use default 256MB, max is 1G
max_log_file_size = 0
# for file store, if 0, use default 50
max_log_file_num = 0
# for file store, use mmap for file read and write
use_mmap = true
# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0
# Compress the log or not
compression = false
[snapshot]
# Path to store snapshot dump file
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""
# Reserve newest max_num snapshot dump files
max_num = 1
[tls]
enabled = true
certificate = "test.crt"
key = "test.key"

139
vendor/github.com/siddontang/ledisdb/ledis/batch.go generated vendored Normal file
View File

@@ -0,0 +1,139 @@
package ledis
import (
"sync"
"github.com/siddontang/go/log"
"github.com/siddontang/ledisdb/rpl"
"github.com/siddontang/ledisdb/store"
)
type batch struct {
l *Ledis
*store.WriteBatch
sync.Locker
// tx *Tx
}
func (b *batch) Commit() error {
if b.l.cfg.GetReadonly() {
return ErrWriteInROnly
}
return b.l.handleCommit(b.WriteBatch, b.WriteBatch)
// if b.tx == nil {
// return b.l.handleCommit(b.WriteBatch, b.WriteBatch)
// } else {
// if b.l.r != nil {
// if err := b.tx.data.Append(b.WriteBatch.BatchData()); err != nil {
// return err
// }
// }
// return b.WriteBatch.Commit()
// }
}
func (b *batch) Lock() {
b.Locker.Lock()
}
func (b *batch) Unlock() {
b.WriteBatch.Rollback()
b.Locker.Unlock()
}
func (b *batch) Put(key []byte, value []byte) {
b.WriteBatch.Put(key, value)
}
func (b *batch) Delete(key []byte) {
b.WriteBatch.Delete(key)
}
type dbBatchLocker struct {
l *sync.Mutex
wrLock *sync.RWMutex
}
func (l *dbBatchLocker) Lock() {
l.wrLock.RLock()
l.l.Lock()
}
func (l *dbBatchLocker) Unlock() {
l.l.Unlock()
l.wrLock.RUnlock()
}
// type txBatchLocker struct {
// }
// func (l *txBatchLocker) Lock() {}
// func (l *txBatchLocker) Unlock() {}
// type multiBatchLocker struct {
// }
// func (l *multiBatchLocker) Lock() {}
// func (l *multiBatchLocker) Unlock() {}
func (l *Ledis) newBatch(wb *store.WriteBatch, locker sync.Locker) *batch {
b := new(batch)
b.l = l
b.WriteBatch = wb
b.Locker = locker
return b
}
type commiter interface {
Commit() error
}
type commitDataGetter interface {
Data() []byte
}
func (l *Ledis) handleCommit(g commitDataGetter, c commiter) error {
l.commitLock.Lock()
var err error
if l.r != nil {
var rl *rpl.Log
if rl, err = l.r.Log(g.Data()); err != nil {
l.commitLock.Unlock()
log.Fatalf("write wal error %s", err.Error())
return err
}
l.propagate(rl)
if err = c.Commit(); err != nil {
l.commitLock.Unlock()
log.Fatalf("commit error %s", err.Error())
l.noticeReplication()
return err
}
if err = l.r.UpdateCommitID(rl.ID); err != nil {
l.commitLock.Unlock()
log.Fatalf("update commit id error %s", err.Error())
l.noticeReplication()
return err
}
} else {
err = c.Commit()
}
l.commitLock.Unlock()
return err
}
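// Editor's note: handleCommit is write-ahead: the data is appended to the
// replication log first, propagated, then committed to the store, and only
// then is the commit ID advanced. If a later step fails, noticeReplication
// appears to let the replication machinery reconcile log and store.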

150
vendor/github.com/siddontang/ledisdb/ledis/const.go generated vendored Normal file
View File

@@ -0,0 +1,150 @@
package ledis
import (
"errors"
)
// Version is for version
const Version = "0.5"
// DataType is defined for the different types
type DataType byte
// for outside use
const (
KV DataType = iota
LIST
HASH
SET
ZSET
)
func (d DataType) String() string {
switch d {
case KV:
return KVName
case LIST:
return ListName
case HASH:
return HashName
case SET:
return SetName
case ZSET:
return ZSetName
default:
return "unknown"
}
}
// For different type name
const (
KVName = "KV"
ListName = "LIST"
HashName = "HASH"
SetName = "SET"
ZSetName = "ZSET"
)
// for backend store
const (
NoneType byte = 0
KVType byte = 1
HashType byte = 2
HSizeType byte = 3
ListType byte = 4
LMetaType byte = 5
ZSetType byte = 6
ZSizeType byte = 7
ZScoreType byte = 8
// BitType byte = 9
// BitMetaType byte = 10
SetType byte = 11
SSizeType byte = 12
maxDataType byte = 100
/*
I made a big mistake with the TTL time key format and had to use a new one (changing 101 to 103).
You must run ledis-upgrade-ttl to upgrade the db.
*/
ObsoleteExpTimeType byte = 101
ExpMetaType byte = 102
ExpTimeType byte = 103
MetaType byte = 201
)
// TypeName is the map of type -> name
var TypeName = map[byte]string{
KVType: "kv",
HashType: "hash",
HSizeType: "hsize",
ListType: "list",
LMetaType: "lmeta",
ZSetType: "zset",
ZSizeType: "zsize",
ZScoreType: "zscore",
// BitType: "bit",
// BitMetaType: "bitmeta",
SetType: "set",
SSizeType: "ssize",
ExpTimeType: "exptime",
ExpMetaType: "expmeta",
}
const (
defaultScanCount int = 10
)
var (
errKeySize = errors.New("invalid key size")
errValueSize = errors.New("invalid value size")
errHashFieldSize = errors.New("invalid hash field size")
errSetMemberSize = errors.New("invalid set member size")
errZSetMemberSize = errors.New("invalid zset member size")
errExpireValue = errors.New("invalid expire value")
errListIndex = errors.New("invalid list index")
)
// For different const size configuration
const (
// max allowed databases
MaxDatabases int = 10240
// max key size
MaxKeySize int = 1024
// max hash field size
MaxHashFieldSize int = 1024
// max zset member size
MaxZSetMemberSize int = 1024
// max set member size
MaxSetMemberSize int = 1024
// max value size
MaxValueSize int = 1024 * 1024 * 1024
)
// For different common errors
var (
ErrScoreMiss = errors.New("zset score miss")
ErrWriteInROnly = errors.New("write not support in readonly mode")
ErrRplInRDWR = errors.New("replication not support in read write mode")
ErrRplNotSupport = errors.New("replication not support")
)
// const (
// DBAutoCommit uint8 = 0x0
// DBInTransaction uint8 = 0x1
// DBInMulti uint8 = 0x2
// )
// For bit operation
const (
BitAND = "and"
BitOR = "or"
BitXOR = "xor"
BitNot = "not"
)

58
vendor/github.com/siddontang/ledisdb/ledis/doc.go generated vendored Normal file
View File

@@ -0,0 +1,58 @@
// Package ledis is a high performance embedded NoSQL database.
//
// Ledis supports various data structures like kv, list, hash and zset, similar to redis.
//
// Other features include replication and data with a limited time-to-live.
//
// Usage
//
// First create a ledis instance before use:
//
// l := ledis.Open(cfg)
//
// cfg is a Config instance which contains the configuration ledis uses,
// like DataDir (the root directory where ledis stores its data).
//
// After you create a ledis instance, you can select a DB to store your data:
//
// db, _ := l.Select(0)
//
// A DB must be selected by an index; ledis supports 16 databases by default, so the index range is [0, 15].
//
// KV
//
// KV is the most basic ledis type like any other key-value database.
//
// err := db.Set(key, value)
// value, err := db.Get(key)
//
// List
//
// A List is simply a list of values, sorted by insertion order.
// You can push or pop value on the list head (left) or tail (right).
//
// err := db.LPush(key, value1)
// err := db.RPush(key, value2)
// value1, err := db.LPop(key)
// value2, err := db.RPop(key)
//
// Hash
//
// Hash is a map between fields and values.
//
// n, err := db.HSet(key, field1, value1)
// n, err := db.HSet(key, field2, value2)
// value1, err := db.HGet(key, field1)
// value2, err := db.HGet(key, field2)
//
// ZSet
//
// ZSet is a sorted collection of values.
// Every member of a zset is associated with a score, an int64 value used to sort members from smallest to greatest score.
// Members are unique, but scores may repeat.
//
// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
//
//
package ledis

220
vendor/github.com/siddontang/ledisdb/ledis/dump.go generated vendored Normal file
View File

@@ -0,0 +1,220 @@
package ledis
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"os"
"github.com/siddontang/go/snappy"
"github.com/siddontang/ledisdb/store"
)
// DumpHead is the head of a dump.
type DumpHead struct {
CommitID uint64
}
// Read reads meta from the Reader.
func (h *DumpHead) Read(r io.Reader) error {
return binary.Read(r, binary.BigEndian, &h.CommitID)
}
// Write writes meta to the Writer
func (h *DumpHead) Write(w io.Writer) error {
return binary.Write(w, binary.BigEndian, h.CommitID)
}
// DumpFile dumps data to the file
func (l *Ledis) DumpFile(path string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
return l.Dump(f)
}
// Dump dumps data to the Writer.
func (l *Ledis) Dump(w io.Writer) error {
var err error
var commitID uint64
var snap *store.Snapshot
l.wLock.Lock()
if l.r != nil {
if commitID, err = l.r.LastCommitID(); err != nil {
l.wLock.Unlock()
return err
}
}
if snap, err = l.ldb.NewSnapshot(); err != nil {
l.wLock.Unlock()
return err
}
defer snap.Close()
l.wLock.Unlock()
wb := bufio.NewWriterSize(w, 4096)
h := &DumpHead{commitID}
if err = h.Write(wb); err != nil {
return err
}
it := snap.NewIterator()
defer it.Close()
it.SeekToFirst()
compressBuf := make([]byte, 4096)
var key []byte
var value []byte
for ; it.Valid(); it.Next() {
key = it.RawKey()
value = it.RawValue()
if key, err = snappy.Encode(compressBuf, key); err != nil {
return err
}
if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil {
return err
}
if _, err = wb.Write(key); err != nil {
return err
}
if value, err = snappy.Encode(compressBuf, value); err != nil {
return err
}
if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil {
return err
}
if _, err = wb.Write(value); err != nil {
return err
}
}
if err = wb.Flush(); err != nil {
return err
}
compressBuf = nil
return nil
}
// LoadDumpFile clears all data and loads dump file to db
func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return l.LoadDump(f)
}
// LoadDump clears all data and loads dump file to db
func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) {
l.wLock.Lock()
defer l.wLock.Unlock()
var err error
if err = l.flushAll(); err != nil {
return nil, err
}
rb := bufio.NewReaderSize(r, 4096)
h := new(DumpHead)
if err = h.Read(rb); err != nil {
return nil, err
}
var keyLen uint16
var valueLen uint32
var keyBuf bytes.Buffer
var valueBuf bytes.Buffer
deKeyBuf := make([]byte, 4096)
deValueBuf := make([]byte, 4096)
var key, value []byte
wb := l.ldb.NewWriteBatch()
defer wb.Close()
n := 0
for {
if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
return nil, err
} else if err == io.EOF {
break
}
if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
return nil, err
}
if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil {
return nil, err
}
if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
return nil, err
}
if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
return nil, err
}
if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil {
return nil, err
}
wb.Put(key, value)
n++
if n%1024 == 0 {
if err = wb.Commit(); err != nil {
return nil, err
}
}
// if err = l.ldb.Put(key, value); err != nil {
// return nil, err
// }
keyBuf.Reset()
valueBuf.Reset()
}
if err = wb.Commit(); err != nil {
return nil, err
}
deKeyBuf = nil
deValueBuf = nil
if l.r != nil {
if err := l.r.UpdateCommitID(h.CommitID); err != nil {
return nil, err
}
}
return h, nil
}
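// Editor's sketch (illustrative): a full backup/restore cycle using the
// entry points above.
//
//	if err := l.DumpFile("/tmp/ledis.dmp"); err != nil { /* handle */ }
//	head, err := l2.LoadDumpFile("/tmp/ledis.dmp") // clears l2's data first
//	// head.CommitID is the replication commit ID captured at dump time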

136
vendor/github.com/siddontang/ledisdb/ledis/event.go generated vendored Normal file
View File

@@ -0,0 +1,136 @@
package ledis
import (
"errors"
"fmt"
"strconv"
"github.com/siddontang/go/hack"
)
var errInvalidEvent = errors.New("invalid event")
func formatEventKey(buf []byte, k []byte) ([]byte, error) {
if len(k) < 2 {
return nil, errInvalidEvent
}
buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...)
buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)
db := new(DB)
index, _, err := decodeDBIndex(k)
if err != nil {
return nil, err
}
db.setIndex(index)
// TODO: format at the respective place
switch k[1] {
case KVType:
key, err := db.decodeKVKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case HashType:
key, field, err := db.hDecodeHashKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(field))
case HSizeType:
key, err := db.hDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ListType:
key, seq, err := db.lDecodeListKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, int64(seq), 10)
case LMetaType:
key, err := db.lDecodeMetaKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ZSetType:
key, m, err := db.zDecodeSetKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(m))
case ZSizeType:
key, err := db.zDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ZScoreType:
key, m, score, err := db.zDecodeScoreKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(m))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, score, 10)
case SetType:
key, member, err := db.sDecodeSetKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(member))
case SSizeType:
key, err := db.sDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ExpTimeType:
tp, key, t, err := db.expDecodeTimeKey(k)
if err != nil {
return nil, err
}
buf = append(buf, TypeName[tp]...)
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, t, 10)
case ExpMetaType:
tp, key, err := db.expDecodeMetaKey(k)
if err != nil {
return nil, err
}
buf = append(buf, TypeName[tp]...)
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(key))
default:
return nil, errInvalidEvent
}
return buf, nil
}
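// Editor's sketch (illustrative, output shape inferred from the AppendQuote
// calls above): a hash key in DB 3 would format roughly as
//
//	DB: 3 hash "mykey" "myfield"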

248
vendor/github.com/siddontang/ledisdb/ledis/ledis.go generated vendored Normal file
View File

@@ -0,0 +1,248 @@
package ledis
import (
"fmt"
"io"
"os"
"path"
"sync"
"time"
"github.com/siddontang/go/filelock"
"github.com/siddontang/go/log"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/rpl"
"github.com/siddontang/ledisdb/store"
)
// Ledis is the core structure to handle the database.
type Ledis struct {
cfg *config.Config
ldb *store.DB
dbLock sync.Mutex
dbs map[int]*DB
quit chan struct{}
wg sync.WaitGroup
//for replication
r *rpl.Replication
rc chan struct{}
rbatch *store.WriteBatch
rDoneCh chan struct{}
rhs []NewLogEventHandler
wLock sync.RWMutex //allow only one write at the same time
commitLock sync.Mutex //allow only one write commit at the same time
lock io.Closer
ttlCheckers []*ttlChecker
ttlCheckerCh chan *ttlChecker
}
// Open opens the Ledis with a config.
func Open(cfg *config.Config) (*Ledis, error) {
if len(cfg.DataDir) == 0 {
cfg.DataDir = config.DefaultDataDir
}
if cfg.Databases == 0 {
cfg.Databases = 16
} else if cfg.Databases > MaxDatabases {
cfg.Databases = MaxDatabases
}
os.MkdirAll(cfg.DataDir, 0755)
var err error
l := new(Ledis)
l.cfg = cfg
if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil {
return nil, err
}
l.quit = make(chan struct{})
if l.ldb, err = store.Open(cfg); err != nil {
return nil, err
}
if cfg.UseReplication {
if l.r, err = rpl.NewReplication(cfg); err != nil {
return nil, err
}
l.rc = make(chan struct{}, 1)
l.rbatch = l.ldb.NewWriteBatch()
l.rDoneCh = make(chan struct{}, 1)
l.wg.Add(1)
go l.onReplication()
//first we must wait for all replication to finish,
//since some logs may not be committed yet
l.WaitReplication()
} else {
l.r = nil
}
l.dbs = make(map[int]*DB, 16)
l.checkTTL()
return l, nil
}
// Close closes the Ledis.
func (l *Ledis) Close() {
close(l.quit)
l.wg.Wait()
l.ldb.Close()
if l.r != nil {
l.r.Close()
//l.r = nil
}
if l.lock != nil {
l.lock.Close()
//l.lock = nil
}
}
// Select chooses a database.
func (l *Ledis) Select(index int) (*DB, error) {
if index < 0 || index >= l.cfg.Databases {
return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1)
}
l.dbLock.Lock()
defer l.dbLock.Unlock()
db, ok := l.dbs[index]
if ok {
return db, nil
}
db = l.newDB(index)
l.dbs[index] = db
go func(db *DB) {
l.ttlCheckerCh <- db.ttlChecker
}(db)
return db, nil
}
// FlushAll will clear all data and replication logs
func (l *Ledis) FlushAll() error {
l.wLock.Lock()
defer l.wLock.Unlock()
return l.flushAll()
}
func (l *Ledis) flushAll() error {
it := l.ldb.NewIterator()
defer it.Close()
it.SeekToFirst()
w := l.ldb.NewWriteBatch()
defer w.Rollback()
n := 0
for ; it.Valid(); it.Next() {
n++
if n == 10000 {
if err := w.Commit(); err != nil {
log.Fatalf("flush all commit error: %s", err.Error())
return err
}
n = 0
}
w.Delete(it.RawKey())
}
if err := w.Commit(); err != nil {
log.Fatalf("flush all commit error: %s", err.Error())
return err
}
if l.r != nil {
if err := l.r.Clear(); err != nil {
log.Fatalf("flush all replication clear error: %s", err.Error())
return err
}
}
return nil
}
// IsReadOnly returns whether Ledis is read only or not.
func (l *Ledis) IsReadOnly() bool {
if l.cfg.GetReadonly() {
return true
} else if l.r != nil {
if b, _ := l.r.CommitIDBehind(); b {
return true
}
}
return false
}
func (l *Ledis) checkTTL() {
l.ttlCheckers = make([]*ttlChecker, 0, 16)
l.ttlCheckerCh = make(chan *ttlChecker, 16)
if l.cfg.TTLCheckInterval == 0 {
l.cfg.TTLCheckInterval = 1
}
l.wg.Add(1)
go func() {
defer l.wg.Done()
tick := time.NewTicker(time.Duration(l.cfg.TTLCheckInterval) * time.Second)
defer tick.Stop()
for {
select {
case <-tick.C:
if l.IsReadOnly() {
break
}
for _, c := range l.ttlCheckers {
c.check()
}
case c := <-l.ttlCheckerCh:
l.ttlCheckers = append(l.ttlCheckers, c)
c.check()
case <-l.quit:
return
}
}
}()
}
// StoreStat returns the statistics.
func (l *Ledis) StoreStat() *store.Stat {
return l.ldb.Stat()
}
// CompactStore compacts the backend storage.
func (l *Ledis) CompactStore() error {
l.wLock.Lock()
defer l.wLock.Unlock()
return l.ldb.Compact()
}

208
vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go generated vendored Normal file
View File

@@ -0,0 +1,208 @@
package ledis
import (
"bytes"
"encoding/binary"
"fmt"
"sync"
"github.com/siddontang/ledisdb/store"
)
type ibucket interface {
Get(key []byte) ([]byte, error)
GetSlice(key []byte) (store.Slice, error)
Put(key []byte, value []byte) error
Delete(key []byte) error
NewIterator() *store.Iterator
NewWriteBatch() *store.WriteBatch
RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
}
// DB is the database.
type DB struct {
l *Ledis
sdb *store.DB
bucket ibucket
index int
// buffer to store index varint
indexVarBuf []byte
kvBatch *batch
listBatch *batch
hashBatch *batch
zsetBatch *batch
// binBatch *batch
setBatch *batch
// status uint8
ttlChecker *ttlChecker
lbkeys *lBlockKeys
}
func (l *Ledis) newDB(index int) *DB {
d := new(DB)
d.l = l
d.sdb = l.ldb
d.bucket = d.sdb
// d.status = DBAutoCommit
d.setIndex(index)
d.kvBatch = d.newBatch()
d.listBatch = d.newBatch()
d.hashBatch = d.newBatch()
d.zsetBatch = d.newBatch()
// d.binBatch = d.newBatch()
d.setBatch = d.newBatch()
d.lbkeys = newLBlockKeys()
d.ttlChecker = d.newTTLChecker()
return d
}
func decodeDBIndex(buf []byte) (int, int, error) {
index, n := binary.Uvarint(buf)
if n == 0 {
return 0, 0, fmt.Errorf("buf is too small to save index")
} else if n < 0 {
return 0, 0, fmt.Errorf("value larger than 64 bits")
} else if index > uint64(MaxDatabases) {
return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases)
}
return int(index), n, nil
}
func (db *DB) setIndex(index int) {
db.index = index
// the most size for varint is 10 bytes
buf := make([]byte, 10)
n := binary.PutUvarint(buf, uint64(index))
db.indexVarBuf = buf[0:n]
}
func (db *DB) checkKeyIndex(buf []byte) (int, error) {
if len(buf) < len(db.indexVarBuf) {
return 0, fmt.Errorf("key is too small")
} else if !bytes.Equal(db.indexVarBuf, buf[0:len(db.indexVarBuf)]) {
return 0, fmt.Errorf("invalid db index")
}
return len(db.indexVarBuf), nil
}
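// Editor's worked example: for index 300, binary.PutUvarint writes the two
// bytes {0xAC, 0x02}, so every key in that DB carries this prefix and
// checkKeyIndex simply compares the first len(indexVarBuf) bytes.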
func (db *DB) newTTLChecker() *ttlChecker {
c := new(ttlChecker)
c.db = db
c.txs = make([]*batch, maxDataType)
c.cbs = make([]onExpired, maxDataType)
c.nc = 0
c.register(KVType, db.kvBatch, db.delete)
c.register(ListType, db.listBatch, db.lDelete)
c.register(HashType, db.hashBatch, db.hDelete)
c.register(ZSetType, db.zsetBatch, db.zDelete)
// c.register(BitType, db.binBatch, db.bDelete)
c.register(SetType, db.setBatch, db.sDelete)
return c
}
func (db *DB) newBatch() *batch {
return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock})
}
// Index gets the index of database.
func (db *DB) Index() int {
return int(db.index)
}
// func (db *DB) IsAutoCommit() bool {
// return db.status == DBAutoCommit
// }
// FlushAll flushes the data.
func (db *DB) FlushAll() (drop int64, err error) {
all := [...](func() (int64, error)){
db.flush,
db.lFlush,
db.hFlush,
db.zFlush,
db.sFlush}
for _, flush := range all {
n, e := flush()
if e != nil {
err = e
return
}
drop += n
}
return
}
func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
var deleteFunc func(t *batch, key []byte) int64
var metaDataType byte
switch dataType {
case KVType:
deleteFunc = db.delete
metaDataType = KVType
case ListType:
deleteFunc = db.lDelete
metaDataType = LMetaType
case HashType:
deleteFunc = db.hDelete
metaDataType = HSizeType
case ZSetType:
deleteFunc = db.zDelete
metaDataType = ZSizeType
// case BitType:
// deleteFunc = db.bDelete
// metaDataType = BitMetaType
case SetType:
deleteFunc = db.sDelete
metaDataType = SSizeType
default:
return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType])
}
var keys [][]byte
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
for len(keys) != 0 || err != nil {
for _, key := range keys {
deleteFunc(t, key)
db.rmExpire(t, dataType, key)
}
if err = t.Commit(); err != nil {
return
}
drop += int64(len(keys))
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
}
return
}

Some files were not shown because too many files have changed in this diff