
Merge v1.2.1-master

Signed-off-by: Emile Vauge <emile@vauge.com>
Emile Vauge 2017-04-11 17:10:46 +02:00
parent a590155b0b
commit aeb17182b4
GPG key ID: D808B4C167352E59
396 changed files with 27271 additions and 9969 deletions


@ -30,33 +30,33 @@ const (
Chmod
)
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
func (op Op) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if e.Op&Create == Create {
if op&Create == Create {
buffer.WriteString("|CREATE")
}
if e.Op&Remove == Remove {
if op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if e.Op&Write == Write {
if op&Write == Write {
buffer.WriteString("|WRITE")
}
if e.Op&Rename == Rename {
if op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if e.Op&Chmod == Chmod {
if op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
// If buffer remains empty, return no event names
if buffer.Len() == 0 {
return fmt.Sprintf("%q: ", e.Name)
return ""
}
// Return a list of event names, with leading pipe character stripped
return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
return buffer.String()[1:] // Strip leading pipe
}
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
}
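Illustrative usage sketch (not part of the diff), assuming this hunk is the vendored fsnotify package (import path assumed): with String() moved onto Op, the operation mask can be formatted on its own, and Event.String() now just composes it with the file name.

package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify" // assumed import path for the vendored package
)

func main() {
	ev := fsnotify.Event{Name: "/tmp/demo.txt", Op: fsnotify.Create | fsnotify.Write}
	fmt.Println(ev.Op.String()) // CREATE|WRITE
	fmt.Println(ev.String())    // "/tmp/demo.txt": CREATE|WRITE
}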


@ -36,7 +36,7 @@ type Watcher struct {
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit()
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
if fd == -1 {
return nil, errno
}
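A minimal sketch of the pattern this change adopts, assuming golang.org/x/sys/unix is the unix package used here: InotifyInit1 with IN_CLOEXEC marks the inotify descriptor close-on-exec, so it is not leaked into child processes, which plain InotifyInit left inheritable.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// IN_CLOEXEC keeps the descriptor from being inherited across exec.
	fd, err := unix.InotifyInit1(unix.IN_CLOEXEC)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	log.Printf("inotify fd: %d", fd)
}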

vendor/gopkg.in/ini.v1/ini.go (generated, vendored): 61 changes

@ -37,7 +37,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.23.0"
_VERSION = "1.27.0"
)
// Version returns current package version literal.
@ -173,9 +173,13 @@ type LoadOptions struct {
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
IgnoreInlineComment bool
// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with a missing value.
// Keys of this type are mostly used in my.cnf.
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with same name under same section.
AllowShadows bool
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
@ -219,6 +223,12 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// ShadowLoad has exactly the same functionality as the Load function,
// except that it allows having shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here, we're sure our data is good.
@ -311,6 +321,11 @@ func (f *File) Sections() []*Section {
return sections
}
// ChildSections returns a list of child sections of given section name.
func (f *File) ChildSections(name string) []*Section {
return f.Section(name).ChildSections()
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
@ -441,6 +456,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KEY_LIST:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
@ -467,28 +483,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
if key.isBooleanType {
continue
}
for _, val := range key.ValueWithShadows() {
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
if key.isBooleanType {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KEY_LIST
}
val := key.value
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
}
}
}
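Illustrative sketch (not part of the diff) of the shadow-key feature this WriteToIndent change serializes, assuming gopkg.in/ini.v1 at this vendored version; the sample INI content is made up.

package main

import (
	"fmt"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	src := []byte("[ssh]\nIdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")
	// ShadowLoad is shorthand for LoadSources(LoadOptions{AllowShadows: true}, src).
	cfg, err := ini.ShadowLoad(src)
	if err != nil {
		panic(err)
	}
	// Both values are kept; WriteToIndent now emits one "key = value" line per shadow.
	fmt.Println(cfg.Section("ssh").Key("IdentityFile").ValueWithShadows())
	_, _ = cfg.WriteTo(os.Stdout)
}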

vendor/gopkg.in/ini.v1/key.go (generated, vendored): 148 changes

@ -15,6 +15,7 @@
package ini
import (
"errors"
"fmt"
"strconv"
"strings"
@ -29,9 +30,42 @@ type Key struct {
isAutoIncrement bool
isBooleanType bool
isShadow bool
shadows []*Key
Comment string
}
// newKey simply returns a key object with the given values.
func newKey(s *Section, name, val string) *Key {
return &Key{
s: s,
name: name,
value: val,
}
}
func (k *Key) addShadow(val string) error {
if k.isShadow {
return errors.New("cannot add shadow to another shadow key")
} else if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add shadow to auto-increment or boolean key")
}
shadow := newKey(k.s, k.name, val)
shadow.isShadow = true
k.shadows = append(k.shadows, shadow)
return nil
}
// AddShadow adds a new shadow key to itself.
func (k *Key) AddShadow(val string) error {
if !k.s.f.options.AllowShadows {
return errors.New("shadow key is not allowed")
}
return k.addShadow(val)
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
@ -45,16 +79,29 @@ func (k *Key) Value() string {
return k.value
}
// String returns string representation of value.
func (k *Key) String() string {
val := k.value
// ValueWithShadows returns raw values of key and its shadows if any.
func (k *Key) ValueWithShadows() []string {
if len(k.shadows) == 0 {
return []string{k.value}
}
vals := make([]string, len(k.shadows)+1)
vals[0] = k.value
for i := range k.shadows {
vals[i+1] = k.shadows[i].value
}
return vals
}
// transformValue takes a raw value and transforms to its final string.
func (k *Key) transformValue(val string) string {
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
if strings.Index(val, "%") == -1 {
// Fail fast if no indicator char is found for a recursive value
if !strings.Contains(val, "%") {
return val
}
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
@ -78,6 +125,11 @@ func (k *Key) String() string {
return val
}
// String returns string representation of value.
func (k *Key) String() string {
return k.transformValue(k.value)
}
// Validate accepts a validate function which can
// return modified result as key value.
func (k *Key) Validate(fn func(string) string) string {
@ -394,45 +446,65 @@ func (k *Key) Strings(delim string) []string {
vals := strings.Split(str, delim)
for i := range vals {
// vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
// StringsWithShadows returns list of string divided by given delimiter.
// Shadows will also be appended if any.
func (k *Key) StringsWithShadows(delim string) []string {
vals := k.ValueWithShadows()
results := make([]string, 0, len(vals)*2)
for i := range vals {
if len(vals) == 0 {
continue
}
results = append(results, strings.Split(vals[i], delim)...)
}
for i := range results {
results[i] = k.transformValue(strings.TrimSpace(results[i]))
}
return results
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, true, false)
vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.getInts(delim, true, false)
vals, _ := k.parseInts(k.Strings(delim), true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, true, false)
vals, _ := k.parseInt64s(k.Strings(delim), true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.getUints(delim, true, false)
vals, _ := k.parseUints(k.Strings(delim), true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, true, false)
vals, _ := k.parseUint64s(k.Strings(delim), true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, true, false)
vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
return vals
}
@ -445,41 +517,41 @@ func (k *Key) Times(delim string) []time.Time {
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, false, false)
vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.getInts(delim, false, false)
vals, _ := k.parseInts(k.Strings(delim), false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, false, false)
vals, _ := k.parseInt64s(k.Strings(delim), false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.getUints(delim, false, false)
vals, _ := k.parseUints(k.Strings(delim), false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, false, false)
vals, _ := k.parseUint64s(k.Strings(delim), false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, false, false)
vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
return vals
}
@ -490,33 +562,33 @@ func (k *Key) ValidTimes(delim string) []time.Time {
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.getFloat64s(delim, false, true)
return k.parseFloat64s(k.Strings(delim), false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.getInts(delim, false, true)
return k.parseInts(k.Strings(delim), false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.getInt64s(delim, false, true)
return k.parseInt64s(k.Strings(delim), false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.getUints(delim, false, true)
return k.parseUints(k.Strings(delim), false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.getUint64s(delim, false, true)
return k.parseUint64s(k.Strings(delim), false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.getTimesFormat(format, delim, false, true)
return k.parseTimesFormat(format, k.Strings(delim), false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
@ -525,9 +597,8 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// getFloat64s returns list of float64 divided by given delimiter.
func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
strs := k.Strings(delim)
// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
@ -541,9 +612,8 @@ func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]flo
return vals, nil
}
// getInts returns list of int divided by given delimiter.
func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
strs := k.Strings(delim)
// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
@ -557,9 +627,8 @@ func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, er
return vals, nil
}
// getInt64s returns list of int64 divided by given delimiter.
func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
strs := k.Strings(delim)
// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
@ -573,9 +642,8 @@ func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64
return vals, nil
}
// getUints returns list of uint divided by given delimiter.
func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
strs := k.Strings(delim)
// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
@ -589,9 +657,8 @@ func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint,
return vals, nil
}
// getUint64s returns list of uint64 divided by given delimiter.
func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
strs := k.Strings(delim)
// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
@ -605,9 +672,8 @@ func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint
return vals, nil
}
// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
strs := k.Strings(delim)
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
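Illustrative sketch, assuming gopkg.in/ini.v1: after the refactor above, Ints and StrictInts share the same parseInts helper and differ only in how invalid entries are handled. The sample key is made up.

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("ports = 80, 443, not-a-port\n"))
	if err != nil {
		panic(err)
	}
	key := cfg.Section("").Key("ports")
	fmt.Println(key.Ints(",")) // invalid entries become the zero value: [80 443 0]
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict parse failed:", err) // errors out on the first invalid entry
	}
}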

vendor/gopkg.in/ini.v1/parser.go (generated, vendored): 37 changes

@ -193,7 +193,7 @@ func hasSurroundedQuote(in string, quote byte) bool {
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
@ -217,18 +217,21 @@ func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
// Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
// Check continuation lines when desired.
// Check continuation lines when desired
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
// Check if ignore inline comment
if !ignoreInlineComment {
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
}
// Trim single quotes
@ -318,11 +321,14 @@ func (f *File) parse(reader io.Reader) (err error) {
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
key, err := section.NewKey(string(line), "true")
kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
key, err := section.NewBooleanKey(kname)
if err != nil {
return err
}
key.isBooleanType = true
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
@ -338,17 +344,16 @@ func (f *File) parse(reader io.Reader) (err error) {
p.count++
}
key, err := section.NewKey(kname, "")
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
key, err := section.NewKey(kname, value)
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
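Illustrative sketch, assuming gopkg.in/ini.v1, of the two options this parser change threads through readValue; the my.cnf-style sample is made up.

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	src := []byte("[mysqld]\nskip-name-resolve\ndatadir = /var/lib/mysql # main store\n")
	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowBooleanKeys:    true, // bare keys such as "skip-name-resolve" become boolean keys
		IgnoreInlineComment: true, // "#" and ";" stay part of the value
	}, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("mysqld").Key("skip-name-resolve").MustBool()) // true
	fmt.Println(cfg.Section("mysqld").Key("datadir").String())            // /var/lib/mysql # main store
}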

vendor/gopkg.in/ini.v1/section.go (generated, vendored): 39 changes

@ -68,20 +68,33 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
}
if inSlice(name, s.keyList) {
s.keys[name].value = val
if s.f.options.AllowShadows {
if err := s.keys[name].addShadow(val); err != nil {
return nil, err
}
} else {
s.keys[name].value = val
}
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = &Key{
s: s,
name: name,
value: val,
}
s.keys[name] = newKey(s, name, val)
s.keysHash[name] = val
return s.keys[name], nil
}
// NewBooleanKey creates a new boolean type key to given section.
func (s *Section) NewBooleanKey(name string) (*Key, error) {
key, err := s.NewKey(name, "true")
if err != nil {
return nil, err
}
key.isBooleanType = true
return key, nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
// FIXME: change to section level lock?
@ -219,3 +232,17 @@ func (s *Section) DeleteKey(name string) {
}
}
}
// ChildSections returns a list of child sections of current section.
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
prefix := s.name + "."
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
children = append(children, s.f.sections[name])
}
}
return children
}
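Illustrative sketch of the new ChildSections helper, assuming gopkg.in/ini.v1; section names are made up.

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("[parent]\n[parent.child1]\n[parent.child2]\n[other]\n"))
	if err != nil {
		panic(err)
	}
	for _, child := range cfg.Section("parent").ChildSections() {
		fmt.Println(child.Name()) // parent.child1, then parent.child2
	}
}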

vendor/gopkg.in/ini.v1/struct.go (generated, vendored): 47 changes

@ -78,8 +78,14 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
strs := key.Strings(delim)
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)
} else {
strs = key.Strings(delim)
}
numVals := len(strs)
if numVals == 0 {
return nil
@ -92,17 +98,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
case reflect.String:
vals = strs
case reflect.Int:
vals = key.Ints(delim)
vals, _ = key.parseInts(strs, true, false)
case reflect.Int64:
vals = key.Int64s(delim)
vals, _ = key.parseInt64s(strs, true, false)
case reflect.Uint:
vals = key.Uints(delim)
vals, _ = key.parseUints(strs, true, false)
case reflect.Uint64:
vals = key.Uint64s(delim)
vals, _ = key.parseUint64s(strs, true, false)
case reflect.Float64:
vals = key.Float64s(delim)
vals, _ = key.parseFloat64s(strs, true, false)
case reflectTime:
vals = key.Times(delim)
vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
@ -133,7 +139,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use the default value that is already assigned to the struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
@ -174,7 +180,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.SetUint(uintVal)
case reflect.Float64:
case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
@ -187,13 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim)
return setSliceWithProperType(key, field, delim, allowShadow)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
opts := strings.SplitN(tag, ",", 3)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
}
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
return rawName, omitEmpty, allowShadow
}
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
@ -209,8 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error {
continue
}
opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
fieldName := s.parseFieldName(tpField.Name, opts[0])
rawName, _, allowShadow := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
@ -231,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error {
}
if key, err := s.GetKey(fieldName); err == nil {
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
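Illustrative sketch, assuming gopkg.in/ini.v1: the new third tag option, allowshadow, makes MapTo collect shadow values into a slice field. The struct and INI content are made up.

package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

type sshConfig struct {
	// Tag layout is "name,omitempty-slot,allowshadow"; the middle slot is left empty here.
	IdentityFile []string `ini:"IdentityFile,,allowshadow"`
}

func main() {
	src := []byte("IdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")
	cfg, err := ini.ShadowLoad(src)
	if err != nil {
		panic(err)
	}
	var sc sshConfig
	if err := cfg.MapTo(&sc); err != nil {
		panic(err)
	}
	fmt.Println(sc.IdentityFile) // [~/.ssh/id_rsa ~/.ssh/id_ed25519]
}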

vendor/gopkg.in/mgo.v2/bson/bson.go (generated, vendored): 23 changes

@ -38,6 +38,7 @@ import (
"crypto/rand"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
@ -204,6 +205,7 @@ func readRandomUint32() uint32 {
// machineId stores machine id generated once and used in subsequent calls
// to NewObjectId function.
var machineId = readMachineId()
var processId = os.Getpid()
// readMachineId generates and returns a machine id.
// If this function fails to get the hostname it will cause a runtime error.
@ -234,9 +236,8 @@ func NewObjectId() ObjectId {
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
pid := os.Getpid()
b[7] = byte(pid >> 8)
b[8] = byte(pid)
b[7] = byte(processId >> 8)
b[8] = byte(processId)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
@ -276,6 +277,22 @@ var nullBytes = []byte("null")
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
var v struct {
Id json.RawMessage `json:"$oid"`
Func struct {
Id json.RawMessage
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err == nil {
if len(v.Id) > 0 {
data = []byte(v.Id)
} else {
data = []byte(v.Func.Id)
}
}
}
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
*id = ""
return nil
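Illustrative sketch, assuming gopkg.in/mgo.v2/bson: with the change above, an ObjectId also decodes from the extended-JSON {"$oid": ...} form (and the ObjectId(...) function form) instead of only a plain hex string. The sample id is made up.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	var id bson.ObjectId
	if err := json.Unmarshal([]byte(`{"$oid":"5a9427648b0beebeb69579cc"}`), &id); err != nil {
		panic(err)
	}
	fmt.Println(id.Hex()) // 5a9427648b0beebeb69579cc
}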

vendor/gopkg.in/mgo.v2/bson/decimal.go (generated, vendored, new file): 310 changes

@ -0,0 +1,310 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package bson
import (
"fmt"
"strconv"
"strings"
)
// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
h, l uint64
}
func (d Decimal128) String() string {
var pos int // positive sign
var e int // exponent
var h, l uint64 // significand high/low
if d.h>>63&1 == 0 {
pos = 1
}
switch d.h >> 58 & (1<<5 - 1) {
case 0x1F:
return "NaN"
case 0x1E:
return "-Inf"[pos:]
}
l = d.l
if d.h>>61&3 == 3 {
// Bits: 1*sign 2*ignored 14*exponent 111*significand.
// Implicit 0b100 prefix in significand.
e = int(d.h>>47&(1<<14-1)) - 6176
//h = 4<<47 | d.h&(1<<47-1)
// Spec says all of these values are out of range.
h, l = 0, 0
} else {
// Bits: 1*sign 14*exponent 113*significand
e = int(d.h>>49&(1<<14-1)) - 6176
h = d.h & (1<<49 - 1)
}
// Would be handled by the logic below, but that's trivial and common.
if h == 0 && l == 0 && e == 0 {
return "-0"[pos:]
}
var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
var last = len(repr)
var i = len(repr)
var dot = len(repr) + e
var rem uint32
Loop:
for d9 := 0; d9 < 5; d9++ {
h, l, rem = divmod(h, l, 1e9)
for d1 := 0; d1 < 9; d1++ {
// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
e += len(repr) - i
i--
repr[i] = '.'
last = i - 1
dot = len(repr) // Unmark.
}
c := '0' + byte(rem%10)
rem /= 10
i--
repr[i] = c
// Handle "0E+3", "1E+3", etc.
if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
last = i
break Loop
}
if c != '0' {
last = i
}
// Break early. Works without it, but why.
if dot > i && l == 0 && h == 0 && rem == 0 {
break Loop
}
}
}
repr[last-1] = '-'
last--
if e > 0 {
return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
}
if e < 0 {
return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
}
return string(repr[last+pos:])
}
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
div64 := uint64(div)
a := h >> 32
aq := a / div64
ar := a % div64
b := ar<<32 + h&(1<<32-1)
bq := b / div64
br := b % div64
c := br<<32 + l>>32
cq := c / div64
cr := c % div64
d := cr<<32 + l&(1<<32-1)
dq := d / div64
dr := d % div64
return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
}
var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}
func dErr(s string) (Decimal128, error) {
return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}
func ParseDecimal128(s string) (Decimal128, error) {
orig := s
if s == "" {
return dErr(orig)
}
neg := s[0] == '-'
if neg || s[0] == '+' {
s = s[1:]
}
if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
return dNaN, nil
}
if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
if neg {
return dNegInf, nil
}
return dPosInf, nil
}
return dErr(orig)
}
var h, l uint64
var e int
var add, ovr uint32
var mul uint32 = 1
var dot = -1
var digits = 0
var i = 0
for i < len(s) {
c := s[i]
if mul == 1e9 {
h, l, ovr = muladd(h, l, mul, add)
mul, add = 1, 0
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if c >= '0' && c <= '9' {
i++
if c > '0' || digits > 0 {
digits++
}
if digits > 34 {
if c == '0' {
// Exact rounding.
e++
continue
}
return dErr(orig)
}
mul *= 10
add *= 10
add += uint32(c - '0')
continue
}
if c == '.' {
i++
if dot >= 0 || i == 1 && len(s) == 1 {
return dErr(orig)
}
if i == len(s) {
break
}
if s[i] < '0' || s[i] > '9' || e > 0 {
return dErr(orig)
}
dot = i
continue
}
break
}
if i == 0 {
return dErr(orig)
}
if mul > 1 {
h, l, ovr = muladd(h, l, mul, add)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if dot >= 0 {
e += dot - i
}
if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
i++
eneg := s[i] == '-'
if eneg || s[i] == '+' {
i++
if i == len(s) {
return dErr(orig)
}
}
n := 0
for i < len(s) && n < 1e4 {
c := s[i]
i++
if c < '0' || c > '9' {
return dErr(orig)
}
n *= 10
n += int(c - '0')
}
if eneg {
n = -n
}
e += n
for e < -6176 {
// Subnormal.
var div uint32 = 1
for div < 1e9 && e < -6176 {
div *= 10
e++
}
var rem uint32
h, l, rem = divmod(h, l, div)
if rem > 0 {
return dErr(orig)
}
}
for e > 6111 {
// Clamped.
var mul uint32 = 1
for mul < 1e9 && e > 6111 {
mul *= 10
e--
}
h, l, ovr = muladd(h, l, mul, 0)
if ovr > 0 || h&((1<<15-1)<<49) > 0 {
return dErr(orig)
}
}
if e < -6176 || e > 6111 {
return dErr(orig)
}
}
if i < len(s) {
return dErr(orig)
}
h |= uint64(e+6176) & uint64(1<<14-1) << 49
if neg {
h |= 1 << 63
}
return Decimal128{h, l}, nil
}
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
mul64 := uint64(mul)
a := mul64 * (l & (1<<32 - 1))
b := a>>32 + mul64*(l>>32)
c := b>>32 + mul64*(h&(1<<32-1))
d := c>>32 + mul64*(h>>32)
a = a&(1<<32-1) + uint64(add)
b = b&(1<<32-1) + a>>32
c = c&(1<<32-1) + b>>32
d = d&(1<<32-1) + c>>32
return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
}
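Illustrative sketch, assuming gopkg.in/mgo.v2/bson: parsing a decimal128 literal with the new ParseDecimal128 and printing it back through the String method defined above.

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	d, err := bson.ParseDecimal128("-1.5E+3")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // fmt picks up the String method and prints the canonical form
}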


@ -539,6 +539,11 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x13: // Decimal128
in = Decimal128{
l: uint64(d.readInt64()),
h: uint64(d.readInt64()),
}
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key


@ -247,7 +247,7 @@ func (e *encoder) addElemName(kind byte, name string) {
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName('\x0A', name)
e.addElemName(0x0A, name)
return
}
@ -276,29 +276,29 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName('\x07', name)
e.addElemName(0x07, name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName('\x0E', name)
e.addElemName(0x0E, name)
e.addStr(s)
case typeJSONNumber:
n := v.Interface().(json.Number)
if i, err := n.Int64(); err == nil {
e.addElemName('\x12', name)
e.addElemName(0x12, name)
e.addInt64(i)
} else if f, err := n.Float64(); err == nil {
e.addElemName('\x01', name)
e.addElemName(0x01, name)
e.addFloat64(f)
} else {
panic("failed to convert json.Number to a number: " + s)
}
default:
e.addElemName('\x02', name)
e.addElemName(0x02, name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName('\x01', name)
e.addElemName(0x01, name)
e.addFloat64(v.Float())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
@ -306,40 +306,40 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName('\x10', name)
e.addElemName(0x10, name)
e.addInt32(int32(u))
} else {
e.addElemName('\x12', name)
e.addElemName(0x12, name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName('\x11', name)
e.addElemName(0x11, name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName('\x7F', name)
e.addElemName(0x7F, name)
} else {
e.addElemName('\xFF', name)
e.addElemName(0xFF, name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName('\x10', name)
e.addElemName(0x10, name)
e.addInt32(int32(i))
} else {
e.addElemName('\x12', name)
e.addElemName(0x12, name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName('\x08', name)
e.addElemName(0x08, name)
if v.Bool() {
e.addBytes(1)
} else {
@ -347,40 +347,40 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
}
case reflect.Map:
e.addElemName('\x03', name)
e.addElemName(0x03, name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Bytes())
e.addElemName(0x05, name)
e.addBinary(0x00, v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName('\x03', name)
e.addElemName(0x03, name)
e.addDoc(v)
} else {
e.addElemName('\x04', name)
e.addElemName(0x04, name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addElemName(0x05, name)
if v.CanAddr() {
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
} else {
n := v.Len()
e.addInt32(int32(n))
e.addBytes('\x00')
e.addBytes(0x00)
for i := 0; i < n; i++ {
el := v.Index(i)
e.addBytes(byte(el.Uint()))
}
}
} else {
e.addElemName('\x04', name)
e.addElemName(0x04, name)
e.addDoc(v)
}
@ -399,11 +399,16 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
e.addBytes(s.Data...)
case Binary:
e.addElemName('\x05', name)
e.addElemName(0x05, name)
e.addBinary(s.Kind, s.Data)
case Decimal128:
e.addElemName(0x13, name)
e.addInt64(int64(s.l))
e.addInt64(int64(s.h))
case DBPointer:
e.addElemName('\x0C', name)
e.addElemName(0x0C, name)
e.addStr(s.Namespace)
if len(s.Id) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
@ -412,16 +417,16 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
e.addBytes([]byte(s.Id)...)
case RegEx:
e.addElemName('\x0B', name)
e.addElemName(0x0B, name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName('\x0D', name)
e.addElemName(0x0D, name)
e.addStr(s.Code)
} else {
e.addElemName('\x0F', name)
e.addElemName(0x0F, name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
@ -430,18 +435,18 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
case time.Time:
// MongoDB handles timestamps as milliseconds.
e.addElemName('\x09', name)
e.addElemName(0x09, name)
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
case url.URL:
e.addElemName('\x02', name)
e.addElemName(0x02, name)
e.addStr(s.String())
case undefined:
e.addElemName('\x06', name)
e.addElemName(0x06, name)
default:
e.addElemName('\x03', name)
e.addElemName(0x03, name)
e.addDoc(v)
}
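Illustrative sketch, assuming gopkg.in/mgo.v2/bson: this encoder case (element type 0x13) pairs with the decoder hunk earlier, so a Decimal128 field round-trips through Marshal and Unmarshal. The struct is made up.

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

type price struct {
	Amount bson.Decimal128 `bson:"amount"`
}

func main() {
	d, err := bson.ParseDecimal128("19.99")
	if err != nil {
		panic(err)
	}
	raw, err := bson.Marshal(price{Amount: d})
	if err != nil {
		panic(err)
	}
	var out price
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Amount) // 19.99
}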

vendor/gopkg.in/mgo.v2/bson/json.go (generated, vendored, new file): 380 changes

@ -0,0 +1,380 @@
package bson
import (
"bytes"
"encoding/base64"
"fmt"
"gopkg.in/mgo.v2/internal/json"
"strconv"
"time"
)
// UnmarshalJSON unmarshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func UnmarshalJSON(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&jsonExt)
return d.Decode(value)
}
// MarshalJSON marshals a JSON value that may hold non-standard
// syntax as defined in BSON's extended JSON specification.
func MarshalJSON(value interface{}) ([]byte, error) {
var buf bytes.Buffer
e := json.NewEncoder(&buf)
e.Extend(&jsonExt)
err := e.Encode(value)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// jdec is used internally by the JSON decoding functions
// so they may unmarshal functions without getting into endless
// recursion due to keyed objects.
func jdec(data []byte, value interface{}) error {
d := json.NewDecoder(bytes.NewBuffer(data))
d.Extend(&funcExt)
return d.Decode(value)
}
var jsonExt json.Extension
var funcExt json.Extension
// TODO
// - Shell regular expressions ("/regexp/opts")
func init() {
jsonExt.DecodeUnquotedKeys(true)
jsonExt.DecodeTrailingCommas(true)
funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
jsonExt.DecodeKeyed("$binary", jdecBinary)
jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
jsonExt.EncodeType([]byte(nil), jencBinarySlice)
jsonExt.EncodeType(Binary{}, jencBinaryType)
funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
funcExt.DecodeFunc("new Date", "$dateFunc", "S")
jsonExt.DecodeKeyed("$date", jdecDate)
jsonExt.DecodeKeyed("$dateFunc", jdecDate)
jsonExt.EncodeType(time.Time{}, jencDate)
funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
funcExt.DecodeConst("undefined", Undefined)
jsonExt.DecodeKeyed("$regex", jdecRegEx)
jsonExt.EncodeType(RegEx{}, jencRegEx)
funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
jsonExt.DecodeKeyed("$oid", jdecObjectId)
jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
jsonExt.EncodeType(ObjectId(""), jencObjectId)
funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
jsonExt.EncodeType(int64(0), jencNumberLong)
jsonExt.EncodeType(int(0), jencInt)
funcExt.DecodeConst("MinKey", MinKey)
funcExt.DecodeConst("MaxKey", MaxKey)
jsonExt.DecodeKeyed("$minKey", jdecMinKey)
jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
jsonExt.DecodeKeyed("$undefined", jdecUndefined)
jsonExt.EncodeType(Undefined, jencUndefined)
jsonExt.Extend(&funcExt)
}
func fbytes(format string, args ...interface{}) []byte {
var buf bytes.Buffer
fmt.Fprintf(&buf, format, args...)
return buf.Bytes()
}
func jdecBinary(data []byte) (interface{}, error) {
var v struct {
Binary []byte `json:"$binary"`
Type string `json:"$type"`
Func struct {
Binary []byte `json:"$binary"`
Type int64 `json:"$type"`
} `json:"$binaryFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
var binData []byte
var binKind int64
if v.Type == "" && v.Binary == nil {
binData = v.Func.Binary
binKind = v.Func.Type
} else if v.Type == "" {
return v.Binary, nil
} else {
binData = v.Binary
binKind, err = strconv.ParseInt(v.Type, 0, 64)
if err != nil {
binKind = -1
}
}
if binKind == 0 {
return binData, nil
}
if binKind < 0 || binKind > 255 {
return nil, fmt.Errorf("invalid type in binary object: %s", data)
}
return Binary{Kind: byte(binKind), Data: binData}, nil
}
func jencBinarySlice(v interface{}) ([]byte, error) {
in := v.([]byte)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
base64.StdEncoding.Encode(out, in)
return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
}
func jencBinaryType(v interface{}) ([]byte, error) {
in := v.(Binary)
out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
base64.StdEncoding.Encode(out, in.Data)
return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
}
const jdateFormat = "2006-01-02T15:04:05.999Z"
func jdecDate(data []byte) (interface{}, error) {
var v struct {
S string `json:"$date"`
Func struct {
S string
} `json:"$dateFunc"`
}
_ = jdec(data, &v)
if v.S == "" {
v.S = v.Func.S
}
if v.S != "" {
for _, format := range []string{jdateFormat, "2006-01-02"} {
t, err := time.Parse(format, v.S)
if err == nil {
return t, nil
}
}
return nil, fmt.Errorf("cannot parse date: %q", v.S)
}
var vn struct {
Date struct {
N int64 `json:"$numberLong,string"`
} `json:"$date"`
Func struct {
S int64
} `json:"$dateFunc"`
}
err := jdec(data, &vn)
if err != nil {
return nil, fmt.Errorf("cannot parse date: %q", data)
}
n := vn.Date.N
if n == 0 {
n = vn.Func.S
}
return time.Unix(n/1000, n%1000*1e6).UTC(), nil
}
func jencDate(v interface{}) ([]byte, error) {
t := v.(time.Time)
return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
}
func jdecTimestamp(data []byte) (interface{}, error) {
var v struct {
Func struct {
T int32 `json:"t"`
I int32 `json:"i"`
} `json:"$timestamp"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
}
func jencTimestamp(v interface{}) ([]byte, error) {
ts := uint64(v.(MongoTimestamp))
return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
}
func jdecRegEx(data []byte) (interface{}, error) {
var v struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
return RegEx{v.Regex, v.Options}, nil
}
func jencRegEx(v interface{}) ([]byte, error) {
re := v.(RegEx)
type regex struct {
Regex string `json:"$regex"`
Options string `json:"$options"`
}
return json.Marshal(regex{re.Pattern, re.Options})
}
func jdecObjectId(data []byte) (interface{}, error) {
var v struct {
Id string `json:"$oid"`
Func struct {
Id string
} `json:"$oidFunc"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.Id == "" {
v.Id = v.Func.Id
}
return ObjectIdHex(v.Id), nil
}
func jencObjectId(v interface{}) ([]byte, error) {
return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
}
func jdecDBRef(data []byte) (interface{}, error) {
// TODO Support unmarshaling $ref and $id into the input value.
var v struct {
Obj map[string]interface{} `json:"$dbrefFunc"`
}
// TODO Fix this. Must not be required.
v.Obj = make(map[string]interface{})
err := jdec(data, &v)
if err != nil {
return nil, err
}
return v.Obj, nil
}
func jdecNumberLong(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$numberLong,string"`
Func struct {
N int64 `json:",string"`
} `json:"$numberLongFunc"`
}
var vn struct {
N int64 `json:"$numberLong"`
Func struct {
N int64
} `json:"$numberLongFunc"`
}
err := jdec(data, &v)
if err != nil {
err = jdec(data, &vn)
v.N = vn.N
v.Func.N = vn.Func.N
}
if err != nil {
return nil, err
}
if v.N != 0 {
return v.N, nil
}
return v.Func.N, nil
}
func jencNumberLong(v interface{}) ([]byte, error) {
n := v.(int64)
f := `{"$numberLong":"%d"}`
if n <= 1<<53 {
f = `{"$numberLong":%d}`
}
return fbytes(f, n), nil
}
func jencInt(v interface{}) ([]byte, error) {
n := v.(int)
f := `{"$numberLong":"%d"}`
if int64(n) <= 1<<53 {
f = `%d`
}
return fbytes(f, n), nil
}
func jdecMinKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$minKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $minKey object: %s", data)
}
return MinKey, nil
}
func jdecMaxKey(data []byte) (interface{}, error) {
var v struct {
N int64 `json:"$maxKey"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if v.N != 1 {
return nil, fmt.Errorf("invalid $maxKey object: %s", data)
}
return MaxKey, nil
}
func jencMinMaxKey(v interface{}) ([]byte, error) {
switch v.(orderKey) {
case MinKey:
return []byte(`{"$minKey":1}`), nil
case MaxKey:
return []byte(`{"$maxKey":1}`), nil
}
panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
}
func jdecUndefined(data []byte) (interface{}, error) {
var v struct {
B bool `json:"$undefined"`
}
err := jdec(data, &v)
if err != nil {
return nil, err
}
if !v.B {
return nil, fmt.Errorf("invalid $undefined object: %s", data)
}
return Undefined, nil
}
func jencUndefined(v interface{}) ([]byte, error) {
return []byte(`{"$undefined":true}`), nil
}
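Illustrative sketch, assuming gopkg.in/mgo.v2/bson: the new UnmarshalJSON/MarshalJSON accept and emit MongoDB extended JSON, including shell-style function calls such as ObjectId(...). The document is made up.

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	src := []byte(`{"_id": ObjectId("5a9427648b0beebeb69579cc"), "when": {"$date": "2017-04-11T00:00:00.000Z"}}`)
	var doc bson.M
	if err := bson.UnmarshalJSON(src, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", doc["_id"], doc["when"]) // bson.ObjectId time.Time
	out, err := bson.MarshalJSON(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}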

vendor/gopkg.in/mgo.v2/cluster.go (generated, vendored): 5 changes

@ -588,7 +588,10 @@ func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout
mastersLen := cluster.masters.Len()
slavesLen := cluster.servers.Len() - mastersLen
debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 {
if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
break
}
if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
break
}
if started.IsZero() {

vendor/gopkg.in/mgo.v2/gridfs.go (generated, vendored): 2 changes

@ -359,7 +359,7 @@ func (file *GridFile) assertMode(mode gfsFileMode) {
// SetChunkSize sets size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 256kb.
// independent chunk document. The default chunk size is 255kb.
//
// It is a runtime error to call this function once the file has started
// being written to.

vendor/gopkg.in/mgo.v2/internal/json/LICENSE (generated, vendored, new file): 27 changes

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/gopkg.in/mgo.v2/internal/json/decode.go (generated, vendored, new file): 1685 changes (diff suppressed because it is too large)

vendor/gopkg.in/mgo.v2/internal/json/encode.go (generated, vendored, new file): 1256 changes (diff suppressed because it is too large)

vendor/gopkg.in/mgo.v2/internal/json/extension.go (generated, vendored, new file): 95 changes

@ -0,0 +1,95 @@
package json
import (
"reflect"
)
// Extension holds a set of additional rules to be used when unmarshaling
// strict JSON or JSON-like content.
type Extension struct {
funcs map[string]funcExt
consts map[string]interface{}
keyed map[string]func([]byte) (interface{}, error)
encode map[reflect.Type]func(v interface{}) ([]byte, error)
unquotedKeys bool
trailingCommas bool
}
type funcExt struct {
key string
args []string
}
// Extend changes the decoder behavior to consider the provided extension.
func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
// Extend changes the encoder behavior to consider the provided extension.
func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
// Extend includes in e the extensions defined in ext.
func (e *Extension) Extend(ext *Extension) {
for name, fext := range ext.funcs {
e.DecodeFunc(name, fext.key, fext.args...)
}
for name, value := range ext.consts {
e.DecodeConst(name, value)
}
for key, decode := range ext.keyed {
e.DecodeKeyed(key, decode)
}
for typ, encode := range ext.encode {
if e.encode == nil {
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
}
e.encode[typ] = encode
}
}
// DecodeFunc defines a function call that may be observed inside JSON content.
// A function with the provided name will be unmarshaled as the document
// {key: {args[0]: ..., args[N]: ...}}.
func (e *Extension) DecodeFunc(name string, key string, args ...string) {
if e.funcs == nil {
e.funcs = make(map[string]funcExt)
}
e.funcs[name] = funcExt{key, args}
}
// DecodeConst defines a constant name that may be observed inside JSON content
// and will be decoded with the provided value.
func (e *Extension) DecodeConst(name string, value interface{}) {
if e.consts == nil {
e.consts = make(map[string]interface{})
}
e.consts[name] = value
}
// DecodeKeyed defines a key that when observed as the first element inside a
// JSON document triggers the decoding of that document via the provided
// decode function.
func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
if e.keyed == nil {
e.keyed = make(map[string]func([]byte) (interface{}, error))
}
e.keyed[key] = decode
}
// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
func (e *Extension) DecodeUnquotedKeys(accept bool) {
e.unquotedKeys = accept
}
// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
func (e *Extension) DecodeTrailingCommas(accept bool) {
e.trailingCommas = accept
}
// EncodeType registers a function to encode values with the same type of the
// provided sample.
func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
if e.encode == nil {
e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
}
e.encode[reflect.TypeOf(sample)] = encode
}
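Illustrative sketch (names hypothetical) of how this Extension API is wired up, mirroring the registration pattern used in bson/json.go above. The package is internal to mgo, so the sketch is written as if it lived inside the same package.

package json // sketch only: this package cannot be imported from outside gopkg.in/mgo.v2

import (
	"bytes"
	"fmt"
)

// decodePointCalls shows a hypothetical extension that decodes the
// shell-style call Point(1, 2) into a [2]float64.
func decodePointCalls(src []byte) (interface{}, error) {
	var ext Extension
	// Rewrite Point(x, y) as the keyed document {"$pointFunc": {"x": x, "y": y}}.
	ext.DecodeFunc("Point", "$pointFunc", "x", "y")
	// Map that keyed document onto a concrete Go value.
	ext.DecodeKeyed("$pointFunc", func(data []byte) (interface{}, error) {
		var v struct {
			Func struct{ X, Y float64 } `json:"$pointFunc"`
		}
		if err := Unmarshal(data, &v); err != nil {
			return nil, err
		}
		return [2]float64{v.Func.X, v.Func.Y}, nil
	})

	d := NewDecoder(bytes.NewReader(src))
	d.Extend(&ext)
	var out interface{}
	if err := d.Decode(&out); err != nil {
		return nil, err
	}
	fmt.Printf("%v\n", out)
	return out, nil
}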

vendor/gopkg.in/mgo.v2/internal/json/fold.go (generated, vendored, new file): 143 changes

@ -0,0 +1,143 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
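A quick illustration (not part of the diff) of why 'k'/'K' and 's'/'S' force the slower path: under Unicode case folding they also match the Kelvin sign and the long s, which bytes.EqualFold handles but the ASCII-only comparisons above cannot.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.EqualFold([]byte("K"), []byte("\u212a"))) // true: U+212A Kelvin sign folds to k
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true: U+017F long s folds to s
}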

vendor/gopkg.in/mgo.v2/internal/json/indent.go (generated, vendored, new file): 141 changes

@ -0,0 +1,141 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
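
Compact and Indent mirror the standard library's encoding/json functions of the same name, so their behaviour can be sketched with the stdlib; note how the delayed indent keeps an empty object rendered as {}:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	src := []byte(`{ "a": [1, 2, 3],   "b": {} }`)

	// Compact strips insignificant whitespace.
	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"a":[1,2,3],"b":{}}

	// Indent re-adds newlines and indentation, one element per line.
	var indented bytes.Buffer
	if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String()) // "b": {} stays on a single line
}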

697
vendor/gopkg.in/mgo.v2/internal/json/scanner.go generated vendored Normal file
View file

@ -0,0 +1,697 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray, scanEndParams:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
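
nextValue splits a buffer after the first complete JSON value; a hedged stdlib analogue of the same idea is to decode one value and then read whatever the decoder left buffered:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
)

func main() {
	data := []byte(`{"a": 1} [2, 3]`)
	dec := json.NewDecoder(bytes.NewReader(data))

	// Decode exactly one value...
	var first json.RawMessage
	if err := dec.Decode(&first); err != nil {
		panic(err)
	}

	// ...and whatever follows is still in the decoder's buffer, which is
	// what nextValue returns as "rest".
	rest, _ := ioutil.ReadAll(dec.Buffered())
	fmt.Printf("value=%s rest=%q\n", first, rest)
}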
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanBeginName // begin function call
scanParam // begin function argument
scanEndParams // end function call
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
parseName // parsing unquoted name
parseParam // parsing function argument value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 'n':
s.step = stateNew0
return scanBeginName
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
if isName(c) {
s.step = stateName
return scanBeginName
}
return s.error(c, "looking for beginning of value")
}
func isName(c byte) bool {
return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
if isName(c) {
s.step = stateName
return scanBeginName
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginStringOrEmpty
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValueOrEmpty
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
case parseParam:
if c == ',' {
s.step = stateBeginValue
return scanParam
}
if c == ')' {
s.popParseState()
return scanEndParams
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateNew0 is the state after reading `n`.
func stateNew0(s *scanner, c byte) int {
if c == 'e' {
s.step = stateNew1
return scanContinue
}
s.step = stateName
return stateName(s, c)
}
// stateNew1 is the state after reading `ne`.
func stateNew1(s *scanner, c byte) int {
if c == 'w' {
s.step = stateNew2
return scanContinue
}
s.step = stateName
return stateName(s, c)
}
// stateNew2 is the state after reading `new`.
func stateNew2(s *scanner, c byte) int {
s.step = stateName
if c == ' ' {
return scanContinue
}
return stateName(s, c)
}
// stateName is the state while reading an unquoted function name.
func stateName(s *scanner, c byte) int {
if isName(c) {
return scanContinue
}
if c == '(' {
s.step = stateParamOrEmpty
s.pushParseState(parseParam)
return scanParam
}
return stateEndValue(s, c)
}
// stateParamOrEmpty is the state after reading `(`.
func stateParamOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ')' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
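
The name/param states above (stateBeginName, stateName, parseParam) extend the scanner beyond plain JSON to unquoted keys and constructor-style calls, which the standard scanner rejects; a small sketch of that difference using the stdlib validity check:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Plain JSON is accepted by both scanners.
	fmt.Println(json.Valid([]byte(`{"when": {"$date": "2017-04-11"}}`))) // true

	// Extended-JSON style input with an unquoted key and a constructor call
	// is what stateName/parseParam handle; the stdlib scanner rejects it.
	fmt.Println(json.Valid([]byte(`{when: ISODate("2017-04-11")}`))) // false
}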

510
vendor/gopkg.in/mgo.v2/internal/json/stream.go generated vendored Normal file
View file

@ -0,0 +1,510 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON values to an output stream.
type Encoder struct {
w io.Writer
err error
escapeHTML bool
indentBuf *bytes.Buffer
indentPrefix string
indentValue string
ext Extension
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w, escapeHTML: true}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
e.ext = enc.ext
err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
b := e.Bytes()
if enc.indentBuf != nil {
enc.indentBuf.Reset()
err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
if err != nil {
return err
}
b = enc.indentBuf.Bytes()
}
if _, err = enc.w.Write(b); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
// Indent sets the encoder to format each encoded value with Indent.
func (enc *Encoder) Indent(prefix, indent string) {
enc.indentBuf = new(bytes.Buffer)
enc.indentPrefix = prefix
enc.indentValue = indent
}
// DisableHTMLEscaping causes the encoder not to escape angle brackets
// ("<" and ">") or ampersands ("&") in JSON strings.
func (enc *Encoder) DisableHTMLEscaping() {
enc.escapeHTML = false
}
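
Indent and DisableHTMLEscaping configure this Encoder much as the stdlib's SetIndent and SetEscapeHTML(false) do; a quick stdlib sketch of the equivalent settings:

package main

import (
	"encoding/json"
	"os"
)

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")  // counterpart of (*Encoder).Indent above
	enc.SetEscapeHTML(false) // counterpart of DisableHTMLEscaping above

	// With HTML escaping off, <, > and & are written verbatim.
	if err := enc.Encode(map[string]string{"html": "<b>&</b>"}); err != nil {
		panic(err)
	}
}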
// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/
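
Token, More and Decode above follow the stdlib streaming API, so the intended loop can be sketched with encoding/json: read the opening delimiter, decode elements while More reports data, then read the closing delimiter:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`[{"name":"a"},{"name":"b"}]`))

	// Consume the opening '[' delimiter.
	if _, err := dec.Token(); err != nil {
		panic(err)
	}

	// More reports whether the current array has further elements.
	for dec.More() {
		var v struct{ Name string }
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v.Name)
	}

	// Consume the closing ']' delimiter.
	if _, err := dec.Token(); err != nil {
		panic(err)
	}
}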

44
vendor/gopkg.in/mgo.v2/internal/json/tags.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
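
parseTag and Contains split a struct tag such as `json:"name,omitempty,string"` into a name and its options; a self-contained sketch of that split (splitTag is a local stand-in defined here, not part of this package):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// splitTag mirrors parseTag above: the name comes before the first comma,
// the comma-separated options follow.
func splitTag(tag string) (name string, opts []string) {
	parts := strings.Split(tag, ",")
	return parts[0], parts[1:]
}

func main() {
	type T struct {
		Name string `json:"name,omitempty,string"`
	}
	tag := reflect.TypeOf(T{}).Field(0).Tag.Get("json")
	name, opts := splitTag(tag)
	fmt.Println(name, opts) // name [omitempty string]
}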

11
vendor/gopkg.in/mgo.v2/server.go generated vendored
View file

@ -402,6 +402,15 @@ func (servers *mongoServers) Empty() bool {
return len(servers.slice) == 0
}
func (servers *mongoServers) HasMongos() bool {
for _, s := range servers.slice {
if s.Info().Mongos {
return true
}
}
return false
}
// BestFit returns the best guess of what would be the most interesting
// server to perform operations on at this point in time.
func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer {
@ -421,6 +430,8 @@ func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServe
switch {
case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
// Must have requested tags.
case mode == Secondary && next.info.Master && !next.info.Mongos:
// Must be a secondary or mongos.
case next.info.Master != best.info.Master && mode != Nearest:
// Prefer slaves, unless the mode is PrimaryPreferred.
swap = (mode == PrimaryPreferred) != best.info.Master

137
vendor/gopkg.in/mgo.v2/session.go generated vendored
View file

@ -146,7 +146,10 @@ var (
ErrCursor = errors.New("invalid cursor")
)
const defaultPrefetch = 0.25
const (
defaultPrefetch = 0.25
maxUpsertRetries = 5
)
// Dial establishes a new session to the cluster identified by the given seed
// server(s). The session will enable communication with all of the servers in
@ -1008,6 +1011,8 @@ type indexSpec struct {
DefaultLanguage string "default_language,omitempty"
LanguageOverride string "language_override,omitempty"
TextIndexVersion int "textIndexVersion,omitempty"
Collation *Collation "collation,omitempty"
}
type Index struct {
@ -1046,6 +1051,54 @@ type Index struct {
// from the weighted sum of the frequency for each of the indexed fields in
// that document. The default field weight is 1.
Weights map[string]int
// Collation defines the collation to use for the index.
Collation *Collation
}
type Collation struct {
// Locale defines the collation locale.
Locale string `bson:"locale"`
// CaseLevel defines whether to turn case sensitivity on at strength 1 or 2.
CaseLevel bool `bson:"caseLevel,omitempty"`
// CaseFirst may be set to "upper" or "lower" to define whether
// to have uppercase or lowercase items first. Default is "off".
CaseFirst string `bson:"caseFirst,omitempty"`
// Strength defines the priority of comparison properties, as follows:
//
// 1 (primary) - Strongest level, denote difference between base characters
// 2 (secondary) - Accents in characters are considered secondary differences
// 3 (tertiary) - Upper and lower case differences in characters are
// distinguished at the tertiary level
// 4 (quaternary) - When punctuation is ignored at level 1-3, an additional
// level can be used to distinguish words with and without
// punctuation. Should only be used if ignoring punctuation
// is required or when processing Japanese text.
// 5 (identical) - When all other levels are equal, the identical level is
// used as a tiebreaker. The Unicode code point values of
// the NFD form of each string are compared at this level,
// just in case there is no difference at levels 1-4
//
// Strength defaults to 3.
Strength int `bson:"strength,omitempty"`
// NumericOrdering defines whether to order numbers based on numerical
// order and not collation order.
NumericOrdering bool `bson:"numericOrdering,omitempty"`
// Alternate controls whether spaces and punctuation are considered base characters.
// May be set to "non-ignorable" (spaces and punctuation considered base characters)
// or "shifted" (spaces and punctuation not considered base characters, and only
// distinguished at strength > 3). Defaults to "non-ignorable".
Alternate string `bson:"alternate,omitempty"`
// Backwards defines whether to have secondary differences considered in reverse order,
// as done in the French language.
Backwards bool `bson:"backwards,omitempty"`
}
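
A hedged sketch of how the new Collation field is meant to be used when declaring an index (assumes the usual gopkg.in/mgo.v2 import and a live collection; strength 2 ignores case differences but keeps accent differences):

package indexes

import (
	mgo "gopkg.in/mgo.v2"
)

// EnsureCaseInsensitiveNameIndex builds a unique index on "name" that
// treats "Bob" and "bob" as the same key.
func EnsureCaseInsensitiveNameIndex(c *mgo.Collection) error {
	return c.EnsureIndex(mgo.Index{
		Key:    []string{"name"},
		Unique: true,
		Collation: &mgo.Collation{
			Locale:   "en",
			Strength: 2,
		},
	})
}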
// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats.
@ -1239,6 +1292,7 @@ func (c *Collection) EnsureIndex(index Index) error {
Weights: keyInfo.weights,
DefaultLanguage: index.DefaultLanguage,
LanguageOverride: index.LanguageOverride,
Collation: index.Collation,
}
if spec.Min == 0 && spec.Max == 0 {
@ -1453,6 +1507,7 @@ func indexFromSpec(spec indexSpec) Index {
DefaultLanguage: spec.DefaultLanguage,
LanguageOverride: spec.LanguageOverride,
ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second,
Collation: spec.Collation,
}
if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max {
index.Min = int(spec.Min)
@ -1582,6 +1637,8 @@ func (s *Session) Refresh() {
// SetMode changes the consistency mode for the session.
//
// The default mode is Strong.
//
// In the Strong consistency mode reads and writes will always be made to
// the primary server using a unique connection so that reads and writes are
// fully consistent, ordered, and observing the most up-to-date data.
@ -1655,6 +1712,8 @@ func (s *Session) SetSyncTimeout(d time.Duration) {
// SetSocketTimeout sets the amount of time to wait for a non-responding
// socket to the database before it is forcefully closed.
//
// The default timeout is 1 minute.
func (s *Session) SetSocketTimeout(d time.Duration) {
s.m.Lock()
s.sockTimeout = d
@ -1785,6 +1844,9 @@ func (s *Session) Safe() (safe *Safe) {
// will be followed by a getLastError command with the specified parameters,
// to ensure the request was correctly processed.
//
// The default is &Safe{}, meaning check for errors and use the default
// behavior for all fields.
//
// The safe.W parameter determines how many servers should confirm a write
// before the operation is considered successful. If set to 0 or 1, the
// command will return as soon as the primary is done with the request.
@ -2332,8 +2394,7 @@ type queryError struct {
ErrMsg string
Assertion string
Code int
AssertionCode int "assertionCode"
LastError *LastError "lastErrorObject"
AssertionCode int "assertionCode"
}
type QueryError struct {
@ -2478,7 +2539,15 @@ func (c *Collection) Upsert(selector interface{}, update interface{}) (info *Cha
Flags: 1,
Upsert: true,
}
lerr, err := c.writeOp(&op, true)
var lerr *LastError
for i := 0; i < maxUpsertRetries; i++ {
lerr, err = c.writeOp(&op, true)
// Retry duplicate key errors on upserts.
// https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes
if !IsDup(err) {
break
}
}
if err == nil && lerr != nil {
info = &ChangeInfo{}
if lerr.UpdatedExisting {
@ -2983,9 +3052,6 @@ func checkQueryError(fullname string, d []byte) error {
Error:
result := &queryError{}
bson.Unmarshal(d, result)
if result.LastError != nil {
return result.LastError
}
if result.Err == "" && result.ErrMsg == "" {
return nil
}
@ -3201,13 +3267,14 @@ func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error
}
if result != nil {
err = bson.Unmarshal(data, result)
if err == nil {
if err != nil {
debugf("Run command unmarshaling failed: %#v", op, err)
return err
}
if globalDebug && globalLogger != nil {
var res bson.M
bson.Unmarshal(data, &res)
debugf("Run command unmarshaled: %#v, result: %#v", op, res)
} else {
debugf("Run command unmarshaling failed: %#v", op, err)
return err
}
}
return checkQueryError(op.collection, data)
@ -3554,6 +3621,34 @@ func (iter *Iter) Close() error {
return err
}
// Done returns true only if a follow up Next call is guaranteed
// to return false.
//
// For an iterator created with Tail, Done may return false for
// an iterator that has no more data. Otherwise it's guaranteed
// to return false only if there is data or an error happened.
//
// Done may block waiting for a pending query to verify whether
// more data is actually available or not.
func (iter *Iter) Done() bool {
iter.m.Lock()
defer iter.m.Unlock()
for {
if iter.docData.Len() > 0 {
return false
}
if iter.docsToReceive > 1 {
return true
}
if iter.docsToReceive > 0 {
iter.gotReply.Wait()
continue
}
return iter.op.cursorId == 0
}
}
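
A hedged sketch of what Done enables (assumes a live *mgo.Collection): asking whether any data remains without consuming a document.

package iterdone

import (
	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// FirstOrNone returns the first document of a query, or ok=false when Done
// reports that a follow-up Next is guaranteed to return false.
func FirstOrNone(c *mgo.Collection) (doc bson.M, ok bool, err error) {
	iter := c.Find(nil).Iter()
	if iter.Done() {
		return nil, false, iter.Close()
	}
	ok = iter.Next(&doc)
	return doc, ok, iter.Close()
}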
// Timeout returns true if Next returned false due to a timeout of
// a tailable cursor. In those cases, Next may be called again to continue
// the iteration at the previous cursor position.
@ -4208,8 +4303,16 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err
session.SetMode(Strong, false)
var doc valueResult
err = session.DB(dbname).Run(&cmd, &doc)
if err != nil {
for i := 0; i < maxUpsertRetries; i++ {
err = session.DB(dbname).Run(&cmd, &doc)
if err == nil {
break
}
if change.Upsert && IsDup(err) && i+1 < maxUpsertRetries {
// Retry duplicate key errors on upserts.
// https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes
continue
}
if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" {
return nil, ErrNotFound
}
@ -4258,12 +4361,12 @@ type BuildInfo struct {
// equal to the provided version number. If more than one number is
// provided, numbers will be considered as major, minor, and so on.
func (bi *BuildInfo) VersionAtLeast(version ...int) bool {
for i := range version {
for i, vi := range version {
if i == len(bi.VersionArray) {
return false
}
if bi.VersionArray[i] < version[i] {
return false
if bivi := bi.VersionArray[i]; bivi != vi {
return bivi >= vi
}
}
return true
@ -4525,7 +4628,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err
lerr.N += oplerr.N
lerr.modified += oplerr.modified
if err != nil {
for ei := range lerr.ecases {
for ei := range oplerr.ecases {
oplerr.ecases[ei].Index += i
}
lerr.ecases = append(lerr.ecases, oplerr.ecases...)

View file

@ -49,9 +49,9 @@ func (s *APIKeysService) Get(keyID string) (*account.APIKey, *http.Response, err
if err.(*Error).Message == "unknown api key" {
return nil, resp, ErrKeyMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &a, resp, nil
@ -74,9 +74,8 @@ func (s *APIKeysService) Create(a *account.APIKey) (*http.Response, error) {
if err.(*Error).Message == fmt.Sprintf("api key with name \"%s\" exists", a.Name) {
return resp, ErrKeyExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -101,9 +100,8 @@ func (s *APIKeysService) Update(a *account.APIKey) (*http.Response, error) {
if err.(*Error).Message == "unknown api key" {
return resp, ErrKeyMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -127,9 +125,8 @@ func (s *APIKeysService) Delete(keyID string) (*http.Response, error) {
if err.(*Error).Message == "unknown api key" {
return resp, ErrKeyMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -137,7 +134,7 @@ func (s *APIKeysService) Delete(keyID string) (*http.Response, error) {
var (
// ErrKeyExists bundles PUT create error.
ErrKeyExists = errors.New("Key already exists.")
ErrKeyExists = errors.New("key already exists")
// ErrKeyMissing bundles GET/POST/DELETE error.
ErrKeyMissing = errors.New("Key does not exist.")
ErrKeyMissing = errors.New("key does not exist")
)

View file

@ -48,9 +48,8 @@ func (s *TeamsService) Get(id string) (*account.Team, *http.Response, error) {
if err.(*Error).Message == "Unknown team id" {
return nil, resp, ErrTeamMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &t, resp, nil
@ -73,9 +72,8 @@ func (s *TeamsService) Create(t *account.Team) (*http.Response, error) {
if err.(*Error).Message == fmt.Sprintf("team with name \"%s\" exists", t.Name) {
return resp, ErrTeamExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -100,9 +98,8 @@ func (s *TeamsService) Update(t *account.Team) (*http.Response, error) {
if err.(*Error).Message == "unknown team id" {
return resp, ErrTeamMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -126,9 +123,8 @@ func (s *TeamsService) Delete(id string) (*http.Response, error) {
if err.(*Error).Message == "unknown team id" {
return resp, ErrTeamMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -136,7 +132,7 @@ func (s *TeamsService) Delete(id string) (*http.Response, error) {
var (
// ErrTeamExists bundles PUT create error.
ErrTeamExists = errors.New("Team already exists.")
ErrTeamExists = errors.New("team already exists")
// ErrTeamMissing bundles GET/POST/DELETE error.
ErrTeamMissing = errors.New("Team does not exist.")
ErrTeamMissing = errors.New("team does not exist")
)

View file

@ -48,9 +48,8 @@ func (s *UsersService) Get(username string) (*account.User, *http.Response, erro
if err.(*Error).Message == "Unknown user" {
return nil, resp, ErrUserMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &u, resp, nil
@ -73,9 +72,8 @@ func (s *UsersService) Create(u *account.User) (*http.Response, error) {
if err.(*Error).Message == "request failed:Login Name is already in use." {
return resp, ErrUserExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -100,9 +98,8 @@ func (s *UsersService) Update(u *account.User) (*http.Response, error) {
if err.(*Error).Message == "Unknown user" {
return resp, ErrUserMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -126,9 +123,8 @@ func (s *UsersService) Delete(username string) (*http.Response, error) {
if err.(*Error).Message == "Unknown user" {
return resp, ErrUserMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -136,7 +132,7 @@ func (s *UsersService) Delete(username string) (*http.Response, error) {
var (
// ErrUserExists bundles PUT create error.
ErrUserExists = errors.New("User already exists.")
ErrUserExists = errors.New("user already exists")
// ErrUserMissing bundles GET/POST/DELETE error.
ErrUserMissing = errors.New("User does not exist.")
ErrUserMissing = errors.New("user does not exist")
)

View file

@ -271,3 +271,23 @@ func parseRate(resp *http.Response) RateLimit {
return rl
}
// SetTimeParam sets a URL timestamp query param, given the parameter's name.
func SetTimeParam(key string, t time.Time) func(*url.Values) {
return func(v *url.Values) { v.Set(key, strconv.Itoa(int(t.Unix()))) }
}
// SetBoolParam sets a URL boolean query param, given the parameter's name.
func SetBoolParam(key string, b bool) func(*url.Values) {
return func(v *url.Values) { v.Set(key, strconv.FormatBool(b)) }
}
// SetStringParam sets a URL string query param, given the parameter's name.
func SetStringParam(key, val string) func(*url.Values) {
return func(v *url.Values) { v.Set(key, val) }
}
// SetIntParam sets a URL integer query param, given the parameter's name.
func SetIntParam(key string, val int) func(*url.Values) {
return func(v *url.Values) { v.Set(key, strconv.Itoa(val)) }
}
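
These setters are functional options over url.Values, applied by History below before it builds the request path; a self-contained sketch of the pattern, with local copies of two setters so it runs on its own:

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"
)

// Local stand-ins with the same shape as SetTimeParam / SetIntParam above.
func setTimeParam(key string, t time.Time) func(*url.Values) {
	return func(v *url.Values) { v.Set(key, strconv.Itoa(int(t.Unix()))) }
}

func setIntParam(key string, val int) func(*url.Values) {
	return func(v *url.Values) { v.Set(key, strconv.Itoa(val)) }
}

func main() {
	opts := []func(*url.Values){
		setTimeParam("start", time.Unix(1491900000, 0)),
		setIntParam("limit", 10),
	}

	// This is how History applies its opts before encoding the query string.
	v := url.Values{}
	for _, opt := range opts {
		opt(&v)
	}
	fmt.Println(v.Encode()) // limit=10&start=1491900000
}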

View file

@ -19,10 +19,10 @@ type Job struct {
Config Config `json:"config"`
// The current status of the monitor.
Status map[string]Status `json:"status,omitempty"`
Status map[string]*Status `json:"status,omitempty"`
// Rules for determining failure conditions.
Rules []*Rule `json:"rules"`
Rules []*Rule `json:"rules,omitempty"`
// List of regions in which to run the monitor.
// eg, ["dal", "sin", "sjc", "lga", "ams"]
@ -66,7 +66,7 @@ type Job struct {
// If true, notifications are sent for any regional failure (and failback if desired),
// in addition to global state notifications.
NotifyRegional bool `json:"notidy_regional"`
NotifyRegional bool `json:"notify_regional"`
// If true, a notification is sent when a job returns to an "up" state.
NotifyFailback bool `json:"notify_failback"`
@ -99,6 +99,15 @@ type Status struct {
Status string `json:"status"`
}
// StatusLog wraps an NS1 /monitoring/history resource
type StatusLog struct {
Job string `json:"job"`
Region string `json:"region"`
Status string `json:"status"`
Since int `json:"since"`
Until int `json:"until"`
}
// Rule wraps an element of a Job's "rules" attribute
type Rule struct {
Key string `json:"key"`

View file

@ -3,6 +3,7 @@ package rest
import (
"fmt"
"net/http"
"net/url"
"gopkg.in/ns1/ns1-go.v2/rest/model/monitor"
)
@ -106,3 +107,28 @@ func (s *JobsService) Delete(id string) (*http.Response, error) {
return resp, nil
}
// History takes an ID and returns status log history for a specific monitoring job.
//
// NS1 API docs: https://ns1.com/api/#history-get
func (s *JobsService) History(id string, opts ...func(*url.Values)) ([]*monitor.StatusLog, *http.Response, error) {
v := url.Values{}
for _, opt := range opts {
opt(&v)
}
path := fmt.Sprintf("%s/%s?%s", "monitoring/history", id, v.Encode())
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var slgs []*monitor.StatusLog
resp, err := s.client.Do(req, &slgs)
if err != nil {
return nil, resp, err
}
return slgs, resp, nil
}

View file

@ -48,9 +48,8 @@ func (s *NotificationsService) Get(listID string) (*monitor.NotifyList, *http.Re
if err.(*Error).Message == "unknown notification list" {
return nil, resp, ErrListMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &nl, resp, nil
@ -73,9 +72,8 @@ func (s *NotificationsService) Create(nl *monitor.NotifyList) (*http.Response, e
if err.(*Error).Message == fmt.Sprintf("notification list with name \"%s\" exists", nl.Name) {
return resp, ErrListExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -122,7 +120,7 @@ func (s *NotificationsService) Delete(listID string) (*http.Response, error) {
var (
// ErrListExists bundles PUT create error.
ErrListExists = errors.New("Notify List already exists.")
ErrListExists = errors.New("notify List already exists")
// ErrListMissing bundles GET/POST/DELETE error.
ErrListMissing = errors.New("Notify List does not exist.")
ErrListMissing = errors.New("notify List does not exist")
)

View file

@ -30,9 +30,8 @@ func (s *RecordsService) Get(zone, domain, t string) (*dns.Record, *http.Respons
if err.(*Error).Message == "record not found" {
return nil, resp, ErrRecordMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &r, resp, nil
@ -61,9 +60,8 @@ func (s *RecordsService) Create(r *dns.Record) (*http.Response, error) {
case "record already exists":
return resp, ErrRecordExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -92,9 +90,8 @@ func (s *RecordsService) Update(r *dns.Record) (*http.Response, error) {
case "record already exists":
return resp, ErrRecordExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -118,9 +115,8 @@ func (s *RecordsService) Delete(zone string, domain string, t string) (*http.Res
if err.(*Error).Message == "record not found" {
return resp, ErrRecordMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -128,7 +124,7 @@ func (s *RecordsService) Delete(zone string, domain string, t string) (*http.Res
var (
// ErrRecordExists bundles PUT create error.
ErrRecordExists = errors.New("Record already exists.")
ErrRecordExists = errors.New("record already exists")
// ErrRecordMissing bundles GET/POST/DELETE error.
ErrRecordMissing = errors.New("Record does not exist.")
ErrRecordMissing = errors.New("record does not exist")
)

View file

@ -48,9 +48,8 @@ func (s *ZonesService) Get(zone string) (*dns.Zone, *http.Response, error) {
if err.(*Error).Message == "zone not found" {
return nil, resp, ErrZoneMissing
}
default:
return nil, resp, err
}
return nil, resp, err
}
return &z, resp, nil
@ -75,9 +74,8 @@ func (s *ZonesService) Create(z *dns.Zone) (*http.Response, error) {
if err.(*Error).Message == "zone already exists" {
return resp, ErrZoneExists
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -102,9 +100,8 @@ func (s *ZonesService) Update(z *dns.Zone) (*http.Response, error) {
if err.(*Error).Message == "zone not found" {
return resp, ErrZoneMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -128,9 +125,8 @@ func (s *ZonesService) Delete(zone string) (*http.Response, error) {
if err.(*Error).Message == "zone not found" {
return resp, ErrZoneMissing
}
default:
return resp, err
}
return resp, err
}
return resp, nil
@ -138,7 +134,7 @@ func (s *ZonesService) Delete(zone string) (*http.Response, error) {
var (
// ErrZoneExists bundles PUT create error.
ErrZoneExists = errors.New("Zone already exists.")
ErrZoneExists = errors.New("zone already exists")
// ErrZoneMissing bundles GET/POST/DELETE error.
ErrZoneMissing = errors.New("Zone does not exist.")
ErrZoneMissing = errors.New("zone does not exist")
)

View file

@ -67,6 +67,10 @@ func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKe
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
if publicKey == nil {
return recipientKeyInfo{}, errors.New("invalid public key")
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &rsaEncrypterVerifier{
@ -84,6 +88,10 @@ func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipi
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
if privateKey == nil {
return recipientSigInfo{}, errors.New("invalid private key")
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: &JsonWebKey{
@ -104,6 +112,10 @@ func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipien
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
return recipientKeyInfo{}, errors.New("invalid public key")
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &ecEncrypterVerifier{
@ -121,6 +133,10 @@ func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (re
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
if privateKey == nil {
return recipientSigInfo{}, errors.New("invalid private key")
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: &JsonWebKey{
@ -199,7 +215,7 @@ func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator ke
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
// the Million Message Attack on Cryptographic Message Syntax". We are
// therefore deliberatly ignoring errors here.
// therefore deliberately ignoring errors here.
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
return cek, nil
@ -370,6 +386,10 @@ func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientI
return nil, errors.New("square/go-jose: invalid epk header")
}
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
return nil, errors.New("square/go-jose: invalid public key in epk header")
}
apuData := headers.Apu.bytes()
apvData := headers.Apv.bytes()
@ -474,6 +494,8 @@ func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a
case ES512:
keySize = 66
hash = crypto.SHA512
default:
return ErrUnsupportedAlgorithm
}
if len(signature) != 2*keySize {

View file

@ -82,7 +82,7 @@ func (ctx *cbcAEAD) Overhead() int {
// Seal encrypts and authenticates the plaintext.
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
// Output buffer -- must take care not to mangle plaintext input.
ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)]
ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
copy(ciphertext, plaintext)
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
@ -91,7 +91,7 @@ func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
cbc.CryptBlocks(ciphertext, ciphertext)
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag))
ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
copy(out, ciphertext)
copy(out[len(ciphertext):], authtag)
@ -128,7 +128,7 @@ func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
return nil, err
}
ret, out := resize(dst, len(dst)+len(plaintext))
ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
copy(out, plaintext)
return ret, nil
@ -136,12 +136,12 @@ func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
// Compute an authentication tag
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
n := 0
n += copy(buffer, aad)
n += copy(buffer[n:], nonce)
n += copy(buffer[n:], ciphertext)
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
// According to documentation, Write() on hash.Hash never fails.
hmac := hmac.New(ctx.hash, ctx.integrityKey)
@ -153,8 +153,8 @@ func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n int) (head, tail []byte) {
if cap(in) >= n {
func resize(in []byte, n uint64) (head, tail []byte) {
if uint64(cap(in)) >= n {
head = in[:n]
} else {
head = make([]byte, n)
@ -168,7 +168,7 @@ func resize(in []byte, n int) (head, tail []byte) {
// Apply padding
func padBuffer(buffer []byte, blockSize int) []byte {
missing := blockSize - (len(buffer) % blockSize)
ret, out := resize(buffer, len(buffer)+missing)
ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
padding := bytes.Repeat([]byte{byte(missing)}, missing)
copy(out, padding)
return ret
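
The hunks above widen the buffer-size arithmetic to uint64 so the length calculations cannot overflow, and padBuffer applies PKCS#7-style padding in which every padding byte records how many bytes were added. The following self-contained sketch illustrates that padding scheme with standalone pad/unpad helpers; it is an illustration of the technique, not the library's internal code.

package main

import (
	"bytes"
	"fmt"
)

// pad extends buf to a multiple of blockSize; every padding byte holds the
// number of bytes that were appended.
func pad(buf []byte, blockSize int) []byte {
	missing := blockSize - (len(buf) % blockSize)
	return append(buf, bytes.Repeat([]byte{byte(missing)}, missing)...)
}

// unpad validates and strips the padding again.
func unpad(buf []byte, blockSize int) ([]byte, error) {
	if len(buf) == 0 || len(buf)%blockSize != 0 {
		return nil, fmt.Errorf("invalid padded length %d", len(buf))
	}
	n := int(buf[len(buf)-1])
	if n == 0 || n > blockSize {
		return nil, fmt.Errorf("invalid padding value %d", n)
	}
	for _, b := range buf[len(buf)-n:] {
		if int(b) != n {
			return nil, fmt.Errorf("inconsistent padding bytes")
		}
	}
	return buf[:len(buf)-n], nil
}

func main() {
	padded := pad([]byte("hello"), 16) // 5 data bytes + 11 bytes of 0x0b
	fmt.Printf("padded:   %x\n", padded)
	plain, err := unpad(padded, 16)
	fmt.Printf("unpadded: %q err=%v\n", plain, err)
}
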

View file

@ -32,7 +32,7 @@ type concatKDF struct {
// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo))
buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
n := 0
n += copy(buffer, algID)
n += copy(buffer[n:], ptyUInfo)

View file

@ -23,7 +23,14 @@ import (
)
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
// It is an error to call this function with a private/public key pair that is not on the
// same curve. Callers must ensure that the keys are valid before calling this function.
// Output size may be at most 1<<16 bytes (64 KiB).
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
if size > 1<<16 {
panic("ECDH-ES output size too large, must be less than 1<<16")
}
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
algID := lengthPrefixed([]byte(alg))
ptyUInfo := lengthPrefixed(apuData)
@ -33,6 +40,10 @@ func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, p
supPubInfo := make([]byte, 4)
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
panic("public key not on same curve as private key")
}
z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
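
DeriveECDHES combines a raw ECDH shared secret with ConcatKDF, and the IsOnCurve check added above rejects peer keys that do not lie on the private key's curve before the scalar multiplication. A minimal standard-library sketch of just the ECDH step, showing why both parties arrive at the same Z value (illustrative only, not the library's code):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	curve := elliptic.P256()
	alice, _ := ecdsa.GenerateKey(curve, rand.Reader)
	bob, _ := ecdsa.GenerateKey(curve, rand.Reader)

	// Reject peer keys that are not on the expected curve, mirroring the
	// validation added in the diff above.
	if !curve.IsOnCurve(bob.PublicKey.X, bob.PublicKey.Y) {
		panic("peer public key not on curve")
	}

	// Each party multiplies the other's public point by its own scalar.
	zA, _ := curve.ScalarMult(bob.PublicKey.X, bob.PublicKey.Y, alice.D.Bytes())
	zB, _ := curve.ScalarMult(alice.PublicKey.X, alice.PublicKey.Y, bob.D.Bytes())

	// Both sides obtain the same shared X coordinate, which DeriveECDHES
	// then feeds into ConcatKDF together with the party info.
	fmt.Println("shared secrets match:", zA.Cmp(zB) == 0)
}
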

View file

@ -19,6 +19,7 @@ package jose
import (
"crypto/ecdsa"
"crypto/rsa"
"errors"
"fmt"
"reflect"
)
@ -292,10 +293,16 @@ func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWe
return obj, nil
}
// Decrypt and validate the object and return the plaintext.
// Decrypt and validate the object and return the plaintext. Note that this
// function does not support multi-recipient; if you desire multi-recipient
// decryption, use DecryptMulti instead.
func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
headers := obj.mergedHeaders(nil)
if len(obj.recipients) > 1 {
return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one")
}
if len(headers.Crit) > 0 {
return nil, fmt.Errorf("square/go-jose: unsupported crit header")
}
@ -323,7 +330,65 @@ func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
authData := obj.computeAuthData()
var plaintext []byte
for _, recipient := range obj.recipients {
recipient := obj.recipients[0]
recipientHeaders := obj.mergedHeaders(&recipient)
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
if err == nil {
// Found a valid CEK -- let's try to decrypt.
plaintext, err = cipher.decrypt(cek, authData, parts)
}
if plaintext == nil {
return nil, ErrCryptoFailure
}
// The "zip" header parameter may only be present in the protected header.
if obj.protected.Zip != "" {
plaintext, err = decompress(obj.protected.Zip, plaintext)
}
return plaintext, err
}
// DecryptMulti decrypts and validates the object and returns the plaintexts,
// with support for multiple recipients. It returns the index of the recipient
// for which the decryption was successful, the merged headers for that recipient,
// and the plaintext.
func (obj JsonWebEncryption) DecryptMulti(decryptionKey interface{}) (int, JoseHeader, []byte, error) {
globalHeaders := obj.mergedHeaders(nil)
if len(globalHeaders.Crit) > 0 {
return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported crit header")
}
decrypter, err := newDecrypter(decryptionKey)
if err != nil {
return -1, JoseHeader{}, nil, err
}
cipher := getContentCipher(globalHeaders.Enc)
if cipher == nil {
return -1, JoseHeader{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(globalHeaders.Enc))
}
generator := randomKeyGenerator{
size: cipher.keySize(),
}
parts := &aeadParts{
iv: obj.iv,
ciphertext: obj.ciphertext,
tag: obj.tag,
}
authData := obj.computeAuthData()
index := -1
var plaintext []byte
var headers rawHeader
for i, recipient := range obj.recipients {
recipientHeaders := obj.mergedHeaders(&recipient)
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
@ -331,19 +396,21 @@ func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
// Found a valid CEK -- let's try to decrypt.
plaintext, err = cipher.decrypt(cek, authData, parts)
if err == nil {
index = i
headers = recipientHeaders
break
}
}
}
if plaintext == nil {
return nil, ErrCryptoFailure
if plaintext == nil || err != nil {
return -1, JoseHeader{}, nil, ErrCryptoFailure
}
// The "zip" header paramter may only be present in the protected header.
// The "zip" header parameter may only be present in the protected header.
if obj.protected.Zip != "" {
plaintext, err = decompress(obj.protected.Zip, plaintext)
}
return plaintext, err
return index, headers.sanitized(), plaintext, err
}
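
For reference, a hedged usage sketch of the new DecryptMulti API. The function and variable names below are placeholders; only ParseEncrypted, DecryptMulti, and its return types are taken from the code above, and the KeyID field is assumed from the existing JoseHeader type.

package example

import (
	"fmt"
	"log"

	jose "gopkg.in/square/go-jose.v1"
)

// decryptForMe shows how a caller would use DecryptMulti: serialized is a
// JWE in compact or full serialization, key is this recipient's private key.
func decryptForMe(serialized string, key interface{}) {
	obj, err := jose.ParseEncrypted(serialized)
	if err != nil {
		log.Fatal(err)
	}

	// Decrypt() now rejects payloads with more than one recipient, so use
	// DecryptMulti to find out which recipient the key matched.
	idx, hdr, plaintext, err := obj.DecryptMulti(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recipient %d (kid=%q): %s\n", idx, hdr.KeyID, plaintext)
}
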

View file

@ -25,6 +25,8 @@ import (
"math/big"
"regexp"
"strings"
"gopkg.in/square/go-jose.v1/json"
)
var stripWhitespaceRegex = regexp.MustCompile("\\s")
@ -45,7 +47,7 @@ func base64URLDecode(data string) ([]byte, error) {
// Helper function to serialize known-good objects.
// Precondition: value is not a nil pointer.
func mustSerializeJSON(value interface{}) []byte {
out, err := MarshalJSON(value)
out, err := json.Marshal(value)
if err != nil {
panic(err)
}
@ -146,12 +148,12 @@ func newBufferFromInt(num uint64) *byteBuffer {
}
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
return MarshalJSON(b.base64())
return json.Marshal(b.base64())
}
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
var encoded string
err := UnmarshalJSON(data, &encoded)
err := json.Unmarshal(data, &encoded)
if err != nil {
return err
}

View file

@ -1,31 +0,0 @@
// +build !std_json
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"gopkg.in/square/go-jose.v1/json"
)
func MarshalJSON(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func UnmarshalJSON(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}

View file

@ -1,31 +0,0 @@
// +build std_json
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"encoding/json"
)
func MarshalJSON(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func UnmarshalJSON(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}

View file

@ -19,6 +19,8 @@ package jose
import (
"fmt"
"strings"
"gopkg.in/square/go-jose.v1/json"
)
// rawJsonWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
@ -111,7 +113,7 @@ func ParseEncrypted(input string) (*JsonWebEncryption, error) {
// parseEncryptedFull parses a message in compact format.
func parseEncryptedFull(input string) (*JsonWebEncryption, error) {
var parsed rawJsonWebEncryption
err := UnmarshalJSON([]byte(input), &parsed)
err := json.Unmarshal([]byte(input), &parsed)
if err != nil {
return nil, err
}
@ -133,7 +135,7 @@ func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) {
}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
err := UnmarshalJSON(parsed.Protected.bytes(), &obj.protected)
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
if err != nil {
return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
}

View file

@ -21,10 +21,15 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"math/big"
"reflect"
"strings"
"gopkg.in/square/go-jose.v1/json"
)
// rawJsonWebKey represents a public or private key in JWK format, used for parsing/serializing.
@ -49,14 +54,17 @@ type rawJsonWebKey struct {
Dp *byteBuffer `json:"dp,omitempty"`
Dq *byteBuffer `json:"dq,omitempty"`
Qi *byteBuffer `json:"qi,omitempty"`
// Certificates
X5c []string `json:"x5c,omitempty"`
}
// JsonWebKey represents a public or private key in JWK format.
type JsonWebKey struct {
Key interface{}
KeyID string
Algorithm string
Use string
Key interface{}
Certificates []*x509.Certificate
KeyID string
Algorithm string
Use string
}
// MarshalJSON serializes the given key to its JSON representation.
@ -87,13 +95,17 @@ func (k JsonWebKey) MarshalJSON() ([]byte, error) {
raw.Alg = k.Algorithm
raw.Use = k.Use
return MarshalJSON(raw)
for _, cert := range k.Certificates {
raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
}
return json.Marshal(raw)
}
// UnmarshalJSON reads a key from its JSON representation.
func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) {
var raw rawJsonWebKey
err = UnmarshalJSON(data, &raw)
err = json.Unmarshal(data, &raw)
if err != nil {
return err
}
@ -121,6 +133,19 @@ func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) {
if err == nil {
*k = JsonWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
}
k.Certificates = make([]*x509.Certificate, len(raw.X5c))
for i, cert := range raw.X5c {
raw, err := base64.StdEncoding.DecodeString(cert)
if err != nil {
return err
}
k.Certificates[i], err = x509.ParseCertificate(raw)
if err != nil {
return err
}
}
return
}
@ -192,7 +217,17 @@ func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
return h.Sum(nil), nil
}
// Valid checks that the key contains the expected parameters
// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
func (k *JsonWebKey) IsPublic() bool {
switch k.Key.(type) {
case *ecdsa.PublicKey, *rsa.PublicKey:
return true
default:
return false
}
}
// Valid checks that the key contains the expected parameters.
func (k *JsonWebKey) Valid() bool {
if k.Key == nil {
return false
@ -253,13 +288,20 @@ func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
}
if key.X == nil || key.Y == nil {
return nil, fmt.Errorf("square/go-jose: invalid EC key, missing x/y values")
return nil, errors.New("square/go-jose: invalid EC key, missing x/y values")
}
x := key.X.bigInt()
y := key.Y.bigInt()
if !curve.IsOnCurve(x, y) {
return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
}
return &ecdsa.PublicKey{
Curve: curve,
X: key.X.bigInt(),
Y: key.Y.bigInt(),
X: x,
Y: y,
}, nil
}
@ -368,11 +410,18 @@ func (key rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
}
x := key.X.bigInt()
y := key.Y.bigInt()
if !curve.IsOnCurve(x, y) {
return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve")
}
return &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: key.X.bigInt(),
Y: key.Y.bigInt(),
X: x,
Y: y,
},
D: key.D.bigInt(),
}, nil
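
A brief usage sketch of the JsonWebKey changes, exercising the new IsPublic helper and the on-curve validation now performed during unmarshalling. This is an illustration written for this review, not taken from the library's documentation.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v1"
)

func main() {
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Wrap only the public half; IsPublic() reports true for *ecdsa.PublicKey.
	jwk := jose.JsonWebKey{Key: &priv.PublicKey, KeyID: "example-key", Use: "sig"}
	data, err := jwk.MarshalJSON()
	if err != nil {
		panic(err)
	}

	var parsed jose.JsonWebKey
	if err := parsed.UnmarshalJSON(data); err != nil {
		// UnmarshalJSON now rejects EC keys whose X/Y are not on the declared curve.
		panic(err)
	}
	fmt.Println("valid:", parsed.Valid(), "public:", parsed.IsPublic())
}
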

View file

@ -17,8 +17,11 @@
package jose
import (
"errors"
"fmt"
"strings"
"gopkg.in/square/go-jose.v1/json"
)
// rawJsonWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
@ -39,7 +42,10 @@ type rawSignatureInfo struct {
// JsonWebSignature represents a signed JWS object after parsing.
type JsonWebSignature struct {
payload []byte
payload []byte
// Signatures attached to this object (may be more than one for multi-sig).
// Be careful about accessing these directly; prefer to use Verify() or
// VerifyMulti() to ensure that the data you're getting is verified.
Signatures []Signature
}
@ -56,7 +62,7 @@ type Signature struct {
original *rawSignatureInfo
}
// ParseSigned parses an encrypted message in compact or full serialization format.
// ParseSigned parses a signed message in compact or full serialization format.
func ParseSigned(input string) (*JsonWebSignature, error) {
input = stripWhitespace(input)
if strings.HasPrefix(input, "{") {
@ -94,7 +100,7 @@ func (obj JsonWebSignature) computeAuthData(signature *Signature) []byte {
// parseSignedFull parses a message in full format.
func parseSignedFull(input string) (*JsonWebSignature, error) {
var parsed rawJsonWebSignature
err := UnmarshalJSON([]byte(input), &parsed)
err := json.Unmarshal([]byte(input), &parsed)
if err != nil {
return nil, err
}
@ -118,12 +124,13 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
signature := Signature{}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
signature.protected = &rawHeader{}
err := UnmarshalJSON(parsed.Protected.bytes(), signature.protected)
err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
if err != nil {
return nil, err
}
}
// Check that there is not a nonce in the unprotected header
if parsed.Header != nil && parsed.Header.Nonce != "" {
return nil, ErrUnprotectedNonce
}
@ -146,13 +153,20 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
}
signature.Header = signature.mergedHeaders().sanitized()
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
jwk := signature.Header.JsonWebKey
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
}
obj.Signatures = append(obj.Signatures, signature)
}
for i, sig := range parsed.Signatures {
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
obj.Signatures[i].protected = &rawHeader{}
err := UnmarshalJSON(sig.Protected.bytes(), obj.Signatures[i].protected)
err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
if err != nil {
return nil, err
}
@ -163,14 +177,20 @@ func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
return nil, ErrUnprotectedNonce
}
obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized()
obj.Signatures[i].Signature = sig.Signature.bytes()
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
jwk := obj.Signatures[i].Header.JsonWebKey
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key")
}
// Copy value of sig
original := sig
obj.Signatures[i].header = sig.Header
obj.Signatures[i].original = &original
obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized()
}
return obj, nil

View file

@ -19,6 +19,7 @@ package jose
import (
"crypto/ecdsa"
"crypto/rsa"
"errors"
"fmt"
)
@ -186,20 +187,59 @@ func (ctx *genericSigner) SetNonceSource(source NonceSource) {
ctx.nonceSource = source
}
// SetEmbedJwk specifies if the signing key should be embedded in the protected header,
// if any. It defaults to 'true'.
// SetEmbedJwk specifies if the signing key should be embedded in the protected
// header, if any. It defaults to 'true', though that may change in the future.
// Note that the use of embedded JWKs in the signature header can be dangerous,
// as you cannot assume that the key received in a payload is trusted.
func (ctx *genericSigner) SetEmbedJwk(embed bool) {
ctx.embedJwk = embed
}
// Verify validates the signature on the object and returns the payload.
// This function does not support multi-signature; if you desire multi-sig
// verification, use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
func (obj JsonWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
verifier, err := newVerifier(verificationKey)
if err != nil {
return nil, err
}
for _, signature := range obj.Signatures {
if len(obj.Signatures) > 1 {
return nil, errors.New("square/go-jose: too many signatures in payload; expecting only one")
}
signature := obj.Signatures[0]
headers := signature.mergedHeaders()
if len(headers.Crit) > 0 {
// Unsupported crit header
return nil, ErrCryptoFailure
}
input := obj.computeAuthData(&signature)
alg := SignatureAlgorithm(headers.Alg)
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
return obj.payload, nil
}
return nil, ErrCryptoFailure
}
// VerifyMulti validates (one of the multiple) signatures on the object and
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
func (obj JsonWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
verifier, err := newVerifier(verificationKey)
if err != nil {
return -1, Signature{}, nil, err
}
for i, signature := range obj.Signatures {
headers := signature.mergedHeaders()
if len(headers.Crit) > 0 {
// Unsupported crit header
@ -210,9 +250,9 @@ func (obj JsonWebSignature) Verify(verificationKey interface{}) ([]byte, error)
alg := SignatureAlgorithm(headers.Alg)
err := verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
return obj.payload, nil
return i, signature, obj.payload, nil
}
}
return nil, ErrCryptoFailure
return -1, Signature{}, nil, ErrCryptoFailure
}
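
Finally, a hedged sketch contrasting Verify with the new VerifyMulti; the serialized input and key are placeholders, and the Signature.Header.KeyID access assumes the existing JoseHeader fields.

package example

import (
	"fmt"
	"log"

	jose "gopkg.in/square/go-jose.v1"
)

// verifySketch illustrates the split introduced above: Verify now refuses
// objects carrying more than one signature, while VerifyMulti reports which
// signature matched.
func verifySketch(serialized string, key interface{}) {
	obj, err := jose.ParseSigned(serialized)
	if err != nil {
		log.Fatal(err)
	}

	if len(obj.Signatures) == 1 {
		payload, err := obj.Verify(key)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("verified payload: %s\n", payload)
		return
	}

	idx, sig, payload, err := obj.VerifyMulti(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signature %d (kid=%q): %s\n", idx, sig.Header.KeyID, payload)
}
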