
Vendor main dependencies.

Timo Reimann 2017-02-07 22:33:23 +01:00
parent 49a09ab7dd
commit dd5e3fba01
2738 changed files with 1045689 additions and 0 deletions

vendor/gopkg.in/fsnotify.v1/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/gopkg.in/fsnotify.v1/fen.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build solaris
package fsnotify
import (
"errors"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
return nil
}

vendor/gopkg.in/fsnotify.v1/fsnotify.go generated vendored Normal file

@@ -0,0 +1,62 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// Package fsnotify provides a platform-independent interface for file system notifications.
package fsnotify
import (
"bytes"
"fmt"
)
// Event represents a single file system notification.
type Event struct {
Name string // Relative path to the file or directory.
Op Op // File operation that triggered the event.
}
// Op describes a set of file operations.
type Op uint32
// These are the generalized file operations that can trigger a notification.
const (
Create Op = 1 << iota
Write
Remove
Rename
Chmod
)
// String returns a string representation of the event in the form
// "file: REMOVE|WRITE|..."
func (e Event) String() string {
// Use a buffer for efficient string concatenation
var buffer bytes.Buffer
if e.Op&Create == Create {
buffer.WriteString("|CREATE")
}
if e.Op&Remove == Remove {
buffer.WriteString("|REMOVE")
}
if e.Op&Write == Write {
buffer.WriteString("|WRITE")
}
if e.Op&Rename == Rename {
buffer.WriteString("|RENAME")
}
if e.Op&Chmod == Chmod {
buffer.WriteString("|CHMOD")
}
// If buffer remains empty, return no event names
if buffer.Len() == 0 {
return fmt.Sprintf("%q: ", e.Name)
}
// Return a list of event names, with leading pipe character stripped
return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
}
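The Events/Errors channels and the Op bitmask above are the package's whole portable surface. A minimal usage sketch, not part of this commit, assuming the vendored import path shown in the file header and a placeholder watch path:

package main

import (
	"log"

	"gopkg.in/fsnotify.v1"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// "/tmp" is an illustrative path, not anything this commit prescribes.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case event := <-watcher.Events:
			// Op is a bitmask; test bits the same way String() does above.
			if event.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", event.Name)
			}
		case err := <-watcher.Errors:
			log.Println("error:", err)
		}
	}
}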

vendor/gopkg.in/fsnotify.v1/inotify.go generated vendored Normal file

@@ -0,0 +1,325 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"unsafe"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
paths map[int]string // Map of watched paths (key: watch descriptor)
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
doneResp chan struct{} // Channel to respond to Close
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
// Create inotify fd
fd, errno := unix.InotifyInit()
if fd == -1 {
return nil, errno
}
// Create epoll
poller, err := newFdPoller(fd)
if err != nil {
unix.Close(fd)
return nil, err
}
w := &Watcher{
fd: fd,
poller: poller,
watches: make(map[string]*watch),
paths: make(map[int]string),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
}
func (w *Watcher) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed() {
return nil
}
// Send 'close' signal to goroutine, and set the Watcher to closed.
close(w.done)
// Wake up goroutine
w.poller.wake()
// Wait for goroutine to close
<-w.doneResp
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
name = filepath.Clean(name)
if w.isClosed() {
return errors.New("inotify instance already closed")
}
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
var flags uint32 = agnosticEvents
w.mu.Lock()
watchEntry, found := w.watches[name]
w.mu.Unlock()
if found {
watchEntry.flags |= flags
flags |= unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
w.mu.Lock()
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
w.paths[wd] = name
w.mu.Unlock()
return nil
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
// Fetch the watch.
w.mu.Lock()
defer w.mu.Unlock()
watch, ok := w.watches[name]
// Remove it from inotify.
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify watch will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
// after calling inotify_rm_watch() below, when the readEvents() goroutine
// receives IN_IGNORED. So EINVAL here means the wd is being rm_watch()ed or
// its file was removed by another thread and we have not yet received the
// IN_IGNORED event.
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
if success == -1 {
// TODO: Perhaps it's not helpful to return an error here in every case.
// the only two possible errors are:
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
return errno
}
// wait until ignoreLinux() deletes the map entries
exists := true
for exists {
w.cv.Wait()
_, exists = w.watches[name]
}
return nil
}
type watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *Watcher) readEvents() {
var (
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
n int // Number of bytes read with read()
errno error // Syscall errno
ok bool // For poller.wait
)
defer close(w.doneResp)
defer close(w.Errors)
defer close(w.Events)
defer unix.Close(w.fd)
defer w.poller.close()
for {
// See if we have been closed.
if w.isClosed() {
return
}
ok, errno = w.poller.wait()
if errno != nil {
select {
case w.Errors <- errno:
case <-w.done:
return
}
continue
}
if !ok {
continue
}
n, errno = unix.Read(w.fd, buf[:])
// If a signal interrupted execution, see if we've been asked to close, and try again.
// http://man7.org/linux/man-pages/man7/signal.7.html :
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
if errno == unix.EINTR {
continue
}
// unix.Read might have been woken up by Close. If so, we're done.
if w.isClosed() {
return
}
if n < unix.SizeofInotifyEvent {
var err error
if n == 0 {
// EOF was received, which should really never happen.
err = io.EOF
} else if n < 0 {
// If an error occurred while reading.
err = errno
} else {
// Read was too short.
err = errors.New("notify: short read in readEvents()")
}
select {
case w.Errors <- err:
case <-w.done:
return
}
continue
}
var offset uint32
// We don't know how many events we just read into the buffer
// While the offset points to at least one whole event...
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point "raw" to the event in the buffer
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
mask := uint32(raw.Mask)
nameLen := uint32(raw.Len)
// If the event happened to the watched directory or the watched file, the kernel
// doesn't append the filename to the event, but we would like to always fill the
// "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
name := w.paths[int(raw.Wd)]
w.mu.Unlock()
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
}
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
if !event.ignoreLinux(w, raw.Wd, mask) {
select {
case w.Events <- event:
case <-w.done:
return
}
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + nameLen
}
}
}
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
w.mu.Lock()
defer w.mu.Unlock()
name := w.paths[int(wd)]
delete(w.paths, int(wd))
delete(w.watches, name)
w.cv.Broadcast()
return true
}
// If the event is not a DELETE or RENAME, the file must exist.
// Otherwise the event is ignored.
// *Note*: this was put in place because it was seen that a MODIFY
// event was sent after the DELETE. This ignores that MODIFY and
// assumes a DELETE will come or has come if the file doesn't exist.
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
_, statErr := os.Lstat(e.Name)
return os.IsNotExist(statErr)
}
return false
}
// newEvent returns a platform-independent Event based on an inotify mask.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
return e
}
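The mask-to-Op translation above can be exercised in isolation. A hypothetical package-internal test sketch (the test itself is illustrative, not part of this commit):

// +build linux

package fsnotify

import (
	"testing"

	"golang.org/x/sys/unix"
)

// TestNewEventMask checks that raw inotify bits map onto the portable Op flags.
func TestNewEventMask(t *testing.T) {
	e := newEvent("/watched/file", unix.IN_CREATE|unix.IN_MODIFY)
	if e.Op&Create != Create || e.Op&Write != Write {
		t.Fatalf("unexpected ops: %v", e.Op)
	}
}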

vendor/gopkg.in/fsnotify.v1/inotify_poller.go generated vendored Normal file

@@ -0,0 +1,187 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package fsnotify
import (
"errors"
"golang.org/x/sys/unix"
)
type fdPoller struct {
fd int // File descriptor (as returned by the inotify_init() syscall)
epfd int // Epoll file descriptor
pipe [2]int // Pipe for waking up
}
func emptyPoller(fd int) *fdPoller {
poller := new(fdPoller)
poller.fd = fd
poller.epfd = -1
poller.pipe[0] = -1
poller.pipe[1] = -1
return poller
}
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
func newFdPoller(fd int) (*fdPoller, error) {
var errno error
poller := emptyPoller(fd)
defer func() {
if errno != nil {
poller.close()
}
}()
poller.fd = fd
// Create epoll fd
poller.epfd, errno = unix.EpollCreate1(0)
if poller.epfd == -1 {
return nil, errno
}
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
if errno != nil {
return nil, errno
}
// Register inotify fd with epoll
event := unix.EpollEvent{
Fd: int32(poller.fd),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
if errno != nil {
return nil, errno
}
// Register pipe fd with epoll
event = unix.EpollEvent{
Fd: int32(poller.pipe[0]),
Events: unix.EPOLLIN,
}
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
if errno != nil {
return nil, errno
}
return poller, nil
}
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
func (poller *fdPoller) wait() (bool, error) {
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
// I don't know whether epoll_wait returns the number of events returned,
// or the total number of events ready.
// I decided to catch both by making the buffer one larger than the maximum.
events := make([]unix.EpollEvent, 7)
for {
n, errno := unix.EpollWait(poller.epfd, events, -1)
if n == -1 {
if errno == unix.EINTR {
continue
}
return false, errno
}
if n == 0 {
// If there are no events, try again.
continue
}
if n > 6 {
// This should never happen. More events were returned than should be possible.
return false, errors.New("epoll_wait returned more events than I know what to do with")
}
ready := events[:n]
epollhup := false
epollerr := false
epollin := false
for _, event := range ready {
if event.Fd == int32(poller.fd) {
if event.Events&unix.EPOLLHUP != 0 {
// This should not happen, but if it does, treat it as a wakeup.
epollhup = true
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the file descriptor, we should pretend
// something is ready to read, and let unix.Read pick up the error.
epollerr = true
}
if event.Events&unix.EPOLLIN != 0 {
// There is data to read.
epollin = true
}
}
if event.Fd == int32(poller.pipe[0]) {
if event.Events&unix.EPOLLHUP != 0 {
// Write pipe descriptor was closed, by us. This means we're closing down the
// watcher, and we should wake up.
}
if event.Events&unix.EPOLLERR != 0 {
// If an error is waiting on the pipe file descriptor.
// This is an absolute mystery, and should never ever happen.
return false, errors.New("Error on the pipe descriptor.")
}
if event.Events&unix.EPOLLIN != 0 {
// This is a regular wakeup, so we have to clear the buffer.
err := poller.clearWake()
if err != nil {
return false, err
}
}
}
}
if epollhup || epollerr || epollin {
return true, nil
}
return false, nil
}
}
// wake writes a byte to the pipe to wake up the poller.
func (poller *fdPoller) wake() error {
buf := make([]byte, 1)
n, errno := unix.Write(poller.pipe[1], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is full, poller will wake.
return nil
}
return errno
}
return nil
}
func (poller *fdPoller) clearWake() error {
// You have to be woken up a LOT in order to get to 100!
buf := make([]byte, 100)
n, errno := unix.Read(poller.pipe[0], buf)
if n == -1 {
if errno == unix.EAGAIN {
// Buffer is empty, someone else cleared our wake.
return nil
}
return errno
}
return nil
}
// Close all poller file descriptors, but not the one passed to it.
func (poller *fdPoller) close() {
if poller.pipe[1] != -1 {
unix.Close(poller.pipe[1])
}
if poller.pipe[0] != -1 {
unix.Close(poller.pipe[0])
}
if poller.epfd != -1 {
unix.Close(poller.epfd)
}
}
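Together, wait(), wake() and clearWake() implement the classic self-pipe wakeup: wake() writes a byte to pipe[1], epoll reports pipe[0] readable, and wait() drains it and returns (false, nil). A hypothetical package-internal sketch of that round trip (the demo function is illustrative, not part of this commit):

// +build linux

package fsnotify

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// pollerWakeDemo blocks one goroutine in wait() and interrupts it via the pipe.
func pollerWakeDemo() error {
	fd, err := unix.InotifyInit()
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	poller, err := newFdPoller(fd)
	if err != nil {
		return err
	}
	defer poller.close() // closes epoll and pipe fds, not fd itself

	done := make(chan struct{})
	go func() {
		// With no inotify data pending, a plain wakeup yields (false, nil).
		ok, err := poller.wait()
		fmt.Println("woke:", ok, err)
		close(done)
	}()
	if err := poller.wake(); err != nil {
		return err
	}
	<-done
	return nil
}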

vendor/gopkg.in/fsnotify.v1/kqueue.go generated vendored Normal file

@@ -0,0 +1,503 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly darwin
package fsnotify
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"
"golang.org/x/sys/unix"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
done chan bool // Channel for sending a "quit message" to the reader goroutine
kq int // File descriptor (as returned by the kqueue() syscall).
mu sync.Mutex // Protects access to watcher data
watches map[string]int // Map of watched file descriptors (key: path).
externalWatches map[string]bool // Map of watches added by user of the library.
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
isClosed bool // Set to true when Close() is first called
}
type pathInfo struct {
name string
isDir bool
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
kq, err := kqueue()
if err != nil {
return nil, err
}
w := &Watcher{
kq: kq,
watches: make(map[string]int),
dirFlags: make(map[string]uint32),
paths: make(map[int]pathInfo),
fileExists: make(map[string]bool),
externalWatches: make(map[string]bool),
Events: make(chan Event),
Errors: make(chan error),
done: make(chan bool),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return nil
}
w.isClosed = true
w.mu.Unlock()
// copy paths to remove while locked
w.mu.Lock()
var pathsToRemove = make([]string, 0, len(w.watches))
for name := range w.watches {
pathsToRemove = append(pathsToRemove, name)
}
w.mu.Unlock()
// unlock before calling Remove, which also locks
var err error
for _, name := range pathsToRemove {
if e := w.Remove(name); e != nil && err == nil {
err = e
}
}
// Send "quit" message to the reader goroutine:
w.done <- true
return nil
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
w.mu.Lock()
w.externalWatches[name] = true
w.mu.Unlock()
_, err := w.addWatch(name, noteAllEvents)
return err
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
name = filepath.Clean(name)
w.mu.Lock()
watchfd, ok := w.watches[name]
w.mu.Unlock()
if !ok {
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
}
const registerRemove = unix.EV_DELETE
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
return err
}
unix.Close(watchfd)
w.mu.Lock()
isDir := w.paths[watchfd].isDir
delete(w.watches, name)
delete(w.paths, watchfd)
delete(w.dirFlags, name)
w.mu.Unlock()
// Find all watched paths that are in this directory that are not external.
if isDir {
var pathsToRemove []string
w.mu.Lock()
for _, path := range w.paths {
wdir, _ := filepath.Split(path.name)
if filepath.Clean(wdir) == name {
if !w.externalWatches[path.name] {
pathsToRemove = append(pathsToRemove, path.name)
}
}
}
w.mu.Unlock()
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error
// to the user, as that will just confuse them with an error about
// a path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// keventWaitTime to block on each read from kevent
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
var isDir bool
// Make ./name and name equivalent
name = filepath.Clean(name)
w.mu.Lock()
if w.isClosed {
w.mu.Unlock()
return "", errors.New("kevent instance already closed")
}
watchfd, alreadyWatching := w.watches[name]
// We already have a watch, but we can still override flags.
if alreadyWatching {
isDir = w.paths[watchfd].isDir
}
w.mu.Unlock()
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets.
if fi.Mode()&os.ModeSocket == os.ModeSocket {
return "", nil
}
// Don't watch named pipes.
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
return "", nil
}
// Follow Symlinks
// Unfortunately, Linux can add bogus symlinks to watch list without
// issue, and Windows can't do symlinks period (AFAIK). To maintain
// consistency, we will act like everything is fine. There will simply
// be no file events for broken symlinks.
// Hence the returns of nil on errors.
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
name, err = filepath.EvalSymlinks(name)
if err != nil {
return "", nil
}
w.mu.Lock()
_, alreadyWatching = w.watches[name]
w.mu.Unlock()
if alreadyWatching {
return name, nil
}
fi, err = os.Lstat(name)
if err != nil {
return "", nil
}
}
watchfd, err = unix.Open(name, openMode, 0700)
if watchfd == -1 {
return "", err
}
isDir = fi.IsDir()
}
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
unix.Close(watchfd)
return "", err
}
if !alreadyWatching {
w.mu.Lock()
w.watches[name] = watchfd
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
w.mu.Unlock()
}
if isDir {
// Watch the directory if it has not been watched before,
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
w.mu.Lock()
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
// Store flags so this watch can be updated later
w.dirFlags[name] = flags
w.mu.Unlock()
if watchDir {
if err := w.watchDirectoryFiles(name); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *Watcher) readEvents() {
eventBuffer := make([]unix.Kevent_t, 10)
for {
// See if there is a message on the "done" channel
select {
case <-w.done:
err := unix.Close(w.kq)
if err != nil {
w.Errors <- err
}
close(w.Events)
close(w.Errors)
return
default:
}
// Get new events
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
w.Errors <- err
continue
}
// Flush the events we received to the Events channel
for len(kevents) > 0 {
kevent := &kevents[0]
watchfd := int(kevent.Ident)
mask := uint32(kevent.Fflags)
w.mu.Lock()
path := w.paths[watchfd]
w.mu.Unlock()
event := newEvent(path.name, mask)
if path.isDir && !(event.Op&Remove == Remove) {
// Double check to make sure the directory exists. This can happen when
// we do an rm -fr on a recursively watched folder and receive a
// modification event first even though the folder has been deleted,
// with the delete event arriving later.
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
// mark it as a delete event
event.Op |= Remove
}
}
if event.Op&Rename == Rename || event.Op&Remove == Remove {
w.Remove(event.Name)
w.mu.Lock()
delete(w.fileExists, event.Name)
w.mu.Unlock()
}
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
w.sendDirectoryChangeEvents(event.Name)
} else {
// Send the event on the Events channel
w.Events <- event
}
if event.Op&Remove == Remove {
// Look for a file that may have overwritten this.
// For example, mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
w.mu.Lock()
_, found := w.watches[fileDir]
w.mu.Unlock()
if found {
// make sure the directory exists before we watch for changes. When we
// do a recursive watch and perform rm -fr, the parent directory might
// have gone missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the parent directory.
if _, err := os.Lstat(fileDir); err == nil {
w.sendDirectoryChangeEvents(fileDir)
}
}
} else {
filePath := filepath.Clean(event.Name)
if fileInfo, err := os.Lstat(filePath); err == nil {
w.sendFileCreatedEventIfNew(filePath, fileInfo)
}
}
}
// Move to next event
kevents = kevents[1:]
}
}
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
return e
}
func newCreateEvent(name string) Event {
return Event{Name: name, Op: Create}
}
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
return err
}
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
}
return nil
}
// sendDirectoryChangeEvents searches the directory for newly created files
// and sends them over the event channel. This functionality is to have
// the BSD version of fsnotify match Linux inotify which provides a
// create event for files created in a watched directory.
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
// Get all files
files, err := ioutil.ReadDir(dirPath)
if err != nil {
w.Errors <- err
}
// Search for new files
for _, fileInfo := range files {
filePath := filepath.Join(dirPath, fileInfo.Name())
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
if err != nil {
return
}
}
}
// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
w.mu.Lock()
_, doesExist := w.fileExists[filePath]
w.mu.Unlock()
if !doesExist {
// Send create event
w.Events <- newCreateEvent(filePath)
}
// like watchDirectoryFiles (but without doing another ReadDir)
filePath, err = w.internalWatch(filePath, fileInfo)
if err != nil {
return err
}
w.mu.Lock()
w.fileExists[filePath] = true
w.mu.Unlock()
return nil
}
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
if fileInfo.IsDir() {
// mimic Linux providing delete events for subdirectories
// but preserve the flags used if currently watching subdirectory
w.mu.Lock()
flags := w.dirFlags[name]
w.mu.Unlock()
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
return w.addWatch(name, flags)
}
// watch file to mimic Linux inotify
return w.addWatch(name, noteAllEvents)
}
// kqueue creates a new kernel event queue and returns a descriptor.
func kqueue() (kq int, err error) {
kq, err = unix.Kqueue()
if kq == -1 {
return kq, err
}
return kq, nil
}
// register events with the queue
func register(kq int, fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types:
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// register the events
success, err := unix.Kevent(kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
// A timeout of nil blocks indefinitely, while 0 polls the queue.
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(kq, nil, events, timeout)
if err != nil {
return nil, err
}
return events[0:n], nil
}
// durationToTimespec prepares a timeout value
func durationToTimespec(d time.Duration) unix.Timespec {
return unix.NsecToTimespec(d.Nanoseconds())
}
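Taken together, kqueue(), register() and read() are the entire syscall surface the watcher builds on. A hypothetical package-internal sketch composing them for a single path (illustrative only; not part of this commit):

// +build freebsd openbsd netbsd dragonfly darwin

package fsnotify

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// watchOnce registers one path and reads a single batch of kevents.
func watchOnce(path string) error {
	kq, err := kqueue()
	if err != nil {
		return err
	}
	defer unix.Close(kq)

	// openMode comes from the open_mode_*.go files below.
	fd, err := unix.Open(path, openMode, 0700)
	if err != nil {
		return err
	}
	defer unix.Close(fd)

	if err := register(kq, []int{fd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, noteAllEvents); err != nil {
		return err
	}
	buf := make([]unix.Kevent_t, 10)
	kevents, err := read(kq, buf, &keventWaitTime) // ~100ms timeout per the var above
	if err != nil {
		return err
	}
	for _, kev := range kevents {
		fmt.Println(newEvent(path, uint32(kev.Fflags)))
	}
	return nil
}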

vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go generated vendored Normal file

@@ -0,0 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY

vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go generated vendored Normal file

@@ -0,0 +1,12 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY

vendor/gopkg.in/fsnotify.v1/windows.go generated vendored Normal file

@@ -0,0 +1,561 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"syscall"
"unsafe"
)
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
Events chan Event
Errors chan error
isClosed bool // Set to true when Close() is first called
mu sync.Mutex // Map access
port syscall.Handle // Handle to completion port
watches watchMap // Map of watches (key: i-number)
input chan *input // Inputs to the reader are sent on this channel
quit chan chan<- error
}
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
func NewWatcher() (*Watcher, error) {
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
if e != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
}
w := &Watcher{
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
Events: make(chan Event, 50),
Errors: make(chan error),
quit: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
// Close removes all watches and closes the events channel.
func (w *Watcher) Close() error {
if w.isClosed {
return nil
}
w.isClosed = true
// Send "quit" message to the reader goroutine
ch := make(chan error)
w.quit <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
if w.isClosed {
return errors.New("watcher already closed")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
// Remove stops watching the named file or directory (non-recursively).
func (w *Watcher) Remove(name string) error {
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
const (
// Options for AddWatch
sysFSONESHOT = 0x80000000
sysFSONLYDIR = 0x1000000
// Events
sysFSACCESS = 0x1
sysFSALLEVENTS = 0xfff
sysFSATTRIB = 0x4
sysFSCLOSE = 0x18
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
// Special events
sysFSIGNORED = 0x8000
sysFSQOVERFLOW = 0x4000
)
func newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
if mask&sysFSATTRIB == sysFSATTRIB {
e.Op |= Chmod
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
reply chan error
}
type inode struct {
handle syscall.Handle
volume uint32
index uint64
}
type watch struct {
ov syscall.Overlapped
ino *inode // i-number
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf [4096]byte
}
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
func (w *Watcher) wakeupReader() error {
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if e != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", e)
}
return nil
}
func getDir(pathname string) (dir string, err error) {
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
if e != nil {
return "", os.NewSyscallError("GetFileAttributes", e)
}
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func getIno(path string) (ino *inode, err error) {
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
syscall.FILE_LIST_DIRECTORY,
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
nil, syscall.OPEN_EXISTING,
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
if e != nil {
return nil, os.NewSyscallError("CreateFile", e)
}
var fi syscall.ByHandleFileInformation
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
syscall.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
if flags&sysFSONLYDIR != 0 && pathname != dir {
return nil
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
syscall.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", e)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
syscall.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
if err = w.startRead(watchEntry); err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
dir, err := getDir(pathname)
if err != nil {
return err
}
ino, err := getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
if watch == nil {
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
}
if pathname == dir {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
if e := syscall.CancelIo(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CancelIo", e)
w.deleteWatch(watch)
}
mask := toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= toWindowsFlags(m)
}
if mask == 0 {
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
w.Errors <- os.NewSyscallError("CloseHandle", e)
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
if e != nil {
err := os.NewSyscallError("ReadDirectoryChanges", e)
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *Watcher) readEvents() {
var (
n, key uint32
ov *syscall.Overlapped
)
runtime.LockOSThread()
for {
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.quit:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
var err error
if e := syscall.CloseHandle(w.port); e != nil {
err = os.NewSyscallError("CloseHandle", e)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags))
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch e {
case syscall.ERROR_MORE_DATA:
if watch == nil {
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case syscall.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case syscall.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
continue
case nil:
}
var offset uint32
for {
if n == 0 {
w.Events <- newEvent("", sysFSQOVERFLOW)
w.Errors <- errors.New("short read in readEvents()")
break
}
// Point "raw" to the event in the buffer
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
fullname := filepath.Join(watch.path, name)
var mask uint64
switch raw.Action {
case syscall.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case syscall.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
sendNameEvent := func() {
if w.sendEvent(fullname, watch.names[name]&mask) {
if watch.names[name]&sysFSONESHOT != 0 {
delete(watch.names, name)
}
}
}
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
sendNameEvent()
}
if raw.Action == syscall.FILE_ACTION_REMOVED {
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
if watch.mask&sysFSONESHOT != 0 {
watch.mask = 0
}
}
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
fullname = filepath.Join(watch.path, watch.rename)
sendNameEvent()
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
break
}
}
if err := w.startRead(watch); err != nil {
w.Errors <- err
}
}
}
func (w *Watcher) sendEvent(name string, mask uint64) bool {
if mask == 0 {
return false
}
event := newEvent(name, uint32(mask))
select {
case ch := <-w.quit:
w.quit <- ch
case w.Events <- event:
}
return true
}
func toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSACCESS != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
}
if mask&sysFSMODIFY != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&sysFSATTRIB != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func toFSnotifyFlags(action uint32) uint64 {
switch action {
case syscall.FILE_ACTION_ADDED:
return sysFSCREATE
case syscall.FILE_ACTION_REMOVED:
return sysFSDELETE
case syscall.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}
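The two flag translators close the loop between ReadDirectoryChangesW actions and the portable Event type. A hypothetical package-internal sketch (the demo function and path are illustrative, not part of this commit):

// +build windows

package fsnotify

import (
	"fmt"
	"syscall"
)

// actionDemo translates one raw action the same way readEvents does.
func actionDemo() {
	mask := toFSnotifyFlags(syscall.FILE_ACTION_ADDED) // sysFSCREATE
	e := newEvent(`C:\watched\new.txt`, uint32(mask))
	fmt.Println(e) // prints the path with CREATE set
}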

vendor/gopkg.in/inf.v0/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go
Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/gopkg.in/inf.v0/dec.go generated vendored Normal file

@@ -0,0 +1,615 @@
// Package inf (type inf.Dec) implements "infinite-precision" decimal
// arithmetic.
// "Infinite precision" describes two characteristics: practically unlimited
// precision for decimal number representation and no support for calculating
// with any specific fixed precision.
// (Although there is no practical limit on precision, inf.Dec can only
// represent finite decimals.)
//
// This package is currently in an experimental stage and the API may change.
//
// This package does NOT support:
// - rounding to specific precisions (as opposed to specific decimal positions)
// - the notion of context (each rounding must be explicit)
// - NaN and Inf values, and distinguishing between positive and negative zero
// - conversions to and from float32/64 types
//
// Features considered for possible addition:
// + formatting options
// + Exp method
// + combined operations such as AddRound/MulAdd etc
// + exchanging data in decimal32/64/128 formats
//
package inf // import "gopkg.in/inf.v0"
// TODO:
// - avoid excessive deep copying (quo and rounders)
import (
"fmt"
"io"
"math/big"
"strings"
)
// A Dec represents a signed arbitrary-precision decimal.
// It is a combination of a sign, an arbitrary-precision integer coefficient
// value, and a signed fixed-precision exponent value.
// The sign and the coefficient value are handled together as a signed value
// and referred to as the unscaled value.
// (Positive and negative zero values are not distinguished.)
// Since the exponent is most commonly non-positive, it is handled in negated
// form and referred to as scale.
//
// The mathematical value of a Dec equals:
//
// unscaled * 10**(-scale)
//
// Note that different Dec representations may have equal mathematical values.
//
// unscaled scale String()
// -------------------------
// 0 0 "0"
// 0 2 "0.00"
// 0 -2 "0"
// 1 0 "1"
// 100 2 "1.00"
// 10 0 "10"
// 1 -1 "10"
//
// The zero value for a Dec represents the value 0 with scale 0.
//
// Operations are typically performed through the *Dec type.
// The semantics of the assignment operation "=" for "bare" Dec values is
// undefined and should not be relied on.
//
// Methods are typically of the form:
//
// func (z *Dec) Op(x, y *Dec) *Dec
//
// and implement operations z = x Op y with the result as receiver; if it
// is one of the operands it may be overwritten (and its memory reused).
// To enable chaining of operations, the result is also returned. Methods
// returning a result other than *Dec take one of the operands as the receiver.
//
// A "bare" Quo method (quotient / division operation) is not provided, as the
// result is not always a finite decimal and thus in general cannot be
// represented as a Dec.
// Instead, in the common case when rounding is (potentially) necessary,
// QuoRound should be used with a Scale and a Rounder.
// QuoExact or QuoRound with RoundExact can be used in the special cases when it
// is known that the result is always a finite decimal.
//
type Dec struct {
unscaled big.Int
scale Scale
}
// Scale represents the type used for the scale of a Dec.
type Scale int32
const scaleSize = 4 // bytes in a Scale value
// scaler represents a method for obtaining the scale to use for the result of
// an operation on x and y.
type scaler interface {
Scale(x *Dec, y *Dec) Scale
}
var bigInt = [...]*big.Int{
big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4),
big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9),
big.NewInt(10),
}
var exp10cache [64]big.Int = func() [64]big.Int {
e10, e10i := [64]big.Int{}, bigInt[1]
for i := range e10 {
e10[i].Set(e10i)
e10i = new(big.Int).Mul(e10i, bigInt[10])
}
return e10
}()
// NewDec allocates and returns a new Dec set to the given int64 unscaled value
// and scale.
func NewDec(unscaled int64, scale Scale) *Dec {
return new(Dec).SetUnscaled(unscaled).SetScale(scale)
}
// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled
// value and scale.
func NewDecBig(unscaled *big.Int, scale Scale) *Dec {
return new(Dec).SetUnscaledBig(unscaled).SetScale(scale)
}
// Scale returns the scale of x.
func (x *Dec) Scale() Scale {
return x.scale
}
// Unscaled returns the unscaled value of x for u and true for ok when the
// unscaled value can be represented as int64; otherwise it returns an undefined
// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid
// checking the validity of the value when the check is known to be redundant.
func (x *Dec) Unscaled() (u int64, ok bool) {
u = x.unscaled.Int64()
var i big.Int
ok = i.SetInt64(u).Cmp(&x.unscaled) == 0
return
}
// UnscaledBig returns the unscaled value of x as *big.Int.
func (x *Dec) UnscaledBig() *big.Int {
return &x.unscaled
}
// SetScale sets the scale of z, with the unscaled value unchanged, and returns
// z.
// The mathematical value of the Dec changes as if it was multiplied by
// 10**(oldscale-scale).
func (z *Dec) SetScale(scale Scale) *Dec {
z.scale = scale
return z
}
// SetUnscaled sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaled(unscaled int64) *Dec {
z.unscaled.SetInt64(unscaled)
return z
}
// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and
// returns z.
func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec {
z.unscaled.Set(unscaled)
return z
}
// Set sets z to the value of x and returns z.
// It does nothing if z == x.
func (z *Dec) Set(x *Dec) *Dec {
if z != x {
z.SetUnscaledBig(x.UnscaledBig())
z.SetScale(x.Scale())
}
return z
}
// Sign returns:
//
// -1 if x < 0
// 0 if x == 0
// +1 if x > 0
//
func (x *Dec) Sign() int {
return x.UnscaledBig().Sign()
}
// Neg sets z to -x and returns z.
func (z *Dec) Neg(x *Dec) *Dec {
z.SetScale(x.Scale())
z.UnscaledBig().Neg(x.UnscaledBig())
return z
}
// Cmp compares x and y and returns:
//
// -1 if x < y
// 0 if x == y
// +1 if x > y
//
func (x *Dec) Cmp(y *Dec) int {
xx, yy := upscale(x, y)
return xx.UnscaledBig().Cmp(yy.UnscaledBig())
}
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Dec) Abs(x *Dec) *Dec {
z.SetScale(x.Scale())
z.UnscaledBig().Abs(x.UnscaledBig())
return z
}
// Add sets z to the sum x+y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Add(x, y *Dec) *Dec {
xx, yy := upscale(x, y)
z.SetScale(xx.Scale())
z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig())
return z
}
// Sub sets z to the difference x-y and returns z.
// The scale of z is the greater of the scales of x and y.
func (z *Dec) Sub(x, y *Dec) *Dec {
xx, yy := upscale(x, y)
z.SetScale(xx.Scale())
z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig())
return z
}
// Mul sets z to the product x*y and returns z.
// The scale of z is the sum of the scales of x and y.
func (z *Dec) Mul(x, y *Dec) *Dec {
z.SetScale(x.Scale() + y.Scale())
z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig())
return z
}
// Round sets z to the value of x rounded to Scale s using Rounder r, and
// returns z.
func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
return z.QuoRound(x, NewDec(1, 0), s, r)
}
// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
// specified scale.
//
// If the rounder is RoundExact but the result can not be expressed exactly at
// the specified scale, QuoRound returns nil, and the value of z is undefined.
//
// There is no corresponding Div method; the equivalent can be achieved through
// the choice of Rounder used.
//
func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
return z.quo(x, y, sclr{s}, r)
}
func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
scl := s.Scale(x, y)
var zzz *Dec
if r.UseRemainder() {
zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
zzz = r.Round(new(Dec), zz, rA, rB)
} else {
zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
zzz = r.Round(new(Dec), zz, nil, nil)
}
if zzz == nil {
return nil
}
return z.Set(zzz)
}
// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
// decimal. Otherwise it returns nil and the value of z is undefined.
//
// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
// calculated so that the remainder will be zero whenever x/y is a finite
// decimal.
func (z *Dec) QuoExact(x, y *Dec) *Dec {
return z.quo(x, y, scaleQuoExact{}, RoundExact)
}
// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
// it sets remNum and remDen to the numerator and denominator of the remainder.
// It returns z, remNum and remDen.
//
// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
// that is, the results satisfy the following equation:
//
// x / y = z + (remNum/remDen) * 10**(-z.Scale())
//
// See Rounder for more details about rounding.
//
func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
// difference (required adjustment) compared to "canonical" result scale
shift := s - (x.Scale() - y.Scale())
// pointers to adjusted unscaled dividend and divisor
var ix, iy *big.Int
switch {
case shift > 0:
// increased scale: decimal-shift dividend left
ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
iy = y.UnscaledBig()
case shift < 0:
// decreased scale: decimal-shift divisor left
ix = x.UnscaledBig()
iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
default:
ix = x.UnscaledBig()
iy = y.UnscaledBig()
}
// save a copy of iy in case it is about to be overwritten with the result
iy2 := iy
if iy == z.UnscaledBig() {
iy2 = new(big.Int).Set(iy)
}
// set scale
z.SetScale(s)
// set unscaled
if useRem {
// Int division
_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
// set remainder
remNum.Set(intr)
remDen.Set(iy2)
} else {
z.UnscaledBig().Quo(ix, iy)
}
return z, remNum, remDen
}
type sclr struct{ s Scale }
func (s sclr) Scale(x, y *Dec) Scale {
return s.s
}
type scaleQuoExact struct{}
func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
var f10 Scale
if f2 > f5 {
f10 = Scale(f2)
} else {
f10 = Scale(f5)
}
return x.Scale() - y.Scale() + f10
}
func factor(n *big.Int, p *big.Int) int {
// could be improved for large factors
d, f := n, 0
for {
dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
if dm.Sign() == 0 {
f++
d = dd
} else {
break
}
}
return f
}
func factor2(n *big.Int) int {
// could be improved for large factors
f := 0
for ; n.Bit(f) == 0; f++ {
}
return f
}
func upscale(a, b *Dec) (*Dec, *Dec) {
if a.Scale() == b.Scale() {
return a, b
}
if a.Scale() > b.Scale() {
bb := b.rescale(a.Scale())
return a, bb
}
aa := a.rescale(b.Scale())
return aa, b
}
func exp10(x Scale) *big.Int {
if int(x) < len(exp10cache) {
return &exp10cache[int(x)]
}
return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
}
func (x *Dec) rescale(newScale Scale) *Dec {
shift := newScale - x.Scale()
switch {
case shift < 0:
e := exp10(-shift)
return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
case shift > 0:
e := exp10(shift)
return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
}
return x
}
var zeros = []byte("00000000000000000000000000000000" +
"00000000000000000000000000000000")
var lzeros = Scale(len(zeros))
func appendZeros(s []byte, n Scale) []byte {
for i := Scale(0); i < n; i += lzeros {
if n > i+lzeros {
s = append(s, zeros...)
} else {
s = append(s, zeros[0:n-i]...)
}
}
return s
}
func (x *Dec) String() string {
if x == nil {
return "<nil>"
}
scale := x.Scale()
s := []byte(x.UnscaledBig().String())
if scale <= 0 {
if scale != 0 && x.unscaled.Sign() != 0 {
s = appendZeros(s, -scale)
}
return string(s)
}
negbit := Scale(-((x.Sign() - 1) / 2))
// scale > 0
lens := Scale(len(s))
if lens-negbit <= scale {
ss := make([]byte, 0, scale+2)
if negbit == 1 {
ss = append(ss, '-')
}
ss = append(ss, '0', '.')
ss = appendZeros(ss, scale-lens+negbit)
ss = append(ss, s[negbit:]...)
return string(ss)
}
// lens > scale
ss := make([]byte, 0, lens+1)
ss = append(ss, s[:lens-scale]...)
ss = append(ss, '.')
ss = append(ss, s[lens-scale:]...)
return string(ss)
}
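// Illustrative sketch (hypothetical helper): String renders small negative
// values with a leading "-0." and expands a negative scale into trailing
// zeros.
func exampleStringForms() {
	fmt.Println(NewDec(-12, 3).String()) // prints: -0.012
	fmt.Println(NewDec(5, -2).String())  // prints: 500
}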
// Format is a support routine for fmt.Formatter. It accepts the decimal
// formats 'd' and 'f' as well as 'v' and 's', and handles all of them
// equivalently. Width, precision, flags and bases 2, 8, 16 are not supported.
func (x *Dec) Format(s fmt.State, ch rune) {
if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' {
fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String())
return
}
fmt.Fprint(s, x.String())
}
func (z *Dec) scan(r io.RuneScanner) (*Dec, error) {
unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes
dp, dg := -1, -1 // indexes of decimal point, first digit
loop:
for {
ch, _, err := r.ReadRune()
if err == io.EOF {
break loop
}
if err != nil {
return nil, err
}
switch {
case ch == '+' || ch == '-':
if len(unscaled) > 0 || dp >= 0 { // must be first character
r.UnreadRune()
break loop
}
case ch == '.':
if dp >= 0 {
r.UnreadRune()
break loop
}
dp = len(unscaled)
continue // don't add to unscaled
case ch >= '0' && ch <= '9':
if dg == -1 {
dg = len(unscaled)
}
default:
r.UnreadRune()
break loop
}
unscaled = append(unscaled, byte(ch))
}
if dg == -1 {
return nil, fmt.Errorf("no digits read")
}
if dp >= 0 {
z.SetScale(Scale(len(unscaled) - dp))
} else {
z.SetScale(0)
}
_, ok := z.UnscaledBig().SetString(string(unscaled), 10)
if !ok {
return nil, fmt.Errorf("invalid decimal: %s", string(unscaled))
}
return z, nil
}
// SetString sets z to the value of s, interpreted as a decimal (base 10),
// and returns z and a boolean indicating success. The scale of z is the
// number of digits after the decimal point (including any trailing 0s),
// or 0 if there is no decimal point. If SetString fails, the value of z
// is undefined but the returned value is nil.
func (z *Dec) SetString(s string) (*Dec, bool) {
r := strings.NewReader(s)
_, err := z.scan(r)
if err != nil {
return nil, false
}
_, _, err = r.ReadRune()
if err != io.EOF {
return nil, false
}
// err == io.EOF => scan consumed all of s
return z, true
}
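// Illustrative sketch (hypothetical helper): SetString keeps trailing zeros,
// which show up in the resulting scale.
func exampleSetString() {
	d := new(Dec)
	if _, ok := d.SetString("12.3400"); ok {
		fmt.Println(d.Scale(), d.String()) // prints: 4 12.3400
	}
}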
// Scan is a support routine for fmt.Scanner; it sets z to the value of
// the scanned number. It accepts the decimal formats 'd' and 'f', and
// handles both equivalently. Bases 2, 8, 16 are not supported.
// The scale of z is the number of digits after the decimal point
// (including any trailing 0s), or 0 if there is no decimal point.
func (z *Dec) Scan(s fmt.ScanState, ch rune) error {
if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' {
return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch)
}
s.SkipSpace()
_, err := z.scan(s)
return err
}
// Gob encoding version
const decGobVersion byte = 1
func scaleBytes(s Scale) []byte {
buf := make([]byte, scaleSize)
i := scaleSize
for j := 0; j < scaleSize; j++ {
i--
buf[i] = byte(s)
s >>= 8
}
return buf
}
func scale(b []byte) (s Scale) {
for j := 0; j < scaleSize; j++ {
s <<= 8
s |= Scale(b[j])
}
return
}
// GobEncode implements the gob.GobEncoder interface.
func (x *Dec) GobEncode() ([]byte, error) {
buf, err := x.UnscaledBig().GobEncode()
if err != nil {
return nil, err
}
buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion)
return buf, nil
}
// GobDecode implements the gob.GobDecoder interface.
func (z *Dec) GobDecode(buf []byte) error {
if len(buf) == 0 {
return fmt.Errorf("Dec.GobDecode: no data")
}
b := buf[len(buf)-1]
if b != decGobVersion {
return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b)
}
l := len(buf) - scaleSize - 1
err := z.UnscaledBig().GobDecode(buf[:l])
if err != nil {
return err
}
z.SetScale(scale(buf[l : l+scaleSize]))
return nil
}
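// Illustrative sketch (hypothetical helper): a gob round trip. The payload is
// the unscaled big.Int encoding followed by scaleSize bytes of scale and a
// one-byte version tag, as encoded above.
func exampleGobRoundTrip() {
	buf, _ := NewDec(314, 2).GobEncode()
	d := new(Dec)
	if err := d.GobDecode(buf); err == nil {
		fmt.Println(d.String()) // prints: 3.14
	}
}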
// MarshalText implements the encoding.TextMarshaler interface.
func (x *Dec) MarshalText() ([]byte, error) {
return []byte(x.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (z *Dec) UnmarshalText(data []byte) error {
_, ok := z.SetString(string(data))
if !ok {
return fmt.Errorf("invalid inf.Dec")
}
return nil
}
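// Illustrative sketch (hypothetical helper): the text marshalers round-trip
// through the same decimal string form used by String and SetString.
func exampleTextRoundTrip() {
	b, _ := NewDec(5, 1).MarshalText() // "0.5"
	d := new(Dec)
	if err := d.UnmarshalText(b); err == nil {
		fmt.Println(d.String()) // prints: 0.5
	}
}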

145
vendor/gopkg.in/inf.v0/rounder.go generated vendored Normal file
View file

@ -0,0 +1,145 @@
package inf
import (
"math/big"
)
// Rounder represents a method for rounding the (possibly infinite decimal)
// result of a division to a finite Dec. It is used by Dec.Round() and
// Dec.QuoRound().
//
// See the Example for results of using each Rounder with some sample values.
//
type Rounder rounder
// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
// definitions of these rounding modes.
var (
RoundDown Rounder // towards 0
RoundUp Rounder // away from 0
RoundFloor Rounder // towards -infinity
RoundCeil Rounder // towards +infinity
RoundHalfDown Rounder // to nearest; towards 0 if same distance
RoundHalfUp Rounder // to nearest; away from 0 if same distance
RoundHalfEven Rounder // to nearest; even last digit if same distance
)
// RoundExact is to be used in the case when rounding is not necessary.
// When used with QuoRound or Round, it returns the result verbatim when it
// can be expressed exactly with the given precision, and it returns nil
// otherwise. QuoExact is a shorthand for using QuoRound with RoundExact.
var RoundExact Rounder
type rounder interface {
// When UseRemainder() returns true, the Round() method is passed the
// remainder of the division, expressed as the numerator and denominator of
// a rational.
UseRemainder() bool
// Round sets the rounded value of a quotient to z, and returns z.
// quo is rounded down (truncated towards zero) to the scale obtained from
// the Scaler in Quo().
//
// When the remainder is not used, remNum and remDen are nil.
// When used, the remainder is normalized between -1 and 1; that is:
//
// -|remDen| < remNum < |remDen|
//
// remDen has the same sign as y, and remNum is zero or has the same sign
// as x.
Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
type rndr struct {
useRem bool
round func(z, quo *Dec, remNum, remDen *big.Int) *Dec
}
func (r rndr) UseRemainder() bool {
return r.useRem
}
func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
return r.round(z, quo, remNum, remDen)
}
var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}
func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
return func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
brA, brB := rA.BitLen(), rB.BitLen()
if brA < brB-1 {
// brA < brB-1 => |rA| < |rB/2|
return z
}
roundUp := false
srA, srB := rA.Sign(), rB.Sign()
s := srA * srB
if brA == brB-1 {
rA2 := new(big.Int).Lsh(rA, 1)
if s < 0 {
rA2.Neg(rA2)
}
roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
} else {
// brA > brB-1 => |rA| > |rB/2|
roundUp = true
}
if roundUp {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
}
return z
}
}
func init() {
RoundExact = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
if rA.Sign() != 0 {
return nil
}
return z.Set(q)
}}
RoundDown = rndr{false,
func(z, q *Dec, rA, rB *big.Int) *Dec {
return z.Set(q)
}}
RoundUp = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign() != 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1])
}
return z
}}
RoundFloor = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign()*rB.Sign() < 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
}
return z
}}
RoundCeil = rndr{true,
func(z, q *Dec, rA, rB *big.Int) *Dec {
z.Set(q)
if rA.Sign()*rB.Sign() > 0 {
z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
}
return z
}}
RoundHalfDown = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c > 0
})}
RoundHalfUp = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c >= 0
})}
RoundHalfEven = rndr{true, roundHalf(
func(c int, odd uint) bool {
return c > 0 || c == 0 && odd == 1
})}
}
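// Illustrative sketch (hypothetical helper; assumes Dec.QuoRound from dec.go,
// which is not shown in this hunk): the same division under two rounders.
// 2/3 at scale 2 truncates to 0.66 with RoundDown and rounds to 0.67 with
// RoundHalfUp; RoundExact would yield nil here since 2/3 does not terminate.
func exampleQuoRound() (down, halfUp *Dec) {
	x, y := NewDec(2, 0), NewDec(3, 0)
	down = new(Dec).QuoRound(x, y, 2, RoundDown)     // 0.66
	halfUp = new(Dec).QuoRound(x, y, 2, RoundHalfUp) // 0.67
	return down, halfUp
}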

191
vendor/gopkg.in/ini.v1/LICENSE generated vendored Normal file
View file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

32
vendor/gopkg.in/ini.v1/error.go generated vendored Normal file
View file

@ -0,0 +1,32 @@
// Copyright 2016 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
)
type ErrDelimiterNotFound struct {
Line string
}
func IsErrDelimiterNotFound(err error) bool {
_, ok := err.(ErrDelimiterNotFound)
return ok
}
func (err ErrDelimiterNotFound) Error() string {
return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
}

535
vendor/gopkg.in/ini.v1/ini.go generated vendored Normal file
View file

@ -0,0 +1,535 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// Package ini provides INI file read and write functionality in Go.
package ini
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
)
const (
// Name for default section. You can use this constant or the string literal.
// In most cases, an empty string is all you need to access the section.
DEFAULT_SECTION = "DEFAULT"
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.23.0"
)
// Version returns current package version literal.
func Version() string {
return _VERSION
}
var (
// Delimiter used to detect or compose a new line.
// This variable will be changed to "\r\n" automatically on Windows
// at package init time.
LineBreak = "\n"
// Variable regexp pattern: %(variable)s
varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
// Indicate whether to align "=" sign with spaces to produce pretty output
// or reduce all possible spaces for compact format.
PrettyFormat = true
// Explicitly write DEFAULT section header
DefaultHeader = false
)
func init() {
if runtime.GOOS == "windows" {
LineBreak = "\r\n"
}
}
func inSlice(str string, s []string) bool {
for _, v := range s {
if str == v {
return true
}
}
return false
}
// dataSource is an interface that returns an object which can be read and closed.
type dataSource interface {
ReadCloser() (io.ReadCloser, error)
}
// sourceFile represents an object that contains content on the local file system.
type sourceFile struct {
name string
}
func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
return os.Open(s.name)
}
type bytesReadCloser struct {
reader io.Reader
}
func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
return rc.reader.Read(p)
}
func (rc *bytesReadCloser) Close() error {
return nil
}
// sourceData represents an object that contains content in memory.
type sourceData struct {
data []byte
}
func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(s.data)), nil
}
// sourceReadCloser represents an input stream with Close method.
type sourceReadCloser struct {
reader io.ReadCloser
}
func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
return s.reader, nil
}
// File represents a combination of one or more INI files in memory.
type File struct {
// Enables locking for safe concurrent use; may be disabled when that doesn't matter.
BlockMode bool
// Make sure data is safe in multiple goroutines.
lock sync.RWMutex
// Allow combination of multiple data sources.
dataSources []dataSource
// Actual data is stored here.
sections map[string]*Section
// To keep data in order.
sectionList []string
options LoadOptions
NameMapper
ValueMapper
}
// newFile initializes File object with given data sources.
func newFile(dataSources []dataSource, opts LoadOptions) *File {
return &File{
BlockMode: true,
dataSources: dataSources,
sections: make(map[string]*Section),
sectionList: make([]string, 0, 10),
options: opts,
}
}
func parseDataSource(source interface{}) (dataSource, error) {
switch s := source.(type) {
case string:
return sourceFile{s}, nil
case []byte:
return &sourceData{s}, nil
case io.ReadCloser:
return &sourceReadCloser{s}, nil
default:
return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
}
}
type LoadOptions struct {
// Loose indicates whether the parser should ignore nonexistent files or return error.
Loose bool
// Insensitive indicates whether the parser forces all section and key names to lowercase.
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// AllowBooleanKeys indicates whether a key without a delimiter is allowed as
// a boolean type key instead of being reported as a missing-value error.
// This type of key is mostly used in my.cnf.
AllowBooleanKeys bool
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
}
func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
sources := make([]dataSource, len(others)+1)
sources[0], err = parseDataSource(source)
if err != nil {
return nil, err
}
for i := range others {
sources[i+1], err = parseDataSource(others[i])
if err != nil {
return nil, err
}
}
f := newFile(sources, opts)
if err = f.Reload(); err != nil {
return nil, err
}
return f, nil
}
// Load loads and parses from INI data sources.
// Arguments can be mixed of file name with string type, or raw data in []byte.
// It will return error if list contains nonexistent files.
func Load(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{}, source, others...)
}
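// Illustrative sketch (hypothetical helper): the minimal read path with an
// in-memory source; file names (string) and io.ReadCloser values can be
// mixed in the same call.
func exampleLoad() {
	cfg, err := Load([]byte("app_mode = development"))
	if err != nil {
		return
	}
	fmt.Println(cfg.Section("").Key("app_mode").String()) // prints: development
}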
// LooseLoad has exactly same functionality as Load function
// except it ignores nonexistent files instead of returning error.
func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Loose: true}, source, others...)
}
// InsensitiveLoad has exactly same functionality as Load function
// except it forces all section and key names to be lowercased.
func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here; we are sure our data is good.
f, _ := Load([]byte(""))
return f
}
// NewSection creates a new section.
func (f *File) NewSection(name string) (*Section, error) {
if len(name) == 0 {
return nil, errors.New("error creating new section: empty section name")
} else if f.options.Insensitive && name != DEFAULT_SECTION {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if inSlice(name, f.sectionList) {
return f.sections[name], nil
}
f.sectionList = append(f.sectionList, name)
f.sections[name] = newSection(f, name)
return f.sections[name], nil
}
// NewRawSection creates a new section with an unparseable body.
func (f *File) NewRawSection(name, body string) (*Section, error) {
section, err := f.NewSection(name)
if err != nil {
return nil, err
}
section.isRawSection = true
section.rawBody = body
return section, nil
}
// NewSections creates a list of sections.
func (f *File) NewSections(names ...string) (err error) {
for _, name := range names {
if _, err = f.NewSection(name); err != nil {
return err
}
}
return nil
}
// GetSection returns section by given name.
func (f *File) GetSection(name string) (*Section, error) {
if len(name) == 0 {
name = DEFAULT_SECTION
} else if f.options.Insensitive {
name = strings.ToLower(name)
}
if f.BlockMode {
f.lock.RLock()
defer f.lock.RUnlock()
}
sec := f.sections[name]
if sec == nil {
return nil, fmt.Errorf("section '%s' does not exist", name)
}
return sec, nil
}
// Section assumes named section exists and returns a zero-value when not.
func (f *File) Section(name string) *Section {
sec, err := f.GetSection(name)
if err != nil {
// Note: It's OK here because the only possible error is empty section name,
// but if it's empty, this piece of code won't be executed.
sec, _ = f.NewSection(name)
return sec
}
return sec
}
// Sections returns a list of all sections.
func (f *File) Sections() []*Section {
sections := make([]*Section, len(f.sectionList))
for i := range f.sectionList {
sections[i] = f.Section(f.sectionList[i])
}
return sections
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
copy(list, f.sectionList)
return list
}
// DeleteSection deletes a section.
func (f *File) DeleteSection(name string) {
if f.BlockMode {
f.lock.Lock()
defer f.lock.Unlock()
}
if len(name) == 0 {
name = DEFAULT_SECTION
}
for i, s := range f.sectionList {
if s == name {
f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
delete(f.sections, name)
return
}
}
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
return err
}
defer r.Close()
return f.parse(r)
}
// Reload reloads and parses all data sources.
func (f *File) Reload() (err error) {
for _, s := range f.dataSources {
if err = f.reload(s); err != nil {
// In loose mode, we create an empty default section for nonexistent files.
if os.IsNotExist(err) && f.options.Loose {
f.parse(bytes.NewBuffer(nil))
continue
}
return err
}
}
return nil
}
// Append appends one or more data sources and reloads automatically.
func (f *File) Append(source interface{}, others ...interface{}) error {
ds, err := parseDataSource(source)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
for _, s := range others {
ds, err = parseDataSource(s)
if err != nil {
return err
}
f.dataSources = append(f.dataSources, ds)
}
return f.Reload()
}
// WriteToIndent writes content into io.Writer with given indentation.
// If PrettyFormat has been set to be true,
// it will align "=" sign with spaces under each section.
func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
equalSign := "="
if PrettyFormat {
equalSign = " = "
}
// Use buffer to make sure target is safe until finish encoding.
buf := bytes.NewBuffer(nil)
for i, sname := range f.sectionList {
sec := f.Section(sname)
if len(sec.Comment) > 0 {
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
sec.Comment = "; " + sec.Comment
}
if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
return 0, err
}
}
if i > 0 || DefaultHeader {
if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
return 0, err
}
} else {
// Write nothing if default section is empty
if len(sec.keyList) == 0 {
continue
}
}
if sec.isRawSection {
if _, err = buf.WriteString(sec.rawBody); err != nil {
return 0, err
}
continue
}
// Count and generate alignment length and buffer spaces using the
// longest key. Keys may be modified if they contain certain characters, so
// we need to take that into account in our calculation.
alignLength := 0
if PrettyFormat {
for _, kname := range sec.keyList {
keyLength := len(kname)
// The first case surrounds the key with ` and the second with """
if strings.ContainsAny(kname, "\"=:") {
keyLength += 2
} else if strings.Contains(kname, "`") {
keyLength += 6
}
if keyLength > alignLength {
alignLength = keyLength
}
}
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
if key.Comment[0] != '#' && key.Comment[0] != ';' {
key.Comment = "; " + key.Comment
}
if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
return 0, err
}
}
if len(indent) > 0 && sname != DEFAULT_SECTION {
buf.WriteString(indent)
}
switch {
case key.isAutoIncrement:
kname = "-"
case strings.ContainsAny(kname, "\"=:"):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
if key.isBooleanType {
continue
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
val := key.value
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
}
}
// Put a line between sections
if _, err = buf.WriteString(LineBreak); err != nil {
return 0, err
}
}
return buf.WriteTo(w)
}
// WriteTo writes file content into io.Writer.
func (f *File) WriteTo(w io.Writer) (int64, error) {
return f.WriteToIndent(w, "")
}
// SaveToIndent writes content to file system with given value indention.
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because os.Create truncates any existing file,
// it is safer to save to a temporary location and rename it after we are done.
tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
defer os.Remove(tmpPath)
fw, err := os.Create(tmpPath)
if err != nil {
return err
}
if _, err = f.WriteToIndent(fw, indent); err != nil {
fw.Close()
return err
}
fw.Close()
// Remove old file and rename the new one.
os.Remove(filename)
return os.Rename(tmpPath, filename)
}
// SaveTo writes content to file system.
func (f *File) SaveTo(filename string) error {
return f.SaveToIndent(filename, "")
}
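// Illustrative sketch (hypothetical helper and file name): building a file in
// memory and persisting it via the temp-file-and-rename path above.
func exampleSave() error {
	cfg := Empty()
	sec, err := cfg.NewSection("server")
	if err != nil {
		return err
	}
	if _, err = sec.NewKey("addr", ":8080"); err != nil {
		return err
	}
	return cfg.SaveTo("app.ini") // "app.ini" is a hypothetical path
}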

633
vendor/gopkg.in/ini.v1/key.go generated vendored Normal file
View file

@ -0,0 +1,633 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"fmt"
"strconv"
"strings"
"time"
)
// Key represents a key under a section.
type Key struct {
s *Section
name string
value string
isAutoIncrement bool
isBooleanType bool
Comment string
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
// Name returns name of key.
func (k *Key) Name() string {
return k.name
}
// Value returns the raw value of the key for performance purposes.
func (k *Key) Value() string {
return k.value
}
// String returns string representation of value.
func (k *Key) String() string {
val := k.value
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
if strings.Index(val, "%") == -1 {
return val
}
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
break
}
// Take off leading '%(' and trailing ')s'.
noption := strings.TrimLeft(vr, "%(")
noption = strings.TrimRight(noption, ")s")
// Search in the same section.
nk, err := k.s.GetKey(noption)
if err != nil {
// Search again in default section.
nk, _ = k.s.f.Section("").GetKey(noption)
}
// Substitute the variable reference with the looked-up value.
val = strings.Replace(val, vr, nk.value, -1)
}
return val
}
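// Illustrative sketch (hypothetical helper): recursive %(name)s substitution
// as implemented above, resolved first in the key's own section and then in
// the default section.
func exampleVarSubstitution() {
	cfg, _ := Load([]byte("host = example.com\nurl = https://%(host)s/api"))
	fmt.Println(cfg.Section("").Key("url").String()) // prints: https://example.com/api
}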
// Validate accepts a validate function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
return fn(k.String())
}
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
}
// Bool returns bool type value.
func (k *Key) Bool() (bool, error) {
return parseBool(k.String())
}
// Float64 returns float64 type value.
func (k *Key) Float64() (float64, error) {
return strconv.ParseFloat(k.String(), 64)
}
// Int returns int type value.
func (k *Key) Int() (int, error) {
return strconv.Atoi(k.String())
}
// Int64 returns int64 type value.
func (k *Key) Int64() (int64, error) {
return strconv.ParseInt(k.String(), 10, 64)
}
// Uint returns uint type value.
func (k *Key) Uint() (uint, error) {
u, e := strconv.ParseUint(k.String(), 10, 64)
return uint(u), e
}
// Uint64 returns uint64 type value.
func (k *Key) Uint64() (uint64, error) {
return strconv.ParseUint(k.String(), 10, 64)
}
// Duration returns time.Duration type value.
func (k *Key) Duration() (time.Duration, error) {
return time.ParseDuration(k.String())
}
// TimeFormat parses with given format and returns time.Time type value.
func (k *Key) TimeFormat(format string) (time.Time, error) {
return time.Parse(format, k.String())
}
// Time parses with RFC3339 format and returns time.Time type value.
func (k *Key) Time() (time.Time, error) {
return k.TimeFormat(time.RFC3339)
}
// MustString returns default value if key value is empty.
func (k *Key) MustString(defaultVal string) string {
val := k.String()
if len(val) == 0 {
k.value = defaultVal
return defaultVal
}
return val
}
// MustBool always returns value without error,
// it returns false if error occurs.
func (k *Key) MustBool(defaultVal ...bool) bool {
val, err := k.Bool()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatBool(defaultVal[0])
return defaultVal[0]
}
return val
}
// MustFloat64 always returns value without error,
// it returns 0.0 if error occurs.
func (k *Key) MustFloat64(defaultVal ...float64) float64 {
val, err := k.Float64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
return defaultVal[0]
}
return val
}
// MustInt always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt(defaultVal ...int) int {
val, err := k.Int()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustInt64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustInt64(defaultVal ...int64) int64 {
val, err := k.Int64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatInt(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustUint always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint(defaultVal ...uint) uint {
val, err := k.Uint()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
return defaultVal[0]
}
return val
}
// MustUint64 always returns value without error,
// it returns 0 if error occurs.
func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
val, err := k.Uint64()
if len(defaultVal) > 0 && err != nil {
k.value = strconv.FormatUint(defaultVal[0], 10)
return defaultVal[0]
}
return val
}
// MustDuration always returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
val, err := k.Duration()
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].String()
return defaultVal[0]
}
return val
}
// MustTimeFormat always parses with given format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
val, err := k.TimeFormat(format)
if len(defaultVal) > 0 && err != nil {
k.value = defaultVal[0].Format(format)
return defaultVal[0]
}
return val
}
// MustTime always parses with RFC3339 format and returns value without error,
// it returns zero value if error occurs.
func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
return k.MustTimeFormat(time.RFC3339, defaultVal...)
}
// In always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) In(defaultVal string, candidates []string) string {
val := k.String()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InFloat64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
val := k.MustFloat64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt(defaultVal int, candidates []int) int {
val := k.MustInt()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InInt64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
val := k.MustInt64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
val := k.MustUint()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InUint64 always returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
val := k.MustUint64()
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTimeFormat always parses with given format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
val := k.MustTimeFormat(format)
for _, cand := range candidates {
if val == cand {
return val
}
}
return defaultVal
}
// InTime always parses with RFC3339 format and returns value without error,
// it returns default value if error occurs or doesn't fit into candidates.
func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
}
// RangeFloat64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
val := k.MustFloat64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt(defaultVal, min, max int) int {
val := k.MustInt()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeInt64 checks if value is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
val := k.MustInt64()
if val < min || val > max {
return defaultVal
}
return val
}
// RangeTimeFormat checks if value with given format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
val := k.MustTimeFormat(format)
if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
return defaultVal
}
return val
}
// RangeTime checks if value with RFC3339 format is in given range inclusively,
// and returns default value if it's not.
func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
return []string{}
}
vals := strings.Split(str, delim)
for i := range vals {
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.getInts(delim, true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.getUints(delim, true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, true, false)
return vals
}
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.getInts(delim, false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.getUints(delim, false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, false, false)
return vals
}
// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimes(delim string) []time.Time {
return k.ValidTimesFormat(time.RFC3339, delim)
}
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.getFloat64s(delim, false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.getInts(delim, false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.getInt64s(delim, false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.getUints(delim, false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.getUint64s(delim, false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.getTimesFormat(format, delim, false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
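// Illustrative sketch (hypothetical helper): the three list families differ
// only in how invalid elements are handled. The plain form keeps them as zero
// values, the Valid form drops them, and the Strict form fails.
func exampleListFamilies() {
	cfg, _ := Load([]byte("nums = 1, x, 3"))
	k := cfg.Section("").Key("nums")
	fmt.Println(k.Ints(","))      // prints: [1 0 3]
	fmt.Println(k.ValidInts(",")) // prints: [1 3]
	_, err := k.StrictInts(",")
	fmt.Println(err != nil) // prints: true
}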
// getFloat64s returns list of float64 divided by given delimiter.
func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
strs := k.Strings(delim)
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInts returns list of int divided by given delimiter.
func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
strs := k.Strings(delim)
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getInt64s returns list of int64 divided by given delimiter.
func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
strs := k.Strings(delim)
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getUints returns list of uint divided by given delimiter.
func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
strs := k.Strings(delim)
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, uint(val))
}
}
return vals, nil
}
// getUint64s returns list of uint64 divided by given delimiter.
func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
strs := k.Strings(delim)
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
strs := k.Strings(delim)
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
if err != nil && returnOnInvalid {
return nil, err
}
if err == nil || addInvalid {
vals = append(vals, val)
}
}
return vals, nil
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}

356
vendor/gopkg.in/ini.v1/parser.go generated vendored Normal file
View file

@ -0,0 +1,356 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"unicode"
)
type tokenType int
const (
_TOKEN_INVALID tokenType = iota
_TOKEN_COMMENT
_TOKEN_SECTION
_TOKEN_KEY
)
type parser struct {
buf *bufio.Reader
isEOF bool
count int
comment *bytes.Buffer
}
func newParser(r io.Reader) *parser {
return &parser{
buf: bufio.NewReader(r),
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles the header of UTF-8, UTF-16 LE and UTF-16 BE BOM formats.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(2)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 2 {
return nil
}
switch {
case mask[0] == 254 && mask[1] == 255:
fallthrough
case mask[0] == 255 && mask[1] == 254:
p.buf.Read(mask)
case mask[0] == 239 && mask[1] == 187:
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
}
if mask[2] == 191 {
p.buf.Read(mask)
}
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(in []byte) (string, int, error) {
line := string(in)
// Check if the key name is surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Extract the key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], "=:")
if i < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, "=:")
if endIdx < 0 {
return "", -1, ErrDelimiterNotFound{line}
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
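// Illustrative sketch (hypothetical helper): a quoted key name may contain
// the "=" and ":" delimiter characters; the returned offset points just past
// the real delimiter.
func exampleReadKeyName() {
	name, offset, _ := readKeyName([]byte("`a=b` = v"))
	fmt.Println(name, offset) // prints: a=b 7
}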
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote reports whether the first and last characters are the
// given quote (\" or \') and that quote appears nowhere else in the string.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
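// Illustrative sketch (hypothetical helper): only a single surrounding pair
// of quotes counts.
func exampleHasSurroundedQuote() {
	fmt.Println(hasSurroundedQuote(`"abc"`, '"')) // prints: true
	fmt.Println(hasSurroundedQuote(`"a"b"`, '"')) // prints: false
}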
func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
line = strings.TrimSpace(line)
// Check continuation lines when desired.
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
// Trim single quotes
if hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"') {
line = line[1 : len(line)-1]
}
return line, nil
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader)
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
section, _ := f.NewSection(DEFAULT_SECTION)
var line []byte
var inUnparseableSection bool
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we intentionally keep the trailing line break,
// since it is needed when a second comment line is appended;
// it is cleaned up once when the comment is assigned to a value.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
// TODO(unknwon): use LastIndexByte when stop supporting Go1.4
closeIdx := bytes.LastIndex(line, []byte("]"))
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
name := string(line[1:closeIdx])
section, err = f.NewSection(name)
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
inUnparseableSection = false
for i := range f.options.UnparseableSections {
if f.options.UnparseableSections[i] == name ||
(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
inUnparseableSection = true
continue
}
}
continue
}
if inUnparseableSection {
section.isRawSection = true
section.rawBody += string(line)
continue
}
kname, offset, err := readKeyName(line)
if err != nil {
// Treat as a boolean key when desired; the whole line is the key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
key, err := section.NewKey(string(line), "true")
if err != nil {
return err
}
key.isBooleanType = true
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
}
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
key, err := section.NewKey(kname, "")
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
return nil
}
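// Editor's note: a hedged sketch of two parse features above, using the
// exported LoadSources/LoadOptions names as they exist in this package:
// with AllowBooleanKeys a bare line becomes a key with value "true", and a
// key named "-" is renamed to the auto-incremented "#N".
//
//	cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true},
//		[]byte("[features]\n-=first\n-=second\nfast"))
//	if err == nil {
//		cfg.Section("features").Key("#1").String()   // expected: "first"
//		cfg.Section("features").Key("#2").String()   // expected: "second"
//		cfg.Section("features").Key("fast").String() // expected: "true"
//	}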

221
vendor/gopkg.in/ini.v1/section.go generated vendored Normal file
View file

@ -0,0 +1,221 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"errors"
"fmt"
"strings"
)
// Section represents a config section.
type Section struct {
f *File
Comment string
name string
keys map[string]*Key
keyList []string
keysHash map[string]string
isRawSection bool
rawBody string
}
func newSection(f *File, name string) *Section {
return &Section{
f: f,
name: name,
keys: make(map[string]*Key),
keyList: make([]string, 0, 10),
keysHash: make(map[string]string),
}
}
// Name returns name of Section.
func (s *Section) Name() string {
return s.name
}
// Body returns the rawBody of the Section when the section was marked as
// unparseable. Leading and trailing whitespace is still trimmed, following
// the usual INI rules.
func (s *Section) Body() string {
return strings.TrimSpace(s.rawBody)
}
// NewKey creates a new key in the given section.
func (s *Section) NewKey(name, val string) (*Key, error) {
if len(name) == 0 {
return nil, errors.New("error creating new key: empty key name")
} else if s.f.options.Insensitive {
name = strings.ToLower(name)
}
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
if inSlice(name, s.keyList) {
s.keys[name].value = val
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = &Key{
s: s,
name: name,
value: val,
}
s.keysHash[name] = val
return s.keys[name], nil
}
// GetKey returns the key in the section with the given name.
func (s *Section) GetKey(name string) (*Key, error) {
// FIXME: change to section level lock?
if s.f.BlockMode {
s.f.lock.RLock()
}
if s.f.options.Insensitive {
name = strings.ToLower(name)
}
key := s.keys[name]
if s.f.BlockMode {
s.f.lock.RUnlock()
}
if key == nil {
// Check if it is a child-section.
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
return sec.GetKey(name)
} else {
break
}
}
return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
}
return key, nil
}
// HasKey returns true if section contains a key with given name.
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// Haskey is a backwards-compatible name for HasKey.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
for _, k := range s.keys {
if value == k.value {
return true
}
}
return false
}
// Key returns the named key if it exists, and otherwise creates and returns
// a new key with an empty value.
func (s *Section) Key(name string) *Key {
key, err := s.GetKey(name)
if err != nil {
// Ignoring the error is safe here: the only possible error is an empty
// key name, and that case never reaches this code path.
key, _ = s.NewKey(name, "")
return key
}
return key
}
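// Editor's note (usage sketch, assuming the exported Load API): GetKey
// falls back to parent sections for dotted names, so a key defined in [a]
// is visible from [a.b].
//
//	cfg, err := ini.Load([]byte("[a]\nk = v\n[a.b]"))
//	if err == nil {
//		cfg.Section("a.b").Key("k").String() // expected: "v", inherited from [a]
//	}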
// Keys returns list of keys of section.
func (s *Section) Keys() []*Key {
keys := make([]*Key, len(s.keyList))
for i := range s.keyList {
keys[i] = s.Key(s.keyList[i])
}
return keys
}
// ParentKeys returns list of keys of parent section.
func (s *Section) ParentKeys() []*Key {
var parentKeys []*Key
sname := s.name
for {
if i := strings.LastIndex(sname, "."); i > -1 {
sname = sname[:i]
sec, err := s.f.GetSection(sname)
if err != nil {
continue
}
parentKeys = append(parentKeys, sec.Keys()...)
} else {
break
}
}
return parentKeys
}
// KeyStrings returns list of key names of section.
func (s *Section) KeyStrings() []string {
list := make([]string, len(s.keyList))
copy(list, s.keyList)
return list
}
// KeysHash returns keys hash consisting of names and values.
func (s *Section) KeysHash() map[string]string {
if s.f.BlockMode {
s.f.lock.RLock()
defer s.f.lock.RUnlock()
}
hash := map[string]string{}
for key, value := range s.keysHash {
hash[key] = value
}
return hash
}
// DeleteKey deletes a key from section.
func (s *Section) DeleteKey(name string) {
if s.f.BlockMode {
s.f.lock.Lock()
defer s.f.lock.Unlock()
}
for i, k := range s.keyList {
if k == name {
s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
delete(s.keys, name)
return
}
}
}

431
vendor/gopkg.in/ini.v1/struct.go generated vendored Normal file
View file

@ -0,0 +1,431 @@
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bytes"
"errors"
"fmt"
"reflect"
"strings"
"time"
"unicode"
)
// NameMapper represents an ini tag name mapper.
type NameMapper func(string) string
// Built-in name getters.
var (
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
AllCapsUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
}
newstr = append(newstr, unicode.ToUpper(chr))
}
return string(newstr)
}
// TitleUnderscore converts to format title_underscore.
TitleUnderscore NameMapper = func(raw string) string {
newstr := make([]rune, 0, len(raw))
for i, chr := range raw {
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
if i > 0 {
newstr = append(newstr, '_')
}
chr -= ('A' - 'a')
}
newstr = append(newstr, chr)
}
return string(newstr)
}
)
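// Editor's note: a quick sketch of what the two built-in mappers above
// produce for a typical Go field name.
//
//	ini.AllCapsUnderscore("MaxIdleConns") // "MAX_IDLE_CONNS"
//	ini.TitleUnderscore("MaxIdleConns")   // "max_idle_conns"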
func (s *Section) parseFieldName(raw, actual string) string {
if len(actual) > 0 {
return actual
}
if s.f.NameMapper != nil {
return s.f.NameMapper(raw)
}
return raw
}
func parseDelim(actual string) string {
if len(actual) > 0 {
return actual
}
return ","
}
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to a slice based on its element type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
strs := key.Strings(delim)
numVals := len(strs)
if numVals == 0 {
return nil
}
var vals interface{}
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
vals = key.Ints(delim)
case reflect.Int64:
vals = key.Int64s(delim)
case reflect.Uint:
vals = key.Uints(delim)
case reflect.Uint64:
vals = key.Uint64s(delim)
case reflect.Float64:
vals = key.Float64s(delim)
case reflectTime:
vals = key.Times(delim)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
switch sliceOf {
case reflect.String:
slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
case reflect.Int:
slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
case reflect.Int64:
slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
case reflect.Uint:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
case reflect.Uint64:
slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
case reflect.Float64:
slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
case reflectTime:
slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
}
}
field.Set(slice)
return nil
}
// setWithProperType sets the proper value to a field based on its type.
// It does not return an error when parsing fails, because in that case the
// default value already assigned to the struct should be kept.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
return nil
}
field.SetString(key.String())
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
return nil
}
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
intVal, err := key.Int64()
if err != nil || intVal == 0 {
return nil
}
field.SetInt(intVal)
// byte is an alias for uint8, so supporting uint8 breaks support for byte
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
durationVal, err := key.Duration()
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
uintVal, err := key.Uint64()
if err != nil {
return nil
}
field.SetUint(uintVal)
case reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
}
field.SetFloat(floatVal)
case reflectTime:
timeVal, err := key.Time()
if err != nil {
return nil
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
fieldName := s.parseFieldName(tpField.Name, opts[0])
if len(fieldName) == 0 || !field.CanSet() {
continue
}
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
isStruct := tpField.Type.Kind() == reflect.Struct
if isAnonymous {
field.Set(reflect.New(tpField.Type.Elem()))
}
if isAnonymous || isStruct {
if sec, err := s.f.GetSection(fieldName); err == nil {
if err = sec.mapTo(field); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
continue
}
}
if key, err := s.GetKey(fieldName); err == nil {
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
}
return nil
}
// MapTo maps section to given struct.
func (s *Section) MapTo(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot map to non-pointer struct")
}
return s.mapTo(val)
}
// MapTo maps file to given struct.
func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
// MapToWithMapper maps data sources to the given struct with a name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
if err != nil {
return err
}
cfg.NameMapper = mapper
return cfg.MapTo(v)
}
// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
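// Editor's note: a compact usage sketch of the mapping entry points above.
// The Config struct and its fields are invented for illustration; the
// sketch assumes `import "time"` and the exported MapTo function.
//
//	type Config struct {
//		Name    string        `ini:"name"`
//		Timeout time.Duration `ini:"timeout"`
//		Ports   []int         `ini:"ports" delim:","`
//	}
//
//	var c Config
//	err := ini.MapTo(&c, []byte("name = app\ntimeout = 5s\nports = 80,443"))
//	// on success: c.Name == "app", c.Timeout == 5*time.Second, c.Ports == []int{80, 443}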
// reflectSliceWithProperType does the opposite of setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
slice := field.Slice(0, field.Len())
if field.Len() == 0 {
return nil
}
var buf bytes.Buffer
sliceOf := field.Type().Elem().Kind()
for i := 0; i < field.Len(); i++ {
switch sliceOf {
case reflect.String:
buf.WriteString(slice.Index(i).String())
case reflect.Int, reflect.Int64:
buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
case reflect.Uint, reflect.Uint64:
buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
case reflect.Float64:
buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
case reflectTime:
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
buf.WriteString(delim)
}
key.SetValue(buf.String()[:buf.Len()-1])
return nil
}
// reflectWithProperType does the opposite of setWithProperType.
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
switch t.Kind() {
case reflect.String:
key.SetValue(field.String())
case reflect.Bool:
key.SetValue(fmt.Sprint(field.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
key.SetValue(fmt.Sprint(field.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
key.SetValue(fmt.Sprint(field.Uint()))
case reflect.Float32, reflect.Float64:
key.SetValue(fmt.Sprint(field.Float()))
case reflectTime:
key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
case reflect.Slice:
return reflectSliceWithProperType(key, field, delim)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
// isEmptyValue is copied from encoding/json/encode.go, with modifications
// for time.Time support.
// TODO: add more test coverage.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflectTime:
return v.Interface().(time.Time).IsZero()
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (s *Section) reflectFrom(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
typ := val.Type()
for i := 0; i < typ.NumField(); i++ {
field := val.Field(i)
tpField := typ.Field(i)
tag := tpField.Tag.Get("ini")
if tag == "-" {
continue
}
opts := strings.SplitN(tag, ",", 2)
if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
continue
}
fieldName := s.parseFieldName(tpField.Name, opts[0])
if len(fieldName) == 0 || !field.CanSet() {
continue
}
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
// Note: the only possible error here is that the section does not exist.
sec, err := s.f.GetSection(fieldName)
if err != nil {
// Note: fieldName can never be empty here, so the error can be ignored.
sec, _ = s.f.NewSection(fieldName)
}
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
continue
}
// Note: same reasoning as for the section case above.
key, err := s.GetKey(fieldName)
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
}
return nil
}
// ReflectFrom reflects a section from the given struct.
func (s *Section) ReflectFrom(v interface{}) error {
typ := reflect.TypeOf(v)
val := reflect.ValueOf(v)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
val = val.Elem()
} else {
return errors.New("cannot reflect from non-pointer struct")
}
return s.reflectFrom(val)
}
// ReflectFrom reflects file from given struct.
func (f *File) ReflectFrom(v interface{}) error {
return f.Section("").ReflectFrom(v)
}
// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
cfg.NameMapper = mapper
return cfg.ReflectFrom(v)
}
// ReflectFrom reflects data sources from given struct.
func ReflectFrom(cfg *File, v interface{}) error {
return ReflectFromWithMapper(cfg, v, nil)
}
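// Editor's note: the reverse direction, sketched with the same invented
// Config struct as in the MapTo note above; fields tagged omitempty whose
// value is the zero value are skipped per isEmptyValue.
//
//	cfg := ini.Empty()
//	if err := ini.ReflectFrom(cfg, &Config{Name: "app"}); err == nil {
//		cfg.Section("").Key("name").String() // "app"
//	}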

25
vendor/gopkg.in/mgo.v2/LICENSE generated vendored Normal file
View file

@ -0,0 +1,25 @@
mgo - MongoDB driver for Go
Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

467
vendor/gopkg.in/mgo.v2/auth.go generated vendored Normal file
View file

@ -0,0 +1,467 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"crypto/sha1"
"encoding/hex"
"errors"
"fmt"
"sync"
"gopkg.in/mgo.v2/bson"
"gopkg.in/mgo.v2/internal/scram"
)
type authCmd struct {
Authenticate int
Nonce string
User string
Key string
}
type startSaslCmd struct {
StartSASL int `bson:"startSasl"`
}
type authResult struct {
ErrMsg string
Ok bool
}
type getNonceCmd struct {
GetNonce int
}
type getNonceResult struct {
Nonce string
Err string "$err"
Code int
}
type logoutCmd struct {
Logout int
}
type saslCmd struct {
Start int `bson:"saslStart,omitempty"`
Continue int `bson:"saslContinue,omitempty"`
ConversationId int `bson:"conversationId,omitempty"`
Mechanism string `bson:"mechanism,omitempty"`
Payload []byte
}
type saslResult struct {
Ok bool `bson:"ok"`
NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
Done bool
ConversationId int `bson:"conversationId"`
Payload []byte
ErrMsg string
}
type saslStepper interface {
Step(serverData []byte) (clientData []byte, done bool, err error)
Close()
}
func (socket *mongoSocket) getNonce() (nonce string, err error) {
socket.Lock()
for socket.cachedNonce == "" && socket.dead == nil {
debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
socket.gotNonce.Wait()
}
if socket.cachedNonce == "mongos" {
socket.Unlock()
return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
}
debugf("Socket %p to %s: got nonce", socket, socket.addr)
nonce, err = socket.cachedNonce, socket.dead
socket.cachedNonce = ""
socket.Unlock()
if err != nil {
nonce = ""
}
return
}
func (socket *mongoSocket) resetNonce() {
debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
op := &queryOp{}
op.query = &getNonceCmd{GetNonce: 1}
op.collection = "admin.$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
if err != nil {
socket.kill(errors.New("getNonce: "+err.Error()), true)
return
}
result := &getNonceResult{}
err = bson.Unmarshal(docData, &result)
if err != nil {
socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
return
}
debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
if result.Code == 13390 {
// mongos doesn't yet support auth (see http://j.mp/mongos-auth)
result.Nonce = "mongos"
} else if result.Nonce == "" {
var msg string
if result.Err != "" {
msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
} else {
msg = "Got an empty nonce"
}
socket.kill(errors.New(msg), true)
return
}
socket.Lock()
if socket.cachedNonce != "" {
socket.Unlock()
panic("resetNonce: nonce already cached")
}
socket.cachedNonce = result.Nonce
socket.gotNonce.Signal()
socket.Unlock()
}
err := socket.Query(op)
if err != nil {
socket.kill(errors.New("resetNonce: "+err.Error()), true)
}
}
func (socket *mongoSocket) Login(cred Credential) error {
socket.Lock()
if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
cred.Mechanism = "SCRAM-SHA-1"
}
for _, sockCred := range socket.creds {
if sockCred == cred {
debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
socket.Unlock()
return nil
}
}
if socket.dropLogout(cred) {
debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
}
socket.Unlock()
debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
var err error
switch cred.Mechanism {
case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
err = socket.loginClassic(cred)
case "PLAIN":
err = socket.loginPlain(cred)
case "MONGODB-X509":
err = socket.loginX509(cred)
default:
// Try SASL for everything else, if it is available.
err = socket.loginSASL(cred)
}
if err != nil {
debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
} else {
debugf("Socket %p to %s: login successful", socket, socket.addr)
}
return err
}
func (socket *mongoSocket) loginClassic(cred Credential) error {
// Note that this only works properly because this function is
// synchronous, which means the nonce won't get reset while we're
// using it and any other login requests will block waiting for a
// new nonce provided in the defer call below.
nonce, err := socket.getNonce()
if err != nil {
return err
}
defer socket.resetNonce()
psum := md5.New()
psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
ksum := md5.New()
ksum.Write([]byte(nonce + cred.Username))
ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
key := hex.EncodeToString(ksum.Sum(nil))
cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
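// Editor's note: the MONGODB-CR key above is derived in two MD5 steps; a
// standalone sketch of the same arithmetic (user, password and nonce are
// placeholder variables):
//
//	pwd := md5.Sum([]byte(user + ":mongo:" + password))
//	key := md5.Sum([]byte(nonce + user + hex.EncodeToString(pwd[:])))
//	// hex.EncodeToString(key[:]) is what loginClassic sends as authCmd.Key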
type authX509Cmd struct {
Authenticate int
User string
Mechanism string
}
func (socket *mongoSocket) loginX509(cred Credential) error {
cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginPlain(cred Credential) error {
cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
res := authResult{}
return socket.loginRun(cred.Source, &cmd, &res, func() error {
if !res.Ok {
return errors.New(res.ErrMsg)
}
socket.Lock()
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
socket.Unlock()
return nil
})
}
func (socket *mongoSocket) loginSASL(cred Credential) error {
var sasl saslStepper
var err error
if cred.Mechanism == "SCRAM-SHA-1" {
// SCRAM is handled without external libraries.
sasl = saslNewScram(cred)
} else if len(cred.ServiceHost) > 0 {
sasl, err = saslNew(cred, cred.ServiceHost)
} else {
sasl, err = saslNew(cred, socket.Server().Addr)
}
if err != nil {
return err
}
defer sasl.Close()
// The goal of this logic is to carry a locked socket until the
// local SASL step confirms the auth is valid; the socket needs to be
// locked so that concurrent action doesn't leave the socket in an
// auth state that doesn't reflect the operations that took place.
// As a simple case, imagine inverting login=>logout to logout=>login.
//
// The logic below works because the lock func isn't called concurrently.
locked := false
lock := func(b bool) {
if locked != b {
locked = b
if b {
socket.Lock()
} else {
socket.Unlock()
}
}
}
lock(true)
defer lock(false)
start := 1
cmd := saslCmd{}
res := saslResult{}
for {
payload, done, err := sasl.Step(res.Payload)
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
lock(false)
cmd = saslCmd{
Start: start,
Continue: 1 - start,
ConversationId: res.ConversationId,
Mechanism: cred.Mechanism,
Payload: payload,
}
start = 0
err = socket.loginRun(cred.Source, &cmd, &res, func() error {
// See the comment on lock for why this is necessary.
lock(true)
if !res.Ok || res.NotOk {
return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
}
return nil
})
if err != nil {
return err
}
if done && res.Done {
socket.dropAuth(cred.Source)
socket.creds = append(socket.creds, cred)
break
}
}
return nil
}
func saslNewScram(cred Credential) *saslScram {
credsum := md5.New()
credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
return &saslScram{cred: cred, client: client}
}
type saslScram struct {
cred Credential
client *scram.Client
}
func (s *saslScram) Close() {}
func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
more := s.client.Step(serverData)
return s.client.Out(), !more, s.client.Err()
}
func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
var mutex sync.Mutex
var replyErr error
mutex.Lock()
op := queryOp{}
op.query = query
op.collection = db + ".$cmd"
op.limit = -1
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
defer mutex.Unlock()
if err != nil {
replyErr = err
return
}
err = bson.Unmarshal(docData, result)
if err != nil {
replyErr = err
} else {
// Must handle this within the read loop for the socket, so
// that concurrent login requests are properly ordered.
replyErr = f()
}
}
err := socket.Query(&op)
if err != nil {
return err
}
mutex.Lock() // Wait.
return replyErr
}
func (socket *mongoSocket) Logout(db string) {
socket.Lock()
cred, found := socket.dropAuth(db)
if found {
debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
socket.logout = append(socket.logout, cred)
}
socket.Unlock()
}
func (socket *mongoSocket) LogoutAll() {
socket.Lock()
if l := len(socket.creds); l > 0 {
debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
socket.logout = append(socket.logout, socket.creds...)
socket.creds = socket.creds[0:0]
}
socket.Unlock()
}
func (socket *mongoSocket) flushLogout() (ops []interface{}) {
socket.Lock()
if l := len(socket.logout); l > 0 {
debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
for i := 0; i != l; i++ {
op := queryOp{}
op.query = &logoutCmd{1}
op.collection = socket.logout[i].Source + ".$cmd"
op.limit = -1
ops = append(ops, &op)
}
socket.logout = socket.logout[0:0]
}
socket.Unlock()
return
}
func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
for i, sockCred := range socket.creds {
if sockCred.Source == db {
copy(socket.creds[i:], socket.creds[i+1:])
socket.creds = socket.creds[:len(socket.creds)-1]
return sockCred, true
}
}
return cred, false
}
func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
for i, sockCred := range socket.logout {
if sockCred == cred {
copy(socket.logout[i:], socket.logout[i+1:])
socket.logout = socket.logout[:len(socket.logout)-1]
return true
}
}
return false
}

25
vendor/gopkg.in/mgo.v2/bson/LICENSE generated vendored Normal file
View file

@ -0,0 +1,25 @@
BSON library for Go
Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

721
vendor/gopkg.in/mgo.v2/bson/bson.go generated vendored Normal file
View file

@ -0,0 +1,721 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package bson is an implementation of the BSON specification for Go:
//
// http://bsonspec.org
//
// It was created as part of the mgo MongoDB driver for Go, but is standalone
// and may be used on its own without the driver.
package bson
import (
"bytes"
"crypto/md5"
"crypto/rand"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// --------------------------------------------------------------------------
// The public API.
// A value implementing the bson.Getter interface will have its GetBSON
// method called when the given value has to be marshalled, and the result
// of this method will be marshaled in place of the actual object.
//
// If GetBSON returns a non-nil error, the marshalling procedure
// will stop and error out with the provided value.
type Getter interface {
GetBSON() (interface{}, error)
}
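// A minimal example (editor's sketch) of a type marshalling as its
// underlying value:
//
//	type Celsius float64
//
//	func (c Celsius) GetBSON() (interface{}, error) {
//		return float64(c), nil
//	}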
// A value implementing the bson.Setter interface will receive the BSON
// value via the SetBSON method during unmarshaling, and the object
// itself will not be changed as usual.
//
// If setting the value works, the method should return nil or alternatively
// bson.SetZero to set the respective field to its zero value (nil for
// pointer types). If SetBSON returns a value of type bson.TypeError, the
// BSON value will be omitted from a map or slice being decoded and the
// unmarshalling will continue. If it returns any other non-nil error, the
// unmarshalling procedure will stop and error out with the provided value.
//
// This interface is generally useful in pointer receivers, since the method
// will want to change the receiver. A type field that implements the Setter
// interface doesn't have to be a pointer, though.
//
// Unlike the usual behavior, unmarshalling onto a value that implements a
// Setter interface will NOT reset the value to its zero state. This allows
// the value to decide by itself how to be unmarshalled.
//
// For example:
//
// type MyString string
//
// func (s *MyString) SetBSON(raw bson.Raw) error {
// return raw.Unmarshal(s)
// }
//
type Setter interface {
SetBSON(raw Raw) error
}
// SetZero may be returned from a SetBSON method to have the value set to
// its respective zero value. When used in pointer values, this will set the
// field to nil rather than to the pre-allocated value.
var SetZero = errors.New("set to zero")
// M is a convenient alias for a map[string]interface{} map, useful for
// dealing with BSON in a native way. For instance:
//
// bson.M{"a": 1, "b": true}
//
// There's no special handling for this type in addition to what's done anyway
// for an equivalent map type. Elements in the map will be dumped in an
// undefined order. See also the bson.D type for an ordered alternative.
type M map[string]interface{}
// D represents a BSON document containing ordered elements. For example:
//
// bson.D{{"a", 1}, {"b", true}}
//
// In some situations, such as when creating indexes for MongoDB, the order in
// which the elements are defined is important. If the order is not important,
// using a map is generally more comfortable. See bson.M and bson.RawD.
type D []DocElem
// DocElem is an element of the bson.D document representation.
type DocElem struct {
Name string
Value interface{}
}
// Map returns a map out of the ordered element name/value pairs in d.
func (d D) Map() (m M) {
m = make(M, len(d))
for _, item := range d {
m[item.Name] = item.Value
}
return m
}
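// For example (editor's sketch):
//
//	d := bson.D{{"a", 1}, {"b", true}}
//	d.Map() // bson.M{"a": 1, "b": true}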
// The Raw type represents raw unprocessed BSON documents and elements.
// Kind is the kind of element as defined per the BSON specification, and
// Data is the raw unprocessed data for the respective element.
// Using this type it is possible to unmarshal or marshal values partially.
//
// Relevant documentation:
//
// http://bsonspec.org/#/specification
//
type Raw struct {
Kind byte
Data []byte
}
// RawD represents a BSON document containing raw unprocessed elements.
// This low-level representation may be useful when lazily processing
// documents of uncertain content, or when manipulating the raw content
// documents in general.
type RawD []RawDocElem
// RawDocElem is an element of the bson.RawD document representation.
type RawDocElem struct {
Name string
Value Raw
}
// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
// long. MongoDB objects by default have such a property set in their "_id"
// property.
//
// http://www.mongodb.org/display/DOCS/Object+IDs
type ObjectId string
// ObjectIdHex returns an ObjectId from the provided hex representation.
// Calling this function with an invalid hex representation will
// cause a runtime panic. See the IsObjectIdHex function.
func ObjectIdHex(s string) ObjectId {
d, err := hex.DecodeString(s)
if err != nil || len(d) != 12 {
panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
}
return ObjectId(d)
}
// IsObjectIdHex returns whether s is a valid hex representation of
// an ObjectId. See the ObjectIdHex function.
func IsObjectIdHex(s string) bool {
if len(s) != 24 {
return false
}
_, err := hex.DecodeString(s)
return err == nil
}
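// For example (editor's sketch), validating before converting so the panic
// in ObjectIdHex cannot trigger:
//
//	if bson.IsObjectIdHex(s) {
//		id := bson.ObjectIdHex(s) // safe: s is exactly 24 hex characters
//		fmt.Println(id.Hex())
//	}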
// objectIdCounter is atomically incremented when generating a new ObjectId
// with the NewObjectId function. It is used as the counter part of an id.
var objectIdCounter uint32 = readRandomUint32()
// readRandomUint32 returns a random objectIdCounter.
func readRandomUint32() uint32 {
var b [4]byte
_, err := io.ReadFull(rand.Reader, b[:])
if err != nil {
panic(fmt.Errorf("cannot read random object id: %v", err))
}
return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
}
// machineId stores the machine id, generated once and reused by subsequent
// calls to the NewObjectId function.
var machineId = readMachineId()
// readMachineId generates and returns a machine id. If the hostname is
// unavailable it falls back to random bytes; if that also fails, it panics.
func readMachineId() []byte {
var sum [3]byte
id := sum[:]
hostname, err1 := os.Hostname()
if err1 != nil {
_, err2 := io.ReadFull(rand.Reader, id)
if err2 != nil {
panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
}
return id
}
hw := md5.New()
hw.Write([]byte(hostname))
copy(id, hw.Sum(nil))
return id
}
// NewObjectId returns a new unique ObjectId.
func NewObjectId() ObjectId {
var b [12]byte
// Timestamp, 4 bytes, big endian
binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
// Machine, first 3 bytes of md5(hostname)
b[4] = machineId[0]
b[5] = machineId[1]
b[6] = machineId[2]
// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
pid := os.Getpid()
b[7] = byte(pid >> 8)
b[8] = byte(pid)
// Increment, 3 bytes, big endian
i := atomic.AddUint32(&objectIdCounter, 1)
b[9] = byte(i >> 16)
b[10] = byte(i >> 8)
b[11] = byte(i)
return ObjectId(b[:])
}
// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
// with the provided number of seconds from epoch UTC, and all other parts
filled with zeroes. It is not safe to insert a document with an id generated
by this method; it is useful only for queries to find documents with ids
// generated before or after the specified timestamp.
func NewObjectIdWithTime(t time.Time) ObjectId {
var b [12]byte
binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
return ObjectId(string(b[:]))
}
// String returns a hex string representation of the id.
// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
func (id ObjectId) String() string {
return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
}
// Hex returns a hex representation of the ObjectId.
func (id ObjectId) Hex() string {
return hex.EncodeToString([]byte(id))
}
// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
func (id ObjectId) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
}
var nullBytes = []byte("null")
// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
func (id *ObjectId) UnmarshalJSON(data []byte) error {
if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
*id = ""
return nil
}
if len(data) != 26 || data[0] != '"' || data[25] != '"' {
return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[1:25])
if err != nil {
return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
}
*id = ObjectId(string(buf[:]))
return nil
}
// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
func (id ObjectId) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%x", string(id))), nil
}
// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
func (id *ObjectId) UnmarshalText(data []byte) error {
if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
*id = ""
return nil
}
if len(data) != 24 {
return fmt.Errorf("invalid ObjectId: %s", data)
}
var buf [12]byte
_, err := hex.Decode(buf[:], data[:])
if err != nil {
return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
}
*id = ObjectId(string(buf[:]))
return nil
}
// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
func (id ObjectId) Valid() bool {
return len(id) == 12
}
// byteSlice returns byte slice of id from start to end.
// Calling this function with an invalid id will cause a runtime panic.
func (id ObjectId) byteSlice(start, end int) []byte {
if len(id) != 12 {
panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
}
return []byte(string(id)[start:end])
}
// Time returns the timestamp part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Time() time.Time {
// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
return time.Unix(secs, 0)
}
// Machine returns the 3-byte machine id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Machine() []byte {
return id.byteSlice(4, 7)
}
// Pid returns the process id part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Pid() uint16 {
return binary.BigEndian.Uint16(id.byteSlice(7, 9))
}
// Counter returns the incrementing value part of the id.
// It's a runtime error to call this method with an invalid id.
func (id ObjectId) Counter() int32 {
b := id.byteSlice(9, 12)
// Counter is stored as big-endian 3-byte value
return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
}
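// Editor's note: the accessors above recover the layout written by
// NewObjectId; a quick sketch:
//
//	id := bson.NewObjectId()
//	id.Time()    // creation time, second precision
//	id.Machine() // 3-byte md5(hostname) prefix
//	id.Pid()     // process id of the generating process
//	id.Counter() // per-process increment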
// The Symbol type is similar to a string and is used in languages with a
// distinct symbol type.
type Symbol string
// Now returns the current time with millisecond precision. MongoDB stores
// timestamps with the same precision, so a Time returned from this method
// will not change after a roundtrip to the database. That's the only reason
// why this function exists. Using the time.Now function also works fine
// otherwise.
func Now() time.Time {
return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
}
// MongoTimestamp is a special internal type used by MongoDB that for some
// strange reason has its own datatype defined in BSON.
type MongoTimestamp int64
type orderKey int64
// MaxKey is a special value that compares higher than all other possible BSON
// values in a MongoDB database.
var MaxKey = orderKey(1<<63 - 1)
// MinKey is a special value that compares lower than all other possible BSON
// values in a MongoDB database.
var MinKey = orderKey(-1 << 63)
type undefined struct{}
// Undefined represents the undefined BSON value.
var Undefined undefined
// Binary is a representation for non-standard binary values. Any kind should
// work, but the following are known as of this writing:
//
// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
// 0x01 - Function (!?)
// 0x02 - Obsolete generic.
// 0x03 - UUID
// 0x05 - MD5
// 0x80 - User defined.
//
type Binary struct {
Kind byte
Data []byte
}
// RegEx represents a regular expression. The Options field may contain
// individual characters defining the way in which the pattern should be
// applied, and must be sorted. Valid options as of this writing are 'i' for
// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
// unicode. The value of the Options parameter is not verified before being
// marshaled into the BSON format.
type RegEx struct {
Pattern string
Options string
}
// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
// will be marshaled as a mapping from identifiers to values that may be
// used when evaluating the provided Code.
type JavaScript struct {
Code string
Scope interface{}
}
// DBPointer refers to a document id in a namespace.
//
// This type is deprecated in the BSON specification and should not be used
// except for backwards compatibility with ancient applications.
type DBPointer struct {
Namespace string
Id ObjectId
}
const initialBufferSize = 64
func handleErr(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
} else if _, ok := r.(externalPanic); ok {
panic(r)
} else if s, ok := r.(string); ok {
*err = errors.New(s)
} else if e, ok := r.(error); ok {
*err = e
} else {
panic(r)
}
}
}
// Marshal serializes the in value, which may be a map or a struct value.
// In the case of struct values, only exported fields will be serialized,
// and the order of serialized fields will match that of the struct itself.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the marshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
//
// minsize Marshal an int64 value as an int32, if that's feasible
// while preserving the numeric value.
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the bson keys of other struct fields.
//
// Some examples:
//
// type T struct {
// A bool
// B int "myb"
// C string "myc,omitempty"
// D string `bson:",omitempty" json:"jsonkey"`
// E int64 ",minsize"
// F int64 "myf,omitempty,minsize"
// }
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := &encoder{make([]byte, 0, initialBufferSize)}
e.addDoc(reflect.ValueOf(in))
return e.out, nil
}
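// A minimal round-trip sketch (editor's addition) for the tags documented
// above:
//
//	type T struct {
//		B int    `bson:"myb"`
//		C string `bson:"myc,omitempty"`
//	}
//
//	data, err := bson.Marshal(T{B: 1})
//	// on success, data encodes {myb: 1}; myc is omitted because it is empty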
// Unmarshal deserializes data from in into the out value. The out value
// must be a map, a pointer to a struct, or a pointer to a bson.D value.
// In the case of struct values, only exported fields will be deserialized.
// The lowercased field name is used as the key for each exported field,
// but this behavior may be changed using the respective field tag.
// The tag may also contain flags to tweak the unmarshalling behavior for
// the field. The tag formats accepted are:
//
// "[<key>][,<flag1>[,<flag2>]]"
//
// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported during unmarshal (see the
// Marshal method for other flags):
//
// inline Inline the field, which must be a struct or a map.
// Inlined structs are handled as if its fields were part
// of the outer struct. An inlined map causes keys that do
// not match any other struct field to be inserted in the
// map rather than being discarded as usual.
//
// The target field or element types of out may not necessarily match
// the BSON values of the provided data. The following conversions are
// made automatically:
//
// - Numeric types are converted if at least the integer part of the
// value would be preserved correctly
// - Bools are converted to numeric types as 1 or 0
// - Numeric types are converted to bools as true if not 0 or false otherwise
// - Binary and string BSON data is converted to a string, array or byte slice
//
// If the value would not fit the type and cannot be converted, it's
// silently skipped.
//
// Pointer values are initialized when necessary.
func Unmarshal(in []byte, out interface{}) (err error) {
if raw, ok := out.(*Raw); ok {
raw.Kind = 3
raw.Data = in
return nil
}
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
fallthrough
case reflect.Map:
d := newDecoder(in)
d.readDocTo(v)
case reflect.Struct:
return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Unmarshal needs a map or a pointer to a struct.")
}
return nil
}
// Unmarshal deserializes raw into the out value. If the out value type
// is not compatible with raw, a *bson.TypeError is returned.
//
// See the Unmarshal function documentation for more details on the
// unmarshalling process.
func (raw Raw) Unmarshal(out interface{}) (err error) {
defer handleErr(&err)
v := reflect.ValueOf(out)
switch v.Kind() {
case reflect.Ptr:
v = v.Elem()
fallthrough
case reflect.Map:
d := newDecoder(raw.Data)
good := d.readElemTo(v, raw.Kind)
if !good {
return &TypeError{v.Type(), raw.Kind}
}
case reflect.Struct:
return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
default:
return errors.New("Raw Unmarshal needs a map or a valid pointer.")
}
return nil
}
type TypeError struct {
Type reflect.Type
Kind byte
}
func (e *TypeError) Error() string {
return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
InlineMap int
Zero reflect.Value
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
MinSize bool
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var structMapMutex sync.RWMutex
type externalPanic string
func (e externalPanic) String() string {
return string(e)
}
func getStructInfo(st reflect.Type) (*structInfo, error) {
structMapMutex.RLock()
sinfo, found := structMap[st]
structMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("bson")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "minsize":
info.MinSize = true
case "inline":
inline = true
default:
msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
panic(externalPanic(msg))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
panic("Option ,inline needs a struct value or map field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
fieldsMap,
fieldsList,
inlineMap,
reflect.New(st).Elem(),
}
structMapMutex.Lock()
structMap[st] = sinfo
structMapMutex.Unlock()
return sinfo, nil
}

844
vendor/gopkg.in/mgo.v2/bson/decode.go generated vendored Normal file
View file

@ -0,0 +1,844 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"sync"
"time"
)
type decoder struct {
in []byte
i int
docType reflect.Type
}
var typeM = reflect.TypeOf(M{})
func newDecoder(in []byte) *decoder {
return &decoder{in, 0, typeM}
}
// --------------------------------------------------------------------------
// Some helper functions.
func corrupted() {
panic("Document is corrupted")
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
// --------------------------------------------------------------------------
// Unmarshaling of documents.
const (
setterUnknown = iota
setterNone
setterType
setterAddr
)
var setterStyles map[reflect.Type]int
var setterIface reflect.Type
var setterMutex sync.RWMutex
func init() {
var iface Setter
setterIface = reflect.TypeOf(&iface).Elem()
setterStyles = make(map[reflect.Type]int)
}
func setterStyle(outt reflect.Type) int {
setterMutex.RLock()
style := setterStyles[outt]
setterMutex.RUnlock()
if style == setterUnknown {
setterMutex.Lock()
defer setterMutex.Unlock()
if outt.Implements(setterIface) {
setterStyles[outt] = setterType
} else if reflect.PtrTo(outt).Implements(setterIface) {
setterStyles[outt] = setterAddr
} else {
setterStyles[outt] = setterNone
}
style = setterStyles[outt]
}
return style
}
func getSetter(outt reflect.Type, out reflect.Value) Setter {
style := setterStyle(outt)
if style == setterNone {
return nil
}
if style == setterAddr {
if !out.CanAddr() {
return nil
}
out = out.Addr()
} else if outt.Kind() == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
return out.Interface().(Setter)
}
func clearMap(m reflect.Value) {
var none reflect.Value
for _, k := range m.MapKeys() {
m.SetMapIndex(k, none)
}
}
func (d *decoder) readDocTo(out reflect.Value) {
var elemType reflect.Type
outt := out.Type()
outk := outt.Kind()
for {
if outk == reflect.Ptr && out.IsNil() {
out.Set(reflect.New(outt.Elem()))
}
if setter := getSetter(outt, out); setter != nil {
var raw Raw
d.readDocTo(reflect.ValueOf(&raw))
err := setter.SetBSON(raw)
if _, ok := err.(*TypeError); err != nil && !ok {
panic(err)
}
return
}
if outk == reflect.Ptr {
out = out.Elem()
outt = out.Type()
outk = out.Kind()
continue
}
break
}
var fieldsMap map[string]fieldInfo
var inlineMap reflect.Value
start := d.i
origout := out
if outk == reflect.Interface {
if d.docType.Kind() == reflect.Map {
mv := reflect.MakeMap(d.docType)
out.Set(mv)
out = mv
} else {
dv := reflect.New(d.docType).Elem()
out.Set(dv)
out = dv
}
outt = out.Type()
outk = outt.Kind()
}
docType := d.docType
keyType := typeString
convertKey := false
switch outk {
case reflect.Map:
keyType = outt.Key()
if keyType.Kind() != reflect.String {
panic("BSON map must have string keys. Got: " + outt.String())
}
if keyType != typeString {
convertKey = true
}
elemType = outt.Elem()
if elemType == typeIface {
d.docType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(out.Type()))
} else if out.Len() > 0 {
clearMap(out)
}
case reflect.Struct:
if outt != typeRaw {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
fieldsMap = sinfo.FieldsMap
out.Set(sinfo.Zero)
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
if !inlineMap.IsNil() && inlineMap.Len() > 0 {
clearMap(inlineMap)
}
elemType = inlineMap.Type().Elem()
if elemType == typeIface {
d.docType = inlineMap.Type()
}
}
}
case reflect.Slice:
switch outt.Elem() {
case typeDocElem:
origout.Set(d.readDocElems(outt))
return
case typeRawDocElem:
origout.Set(d.readRawDocElems(outt))
return
}
fallthrough
default:
panic("Unsupported document type for unmarshalling: " + out.Type().String())
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
switch outk {
case reflect.Map:
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
k := reflect.ValueOf(name)
if convertKey {
k = k.Convert(keyType)
}
out.SetMapIndex(k, e)
}
case reflect.Struct:
if outt == typeRaw {
d.dropElem(kind)
} else {
if info, ok := fieldsMap[name]; ok {
if info.Inline == nil {
d.readElemTo(out.Field(info.Num), kind)
} else {
d.readElemTo(out.FieldByIndex(info.Inline), kind)
}
} else if inlineMap.IsValid() {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
inlineMap.SetMapIndex(reflect.ValueOf(name), e)
}
} else {
d.dropElem(kind)
}
}
case reflect.Slice: // DocElem/RawDocElem slices returned before this loop; nothing to do.
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
d.docType = docType
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
}
}
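// A usage sketch of the dispatch above, via the package's public Unmarshal
// entry point; the data layout and field names are assumptions:
//
//     var m M
//     err := Unmarshal(data, &m) // interface/map output: built via d.docType
//
//     var s struct {
//         Name string `bson:"name"`
//     }
//     err = Unmarshal(data, &s) // struct output: fields resolved via fieldsMap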
func (d *decoder) readArrayDocTo(out reflect.Value) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
i := 0
l := out.Len()
for d.in[d.i] != '\x00' {
if i >= l {
panic("Length mismatch on array field")
}
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
d.readElemTo(out.Index(i), kind)
if d.i >= end {
corrupted()
}
i++
}
if i != l {
panic("Length mismatch on array field")
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
tmp := make([]reflect.Value, 0, 8)
elemType := t.Elem()
if elemType == typeRawDocElem {
d.dropElem(0x04)
return reflect.Zero(t).Interface()
}
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
for d.i < end && d.in[d.i] != '\x00' {
d.i++
}
if d.i >= end {
corrupted()
}
d.i++
e := reflect.New(elemType).Elem()
if d.readElemTo(e, kind) {
tmp = append(tmp, e)
}
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
n := len(tmp)
slice := reflect.MakeSlice(t, n, n)
for i := 0; i != n; i++ {
slice.Index(i).Set(tmp[i])
}
return slice.Interface()
}
var typeSlice = reflect.TypeOf([]interface{}{})
var typeIface = typeSlice.Elem()
func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]DocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := DocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
docType := d.docType
d.docType = typ
slice := make([]RawDocElem, 0, 8)
d.readDocWith(func(kind byte, name string) {
e := RawDocElem{Name: name}
v := reflect.ValueOf(&e.Value)
if d.readElemTo(v.Elem(), kind) {
slice = append(slice, e)
}
})
slicev := reflect.New(typ).Elem()
slicev.Set(reflect.ValueOf(slice))
d.docType = docType
return slicev
}
func (d *decoder) readDocWith(f func(kind byte, name string)) {
end := int(d.readInt32())
end += d.i - 4
if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
corrupted()
}
for d.in[d.i] != '\x00' {
kind := d.readByte()
name := d.readCStr()
if d.i >= end {
corrupted()
}
f(kind, name)
if d.i >= end {
corrupted()
}
}
d.i++ // '\x00'
if d.i != end {
corrupted()
}
}
// --------------------------------------------------------------------------
// Unmarshaling of individual elements within a document.
var blackHole = settableValueOf(struct{}{})
func (d *decoder) dropElem(kind byte) {
d.readElemTo(blackHole, kind)
}
// Attempt to decode an element from the document and put it into out.
// If the types are not compatible, the returned ok value will be
// false and out will be unchanged.
func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
start := d.i
if kind == 0x03 {
// Delegate unmarshaling of documents.
outt := out.Type()
outk := out.Kind()
switch outk {
case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
d.readDocTo(out)
return true
}
if setterStyle(outt) != setterNone {
d.readDocTo(out)
return true
}
if outk == reflect.Slice {
switch outt.Elem() {
case typeDocElem:
out.Set(d.readDocElems(outt))
case typeRawDocElem:
out.Set(d.readRawDocElems(outt))
default:
d.readDocTo(blackHole)
}
return true
}
d.readDocTo(blackHole)
return true
}
var in interface{}
switch kind {
case 0x01: // Float64
in = d.readFloat64()
case 0x02: // UTF-8 string
in = d.readStr()
case 0x03: // Document
panic("Can't happen. Handled above.")
case 0x04: // Array
outt := out.Type()
if setterStyle(outt) != setterNone {
// Skip the value so its data is handed to the setter below.
d.dropElem(kind)
break
}
for outt.Kind() == reflect.Ptr {
outt = outt.Elem()
}
switch outt.Kind() {
case reflect.Array:
d.readArrayDocTo(out)
return true
case reflect.Slice:
in = d.readSliceDoc(outt)
default:
in = d.readSliceDoc(typeSlice)
}
case 0x05: // Binary
b := d.readBinary()
if b.Kind == 0x00 || b.Kind == 0x02 {
in = b.Data
} else {
in = b
}
case 0x06: // Undefined (obsolete, but still seen in the wild)
in = Undefined
case 0x07: // ObjectId
in = ObjectId(d.readBytes(12))
case 0x08: // Bool
in = d.readBool()
case 0x09: // UTC datetime
// MongoDB stores these as milliseconds since the Unix epoch.
i := d.readInt64()
if i == -62135596800000 {
in = time.Time{} // In UTC for convenience.
} else {
in = time.Unix(i/1e3, i%1e3*1e6)
}
case 0x0A: // Nil
in = nil
case 0x0B: // RegEx
in = d.readRegEx()
case 0x0C: // DBPointer (deprecated)
in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
case 0x0D: // JavaScript without scope
in = JavaScript{Code: d.readStr()}
case 0x0E: // Symbol
in = Symbol(d.readStr())
case 0x0F: // JavaScript with scope
d.i += 4 // Skip length
js := JavaScript{d.readStr(), make(M)}
d.readDocTo(reflect.ValueOf(js.Scope))
in = js
case 0x10: // Int32
in = int(d.readInt32())
case 0x11: // Mongo-specific timestamp
in = MongoTimestamp(d.readInt64())
case 0x12: // Int64
in = d.readInt64()
case 0x7F: // Max key
in = MaxKey
case 0xFF: // Min key
in = MinKey
default:
panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
}
outt := out.Type()
if outt == typeRaw {
out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
return true
}
if setter := getSetter(outt, out); setter != nil {
err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
if err == SetZero {
out.Set(reflect.Zero(outt))
return true
}
if err == nil {
return true
}
if _, ok := err.(*TypeError); !ok {
panic(err)
}
return false
}
if in == nil {
out.Set(reflect.Zero(outt))
return true
}
outk := outt.Kind()
// Dereference and initialize pointer if necessary.
first := true
for outk == reflect.Ptr {
if !out.IsNil() {
out = out.Elem()
} else {
elem := reflect.New(outt.Elem())
if first {
// Only set if value is compatible.
first = false
defer func(out, elem reflect.Value) {
if good {
out.Set(elem)
}
}(out, elem)
} else {
out.Set(elem)
}
out = elem
}
outt = out.Type()
outk = outt.Kind()
}
inv := reflect.ValueOf(in)
if outt == inv.Type() {
out.Set(inv)
return true
}
switch outk {
case reflect.Interface:
out.Set(inv)
return true
case reflect.String:
switch inv.Kind() {
case reflect.String:
out.SetString(inv.String())
return true
case reflect.Slice:
if b, ok := in.([]byte); ok {
out.SetString(string(b))
return true
}
case reflect.Int, reflect.Int64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatInt(inv.Int(), 10))
return true
}
case reflect.Float64:
if outt == typeJSONNumber {
out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
return true
}
}
case reflect.Slice, reflect.Array:
// Remember, array (0x04) slices are built with the correct
// element type. If we got here, it must be a cross-BSON-kind
// conversion (e.g. a 0x05 binary element unmarshalled into a string).
if outt.Elem().Kind() != reflect.Uint8 {
break
}
switch inv.Kind() {
case reflect.String:
slice := []byte(inv.String())
out.Set(reflect.ValueOf(slice))
return true
case reflect.Slice:
switch outt.Kind() {
case reflect.Array:
reflect.Copy(out, inv)
case reflect.Slice:
out.SetBytes(inv.Bytes())
}
return true
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetInt(inv.Int())
return true
case reflect.Float32, reflect.Float64:
out.SetInt(int64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetInt(1)
} else {
out.SetInt(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("can't happen: no uint types in BSON (!?)")
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch inv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetUint(uint64(inv.Int()))
return true
case reflect.Float32, reflect.Float64:
out.SetUint(uint64(inv.Float()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetUint(1)
} else {
out.SetUint(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON.")
}
case reflect.Float32, reflect.Float64:
switch inv.Kind() {
case reflect.Float32, reflect.Float64:
out.SetFloat(inv.Float())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetFloat(float64(inv.Int()))
return true
case reflect.Bool:
if inv.Bool() {
out.SetFloat(1)
} else {
out.SetFloat(0)
}
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Bool:
switch inv.Kind() {
case reflect.Bool:
out.SetBool(inv.Bool())
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
out.SetBool(inv.Int() != 0)
return true
case reflect.Float32, reflect.Float64:
out.SetBool(inv.Float() != 0)
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
panic("Can't happen. No uint types in BSON?")
}
case reflect.Struct:
if outt == typeURL && inv.Kind() == reflect.String {
u, err := url.Parse(inv.String())
if err != nil {
panic(err)
}
out.Set(reflect.ValueOf(u).Elem())
return true
}
if outt == typeBinary {
if b, ok := in.([]byte); ok {
out.Set(reflect.ValueOf(Binary{Data: b}))
return true
}
}
}
return false
}
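// A sketch of the cross-kind conversions handled above, again through the
// public Unmarshal entry point; field names and values are assumptions:
//
//     // An int32 element (kind 0x10) may land in any numeric Go field:
//     var out struct {
//         N float64 `bson:"n"`
//     }
//     err := Unmarshal(data, &out) // {"n": int32(7)} yields out.N == 7.0
//
//     // A binary element (kind 0x05, subtype 0x00) may land in a string:
//     var buf struct {
//         B string `bson:"b"`
//     }
//     err = Unmarshal(data, &buf)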
// --------------------------------------------------------------------------
// Parsers of basic types.
func (d *decoder) readRegEx() RegEx {
re := RegEx{}
re.Pattern = d.readCStr()
re.Options = d.readCStr()
return re
}
func (d *decoder) readBinary() Binary {
l := d.readInt32()
b := Binary{}
b.Kind = d.readByte()
b.Data = d.readBytes(l)
if b.Kind == 0x02 && len(b.Data) >= 4 {
// Weird obsolete format with redundant length.
b.Data = b.Data[4:]
}
return b
}
func (d *decoder) readStr() string {
l := d.readInt32()
b := d.readBytes(l - 1)
if d.readByte() != '\x00' {
corrupted()
}
return string(b)
}
func (d *decoder) readCStr() string {
start := d.i
end := start
l := len(d.in)
for ; end != l; end++ {
if d.in[end] == '\x00' {
break
}
}
d.i = end + 1
if d.i > l {
corrupted()
}
return string(d.in[start:end])
}
func (d *decoder) readBool() bool {
b := d.readByte()
if b == 0 {
return false
}
if b == 1 {
return true
}
panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
}
func (d *decoder) readFloat64() float64 {
return math.Float64frombits(uint64(d.readInt64()))
}
func (d *decoder) readInt32() int32 {
b := d.readBytes(4)
return int32((uint32(b[0]) << 0) |
(uint32(b[1]) << 8) |
(uint32(b[2]) << 16) |
(uint32(b[3]) << 24))
}
func (d *decoder) readInt64() int64 {
b := d.readBytes(8)
return int64((uint64(b[0]) << 0) |
(uint64(b[1]) << 8) |
(uint64(b[2]) << 16) |
(uint64(b[3]) << 24) |
(uint64(b[4]) << 32) |
(uint64(b[5]) << 40) |
(uint64(b[6]) << 48) |
(uint64(b[7]) << 56))
}
func (d *decoder) readByte() byte {
i := d.i
d.i++
if d.i > len(d.in) {
corrupted()
}
return d.in[i]
}
func (d *decoder) readBytes(length int32) []byte {
if length < 0 {
corrupted()
}
start := d.i
d.i += int(length)
if d.i < start || d.i > len(d.in) {
corrupted()
}
return d.in[start : start+int(length)]
}

509
vendor/gopkg.in/mgo.v2/bson/encode.go generated vendored Normal file

@ -0,0 +1,509 @@
// BSON library for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// gobson - BSON library for Go.
package bson
import (
"encoding/json"
"fmt"
"math"
"net/url"
"reflect"
"strconv"
"time"
)
// --------------------------------------------------------------------------
// Some internal infrastructure.
var (
typeBinary = reflect.TypeOf(Binary{})
typeObjectId = reflect.TypeOf(ObjectId(""))
typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
typeSymbol = reflect.TypeOf(Symbol(""))
typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
typeOrderKey = reflect.TypeOf(MinKey)
typeDocElem = reflect.TypeOf(DocElem{})
typeRawDocElem = reflect.TypeOf(RawDocElem{})
typeRaw = reflect.TypeOf(Raw{})
typeURL = reflect.TypeOf(url.URL{})
typeTime = reflect.TypeOf(time.Time{})
typeString = reflect.TypeOf("")
typeJSONNumber = reflect.TypeOf(json.Number(""))
)
const itoaCacheSize = 32
var itoaCache []string
func init() {
itoaCache = make([]string, itoaCacheSize)
for i := 0; i != itoaCacheSize; i++ {
itoaCache[i] = strconv.Itoa(i)
}
}
func itoa(i int) string {
if i < itoaCacheSize {
return itoaCache[i]
}
return strconv.Itoa(i)
}
// --------------------------------------------------------------------------
// Marshaling of the document value itself.
type encoder struct {
out []byte
}
func (e *encoder) addDoc(v reflect.Value) {
for {
if vi, ok := v.Interface().(Getter); ok {
getv, err := vi.GetBSON()
if err != nil {
panic(err)
}
v = reflect.ValueOf(getv)
continue
}
if v.Kind() == reflect.Ptr {
v = v.Elem()
continue
}
break
}
if v.Type() == typeRaw {
raw := v.Interface().(Raw)
if raw.Kind != 0x03 && raw.Kind != 0x00 {
panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
}
if len(raw.Data) == 0 {
panic("Attempted to marshal empty Raw document")
}
e.addBytes(raw.Data...)
return
}
start := e.reserveInt32()
switch v.Kind() {
case reflect.Map:
e.addMap(v)
case reflect.Struct:
e.addStruct(v)
case reflect.Array, reflect.Slice:
e.addSlice(v)
default:
panic("Can't marshal " + v.Type().String() + " as a BSON document")
}
e.addBytes(0)
e.setInt32(start, int32(len(e.out)-start))
}
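// A usage sketch of the marshaling path rooted at addDoc, via the public
// Marshal entry point; the values are assumptions for the illustration:
//
//     data, err := Marshal(M{"n": 1}) // map document
//
//     type T struct {
//         N int `bson:"n"`
//     }
//     data, err = Marshal(T{N: 1}) // struct document, via addStruct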
func (e *encoder) addMap(v reflect.Value) {
for _, k := range v.MapKeys() {
e.addElem(k.String(), v.MapIndex(k), false)
}
}
func (e *encoder) addStruct(v reflect.Value) {
sinfo, err := getStructInfo(v.Type())
if err != nil {
panic(err)
}
var value reflect.Value
if sinfo.InlineMap >= 0 {
m := v.Field(sinfo.InlineMap)
if m.Len() > 0 {
for _, k := range m.MapKeys() {
ks := k.String()
if _, found := sinfo.FieldsMap[ks]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
}
e.addElem(ks, m.MapIndex(k), false)
}
}
}
for _, info := range sinfo.FieldsList {
if info.Inline == nil {
value = v.Field(info.Num)
} else {
value = v.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.addElem(info.Key, value, info.MinSize)
}
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Ptr, reflect.Interface:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
if vt == typeTime {
return v.Interface().(time.Time).IsZero()
}
for i := 0; i < v.NumField(); i++ {
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
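// A sketch of how isZero drives the ",omitempty" flag consumed in addStruct
// above; the struct and field names are assumptions:
//
//     type U struct {
//         Name string `bson:"name,omitempty"`
//         Age  int    `bson:"age,omitempty"`
//     }
//     data, _ := Marshal(U{})         // encodes as the empty document {}
//     data, _ = Marshal(U{Name: "x"}) // encodes only the "name" element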
func (e *encoder) addSlice(v reflect.Value) {
vi := v.Interface()
if d, ok := vi.(D); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if d, ok := vi.(RawD); ok {
for _, elem := range d {
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
l := v.Len()
et := v.Type().Elem()
if et == typeDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(DocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
if et == typeRawDocElem {
for i := 0; i < l; i++ {
elem := v.Index(i).Interface().(RawDocElem)
e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
}
return
}
for i := 0; i < l; i++ {
e.addElem(itoa(i), v.Index(i), false)
}
}
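// A sketch of the special cases at the top of addSlice: a D value marshals
// as a document with its keys in the given order, while an ordinary slice
// marshals as an array keyed "0", "1", ... through itoa:
//
//     data, _ := Marshal(D{{"a", 1}, {"b", 2}}) // {"a": 1, "b": 2}, ordered
//     data, _ = Marshal(M{"xs": []int{1, 2}})   // {"xs": [1, 2]}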
// --------------------------------------------------------------------------
// Marshaling of elements in a document.
func (e *encoder) addElemName(kind byte, name string) {
e.addBytes(kind)
e.addBytes([]byte(name)...)
e.addBytes(0)
}
func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
if !v.IsValid() {
e.addElemName('\x0A', name)
return
}
if getter, ok := v.Interface().(Getter); ok {
getv, err := getter.GetBSON()
if err != nil {
panic(err)
}
e.addElem(name, reflect.ValueOf(getv), minSize)
return
}
switch v.Kind() {
case reflect.Interface:
e.addElem(name, v.Elem(), minSize)
case reflect.Ptr:
e.addElem(name, v.Elem(), minSize)
case reflect.String:
s := v.String()
switch v.Type() {
case typeObjectId:
if len(s) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s)) + ")")
}
e.addElemName('\x07', name)
e.addBytes([]byte(s)...)
case typeSymbol:
e.addElemName('\x0E', name)
e.addStr(s)
case typeJSONNumber:
n := v.Interface().(json.Number)
if i, err := n.Int64(); err == nil {
e.addElemName('\x12', name)
e.addInt64(i)
} else if f, err := n.Float64(); err == nil {
e.addElemName('\x01', name)
e.addFloat64(f)
} else {
panic("failed to convert json.Number to a number: " + s)
}
default:
e.addElemName('\x02', name)
e.addStr(s)
}
case reflect.Float32, reflect.Float64:
e.addElemName('\x01', name)
e.addFloat64(v.Float())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
u := v.Uint()
if int64(u) < 0 {
panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
e.addElemName('\x10', name)
e.addInt32(int32(u))
} else {
e.addElemName('\x12', name)
e.addInt64(int64(u))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch v.Type() {
case typeMongoTimestamp:
e.addElemName('\x11', name)
e.addInt64(v.Int())
case typeOrderKey:
if v.Int() == int64(MaxKey) {
e.addElemName('\x7F', name)
} else {
e.addElemName('\xFF', name)
}
default:
i := v.Int()
if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
// It fits into an int32, encode as such.
e.addElemName('\x10', name)
e.addInt32(int32(i))
} else {
e.addElemName('\x12', name)
e.addInt64(i)
}
}
case reflect.Bool:
e.addElemName('\x08', name)
if v.Bool() {
e.addBytes(1)
} else {
e.addBytes(0)
}
case reflect.Map:
e.addElemName('\x03', name)
e.addDoc(v)
case reflect.Slice:
vt := v.Type()
et := vt.Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
e.addBinary('\x00', v.Bytes())
} else if et == typeDocElem || et == typeRawDocElem {
e.addElemName('\x03', name)
e.addDoc(v)
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Array:
et := v.Type().Elem()
if et.Kind() == reflect.Uint8 {
e.addElemName('\x05', name)
if v.CanAddr() {
e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte))
} else {
n := v.Len()
e.addInt32(int32(n))
e.addBytes('\x00')
for i := 0; i < n; i++ {
el := v.Index(i)
e.addBytes(byte(el.Uint()))
}
}
} else {
e.addElemName('\x04', name)
e.addDoc(v)
}
case reflect.Struct:
switch s := v.Interface().(type) {
case Raw:
kind := s.Kind
if kind == 0x00 {
kind = 0x03
}
if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
panic("Attempted to marshal empty Raw document")
}
e.addElemName(kind, name)
e.addBytes(s.Data...)
case Binary:
e.addElemName('\x05', name)
e.addBinary(s.Kind, s.Data)
case DBPointer:
e.addElemName('\x0C', name)
e.addStr(s.Namespace)
if len(s.Id) != 12 {
panic("ObjectIDs must be exactly 12 bytes long (got " +
strconv.Itoa(len(s.Id)) + ")")
}
e.addBytes([]byte(s.Id)...)
case RegEx:
e.addElemName('\x0B', name)
e.addCStr(s.Pattern)
e.addCStr(s.Options)
case JavaScript:
if s.Scope == nil {
e.addElemName('\x0D', name)
e.addStr(s.Code)
} else {
e.addElemName('\x0F', name)
start := e.reserveInt32()
e.addStr(s.Code)
e.addDoc(reflect.ValueOf(s.Scope))
e.setInt32(start, int32(len(e.out)-start))
}
case time.Time:
// MongoDB handles timestamps as milliseconds.
e.addElemName('\x09', name)
e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
case url.URL:
e.addElemName('\x02', name)
e.addStr(s.String())
case undefined:
e.addElemName('\x06', name)
default:
e.addElemName('\x03', name)
e.addDoc(v)
}
default:
panic("Can't marshal " + v.Type().String() + " in a BSON document")
}
}
// --------------------------------------------------------------------------
// Marshaling of base types.
func (e *encoder) addBinary(subtype byte, v []byte) {
if subtype == 0x02 {
// Wonder how that brilliant idea came to life. Obsolete, luckily.
e.addInt32(int32(len(v) + 4))
e.addBytes(subtype)
e.addInt32(int32(len(v)))
} else {
e.addInt32(int32(len(v)))
e.addBytes(subtype)
}
e.addBytes(v...)
}
func (e *encoder) addStr(v string) {
e.addInt32(int32(len(v) + 1))
e.addCStr(v)
}
func (e *encoder) addCStr(v string) {
e.addBytes([]byte(v)...)
e.addBytes(0)
}
func (e *encoder) reserveInt32() (pos int) {
pos = len(e.out)
e.addBytes(0, 0, 0, 0)
return pos
}
func (e *encoder) setInt32(pos int, v int32) {
e.out[pos+0] = byte(v)
e.out[pos+1] = byte(v >> 8)
e.out[pos+2] = byte(v >> 16)
e.out[pos+3] = byte(v >> 24)
}
func (e *encoder) addInt32(v int32) {
u := uint32(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
func (e *encoder) addInt64(v int64) {
u := uint64(v)
e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
}
func (e *encoder) addFloat64(v float64) {
e.addInt64(int64(math.Float64bits(v)))
}
func (e *encoder) addBytes(v ...byte) {
e.out = append(e.out, v...)
}

351
vendor/gopkg.in/mgo.v2/bulk.go generated vendored Normal file

@ -0,0 +1,351 @@
package mgo
import (
"bytes"
"sort"
"gopkg.in/mgo.v2/bson"
)
// Bulk represents an operation that can be prepared with several
// orthogonal changes before being delivered to the server.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, in those releases updates and
// removals are sent individually, and inserts are sent in bulk but have
// suboptimal error reporting compared to more recent versions of the server.
// See the documentation of BulkErrorCase for details on that.
//
// Relevant documentation:
//
// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
//
type Bulk struct {
c *Collection
opcount int
actions []bulkAction
ordered bool
}
type bulkOp int
const (
bulkInsert bulkOp = iota + 1
bulkUpdate
bulkUpdateAll
bulkRemove
)
type bulkAction struct {
op bulkOp
docs []interface{}
idxs []int
}
type bulkUpdateOp []interface{}
type bulkDeleteOp []interface{}
// BulkResult holds the results for a bulk operation.
type BulkResult struct {
Matched int
Modified int // Available only for MongoDB 2.6+
// Be conservative while we understand exactly how to report these
// results in a useful and convenient way, and also how to emulate
// them with prior servers.
private bool
}
// BulkError holds an error returned from running a Bulk operation.
// Individual errors may be obtained and inspected via the Cases method.
type BulkError struct {
ecases []BulkErrorCase
}
func (e *BulkError) Error() string {
if len(e.ecases) == 0 {
return "invalid BulkError instance: no errors"
}
if len(e.ecases) == 1 {
return e.ecases[0].Err.Error()
}
msgs := make([]string, 0, len(e.ecases))
seen := make(map[string]bool)
for _, ecase := range e.ecases {
msg := ecase.Err.Error()
if !seen[msg] {
seen[msg] = true
msgs = append(msgs, msg)
}
}
if len(msgs) == 1 {
return msgs[0]
}
var buf bytes.Buffer
buf.WriteString("multiple errors in bulk operation:\n")
for _, msg := range msgs {
buf.WriteString(" - ")
buf.WriteString(msg)
buf.WriteByte('\n')
}
return buf.String()
}
type bulkErrorCases []BulkErrorCase
func (slice bulkErrorCases) Len() int { return len(slice) }
func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
// BulkErrorCase holds an individual error found while attempting a single change
// within a bulk operation, and the position in which it was enqueued.
//
// MongoDB servers older than version 2.6 do not have proper support for bulk
// operations, so the driver attempts to map its API as much as possible into
// the functionality that works. In particular, only the last error is reported
// for bulk inserts and without any positional information, so the Index
// field is set to -1 in these cases.
type BulkErrorCase struct {
Index int // Position of operation that failed, or -1 if unknown.
Err error
}
// Cases returns all individual errors found while attempting the requested changes.
//
// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
func (e *BulkError) Cases() []BulkErrorCase {
return e.ecases
}
// Bulk returns a value to prepare the execution of a bulk operation.
func (c *Collection) Bulk() *Bulk {
return &Bulk{c: c, ordered: true}
}
// Unordered puts the bulk operation in unordered mode.
//
// In unordered mode the individual operations may be sent
// out of order, which means later operations may proceed
// even if earlier ones have failed.
func (b *Bulk) Unordered() {
b.ordered = false
}
func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
var action *bulkAction
if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
action = &b.actions[len(b.actions)-1]
} else if !b.ordered {
for i := range b.actions {
if b.actions[i].op == op {
action = &b.actions[i]
break
}
}
}
if action == nil {
b.actions = append(b.actions, bulkAction{op: op})
action = &b.actions[len(b.actions)-1]
}
for i := 0; i < opcount; i++ {
action.idxs = append(action.idxs, b.opcount)
b.opcount++
}
return action
}
// Insert queues up the provided documents for insertion.
func (b *Bulk) Insert(docs ...interface{}) {
action := b.action(bulkInsert, len(docs))
action.docs = append(action.docs, docs...)
}
// Remove queues up the provided selectors for removing matching documents.
// Each selector will remove only a single matching document.
func (b *Bulk) Remove(selectors ...interface{}) {
action := b.action(bulkRemove, len(selectors))
for _, selector := range selectors {
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &deleteOp{
Collection: b.c.FullName,
Selector: selector,
Flags: 1,
Limit: 1,
})
}
}
// RemoveAll queues up the provided selectors for removing all matching documents.
// Each selector will remove all matching documents.
func (b *Bulk) RemoveAll(selectors ...interface{}) {
action := b.action(bulkRemove, len(selectors))
for _, selector := range selectors {
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &deleteOp{
Collection: b.c.FullName,
Selector: selector,
Flags: 0,
Limit: 0,
})
}
}
// Update queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair updates at most one matching document.
func (b *Bulk) Update(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.Update requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
})
}
}
// UpdateAll queues up the provided pairs of updating instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update them.
// Each pair updates all documents matching the selector.
func (b *Bulk) UpdateAll(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.UpdateAll requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
Flags: 2,
Multi: true,
})
}
}
// Upsert queues up the provided pairs of upserting instructions.
// The first element of each pair selects which documents must be
// updated, and the second element defines how to update it.
// Each pair updates at most one matching document, inserting a new one if no document matches.
func (b *Bulk) Upsert(pairs ...interface{}) {
if len(pairs)%2 != 0 {
panic("Bulk.Update requires an even number of parameters")
}
action := b.action(bulkUpdate, len(pairs)/2)
for i := 0; i < len(pairs); i += 2 {
selector := pairs[i]
if selector == nil {
selector = bson.D{}
}
action.docs = append(action.docs, &updateOp{
Collection: b.c.FullName,
Selector: selector,
Update: pairs[i+1],
Flags: 1,
Upsert: true,
})
}
}
// Run runs all the operations queued up.
//
// If an error is reported on an unordered bulk operation, the error value may
// be an aggregation of all issues observed. As an exception to that, Insert
// operations running on MongoDB versions prior to 2.6 will report the last
// error only due to a limitation in the wire protocol.
func (b *Bulk) Run() (*BulkResult, error) {
var result BulkResult
var berr BulkError
var failed bool
for i := range b.actions {
action := &b.actions[i]
var ok bool
switch action.op {
case bulkInsert:
ok = b.runInsert(action, &result, &berr)
case bulkUpdate:
ok = b.runUpdate(action, &result, &berr)
case bulkRemove:
ok = b.runRemove(action, &result, &berr)
default:
panic("unknown bulk operation")
}
if !ok {
failed = true
if b.ordered {
break
}
}
}
if failed {
sort.Sort(bulkErrorCases(berr.ecases))
return nil, &berr
}
return &result, nil
}
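// A usage sketch for the Bulk API above; the collection, documents and
// selectors are assumptions for the illustration:
//
//     bulk := collection.Bulk()
//     bulk.Unordered()
//     bulk.Insert(bson.M{"n": 1}, bson.M{"n": 2})
//     bulk.Update(bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 3}})
//     result, err := bulk.Run()
//     if berr, ok := err.(*BulkError); ok {
//         for _, c := range berr.Cases() {
//             // c.Index is -1 when positional data isn't available.
//             _ = c
//         }
//     }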
func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
op := &insertOp{b.c.FullName, action.docs, 0}
if !b.ordered {
op.flags = 1 // ContinueOnError
}
lerr, err := b.c.writeOp(op, b.ordered)
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
if lerr != nil {
result.Matched += lerr.N
result.Modified += lerr.modified
}
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
if lerr != nil {
result.Matched += lerr.N
result.Modified += lerr.modified
}
return b.checkSuccess(action, berr, lerr, err)
}
func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
if lerr != nil && len(lerr.ecases) > 0 {
for i := 0; i < len(lerr.ecases); i++ {
// Map back from the local error index into the visible one.
ecase := lerr.ecases[i]
idx := ecase.Index
if idx >= 0 {
idx = action.idxs[idx]
}
berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
}
return false
} else if err != nil {
for i := 0; i < len(action.idxs); i++ {
berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
}
return false
}
return true
}

679
vendor/gopkg.in/mgo.v2/cluster.go generated vendored Normal file

@ -0,0 +1,679 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
// ---------------------------------------------------------------------------
// Mongo cluster encapsulation.
//
// A cluster enables the communication with one or more servers participating
// in a mongo cluster. This works with individual servers, a replica set,
// a replica pair, one or multiple mongos routers, etc.
type mongoCluster struct {
sync.RWMutex
serverSynced sync.Cond
userSeeds []string
dynaSeeds []string
servers mongoServers
masters mongoServers
references int
syncing bool
direct bool
failFast bool
syncCount uint
setName string
cachedIndex map[string]bool
sync chan bool
dial dialer
}
func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
cluster := &mongoCluster{
userSeeds: userSeeds,
references: 1,
direct: direct,
failFast: failFast,
dial: dial,
setName: setName,
}
cluster.serverSynced.L = cluster.RWMutex.RLocker()
cluster.sync = make(chan bool, 1)
stats.cluster(+1)
go cluster.syncServersLoop()
return cluster
}
// Acquire increases the reference count for the cluster.
func (cluster *mongoCluster) Acquire() {
cluster.Lock()
cluster.references++
debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
cluster.Unlock()
}
// Release decreases the reference count for the cluster. Once
// it reaches zero, all servers will be closed.
func (cluster *mongoCluster) Release() {
cluster.Lock()
if cluster.references == 0 {
panic("cluster.Release() with references == 0")
}
cluster.references--
debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
if cluster.references == 0 {
for _, server := range cluster.servers.Slice() {
server.Close()
}
// Wake up the sync loop so it can die.
cluster.syncServers()
stats.cluster(-1)
}
cluster.Unlock()
}
func (cluster *mongoCluster) LiveServers() (servers []string) {
cluster.RLock()
for _, serv := range cluster.servers.Slice() {
servers = append(servers, serv.Addr)
}
cluster.RUnlock()
return servers
}
func (cluster *mongoCluster) removeServer(server *mongoServer) {
cluster.Lock()
cluster.masters.Remove(server)
other := cluster.servers.Remove(server)
cluster.Unlock()
if other != nil {
other.Close()
log("Removed server ", server.Addr, " from cluster.")
}
server.Close()
}
type isMasterResult struct {
IsMaster bool
Secondary bool
Primary string
Hosts []string
Passives []string
Tags bson.D
Msg string
SetName string `bson:"setName"`
MaxWireVersion int `bson:"maxWireVersion"`
}
func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
// Monotonic lets it talk to a slave and still hold the socket.
session := newSession(Monotonic, cluster, 10*time.Second)
session.setSocket(socket)
err := session.Run("ismaster", result)
session.Close()
return err
}
type possibleTimeout interface {
Timeout() bool
}
var syncSocketTimeout = 5 * time.Second
func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
var syncTimeout time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
syncTimeout = syncSocketTimeout
globalMutex.Unlock()
} else {
syncTimeout = syncSocketTimeout
}
addr := server.Addr
log("SYNC Processing ", addr, "...")
// Retry a few times to avoid knocking a server down for a hiccup.
var result isMasterResult
var tryerr error
for retry := 0; ; retry++ {
if retry == 3 || retry == 1 && cluster.failFast {
return nil, nil, tryerr
}
if retry > 0 {
// Don't abuse the server needlessly if there's something actually wrong.
if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
// Give a chance for waiters to timeout as well.
cluster.serverSynced.Broadcast()
}
time.Sleep(syncShortDelay)
}
// It's not clear what would be a good timeout here. Is it
// better to wait longer or to retry?
socket, _, err := server.AcquireSocket(0, syncTimeout)
if err != nil {
tryerr = err
logf("SYNC Failed to get socket to %s: %v", addr, err)
continue
}
err = cluster.isMaster(socket, &result)
socket.Release()
if err != nil {
tryerr = err
logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
continue
}
debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
break
}
if cluster.setName != "" && result.SetName != cluster.setName {
logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
}
if result.IsMaster {
debugf("SYNC %s is a master.", addr)
if !server.info.Master {
// Made an incorrect assumption above, so fix stats.
stats.conn(-1, false)
stats.conn(+1, true)
}
} else if result.Secondary {
debugf("SYNC %s is a slave.", addr)
} else if cluster.direct {
logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
} else {
logf("SYNC %s is neither a master nor a slave.", addr)
// Let stats track it as whatever was known before.
return nil, nil, errors.New(addr + " is neither a master nor a slave")
}
info = &mongoServerInfo{
Master: result.IsMaster,
Mongos: result.Msg == "isdbgrid",
Tags: result.Tags,
SetName: result.SetName,
MaxWireVersion: result.MaxWireVersion,
}
hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
if result.Primary != "" {
// First in the list to speed up master discovery.
hosts = append(hosts, result.Primary)
}
hosts = append(hosts, result.Hosts...)
hosts = append(hosts, result.Passives...)
debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
return info, hosts, nil
}
type syncKind bool
const (
completeSync syncKind = true
partialSync syncKind = false
)
func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
cluster.Lock()
current := cluster.servers.Search(server.ResolvedAddr)
if current == nil {
if syncKind == partialSync {
cluster.Unlock()
server.Close()
log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
return
}
cluster.servers.Add(server)
if info.Master {
cluster.masters.Add(server)
log("SYNC Adding ", server.Addr, " to cluster as a master.")
} else {
log("SYNC Adding ", server.Addr, " to cluster as a slave.")
}
} else {
if server != current {
panic("addServer attempting to add duplicated server")
}
if server.Info().Master != info.Master {
if info.Master {
log("SYNC Server ", server.Addr, " is now a master.")
cluster.masters.Add(server)
} else {
log("SYNC Server ", server.Addr, " is now a slave.")
cluster.masters.Remove(server)
}
}
}
server.SetInfo(info)
debugf("SYNC Broadcasting availability of server %s", server.Addr)
cluster.serverSynced.Broadcast()
cluster.Unlock()
}
func (cluster *mongoCluster) getKnownAddrs() []string {
cluster.RLock()
max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
seen := make(map[string]bool, max)
known := make([]string, 0, max)
add := func(addr string) {
if _, found := seen[addr]; !found {
seen[addr] = true
known = append(known, addr)
}
}
for _, addr := range cluster.userSeeds {
add(addr)
}
for _, addr := range cluster.dynaSeeds {
add(addr)
}
for _, serv := range cluster.servers.Slice() {
add(serv.Addr)
}
cluster.RUnlock()
return known
}
// syncServers injects a value into the cluster.sync channel to force
// an iteration of the syncServersLoop function.
func (cluster *mongoCluster) syncServers() {
select {
case cluster.sync <- true:
default:
}
}
// How long to wait for a checkup of the cluster topology if nothing
// else kicks a synchronization before that.
const syncServersDelay = 30 * time.Second
const syncShortDelay = 500 * time.Millisecond
// syncServersLoop loops while the cluster is alive to keep its idea of
// the server topology up-to-date. It must be called just once from
// newCluster. The loop iterates once syncServersDelay has passed, or
// if somebody injects a value into the cluster.sync channel to force a
// synchronization. A loop iteration will contact all servers in
// parallel, ask them about known peers and their own role within the
// cluster, and then attempt to do the same with all the peers
// retrieved.
func (cluster *mongoCluster) syncServersLoop() {
for {
debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.references++ // Keep alive while syncing.
direct := cluster.direct
cluster.Unlock()
cluster.syncServersIteration(direct)
// We just synchronized, so consume any outstanding requests.
select {
case <-cluster.sync:
default:
}
cluster.Release()
// Hold off before allowing another sync. No point in
// burning CPU looking for down servers.
if !cluster.failFast {
time.Sleep(syncShortDelay)
}
cluster.Lock()
if cluster.references == 0 {
cluster.Unlock()
break
}
cluster.syncCount++
// Poke all waiters so they have a chance to timeout or
// restart syncing if they wish to.
cluster.serverSynced.Broadcast()
// Check if we have to restart immediately either way.
restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
cluster.Unlock()
if restart {
log("SYNC No masters found. Will synchronize again.")
time.Sleep(syncShortDelay)
continue
}
debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
// Hold off until somebody explicitly requests a synchronization
// or it's time to check for a cluster topology change again.
select {
case <-cluster.sync:
case <-time.After(syncServersDelay):
}
}
debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
}
func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
cluster.RLock()
server := cluster.servers.Search(tcpaddr.String())
cluster.RUnlock()
if server != nil {
return server
}
return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
}
func resolveAddr(addr string) (*net.TCPAddr, error) {
// Simple cases that do not need actual resolution. Works with IPv4 and v6.
if host, port, err := net.SplitHostPort(addr); err == nil {
if port, _ := strconv.Atoi(port); port > 0 {
zone := ""
if i := strings.LastIndex(host, "%"); i >= 0 {
zone = host[i+1:]
host = host[:i]
}
ip := net.ParseIP(host)
if ip != nil {
return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
}
}
}
// Attempt to resolve IPv4 and v6 concurrently.
addrChan := make(chan *net.TCPAddr, 2)
for _, network := range []string{"udp4", "udp6"} {
network := network
go func() {
// The unfortunate UDP dialing hack allows having a timeout on address resolution.
conn, err := net.DialTimeout(network, addr, 10*time.Second)
if err != nil {
addrChan <- nil
} else {
addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
conn.Close()
}
}()
}
// Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
tcpaddr := <-addrChan
if tcpaddr == nil || len(tcpaddr.IP) != 4 {
var timeout <-chan time.Time
if tcpaddr != nil {
// Don't wait too long if an IPv6 address is known.
timeout = time.After(50 * time.Millisecond)
}
select {
case <-timeout:
case tcpaddr2 := <-addrChan:
if tcpaddr == nil || tcpaddr2 != nil {
// It's an IPv4 address or the only known address. Use it.
tcpaddr = tcpaddr2
}
}
}
if tcpaddr == nil {
log("SYNC Failed to resolve server address: ", addr)
return nil, errors.New("failed to resolve server address: " + addr)
}
if tcpaddr.String() != addr {
debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
}
return tcpaddr, nil
}
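// For illustration: a literal address such as "127.0.0.1:27017" or
// "[::1]:27017" takes the fast path above with no network traffic, while a
// hostname is resolved over UDP for both families and an IPv4 answer is
// preferred when both arrive.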
type pendingAdd struct {
server *mongoServer
info *mongoServerInfo
}
func (cluster *mongoCluster) syncServersIteration(direct bool) {
log("SYNC Starting full topology synchronization...")
var wg sync.WaitGroup
var m sync.Mutex
notYetAdded := make(map[string]pendingAdd)
addIfFound := make(map[string]bool)
seen := make(map[string]bool)
syncKind := partialSync
var spawnSync func(addr string, byMaster bool)
spawnSync = func(addr string, byMaster bool) {
wg.Add(1)
go func() {
defer wg.Done()
tcpaddr, err := resolveAddr(addr)
if err != nil {
log("SYNC Failed to start sync of ", addr, ": ", err.Error())
return
}
resolvedAddr := tcpaddr.String()
m.Lock()
if byMaster {
if pending, ok := notYetAdded[resolvedAddr]; ok {
delete(notYetAdded, resolvedAddr)
m.Unlock()
cluster.addServer(pending.server, pending.info, completeSync)
return
}
addIfFound[resolvedAddr] = true
}
if seen[resolvedAddr] {
m.Unlock()
return
}
seen[resolvedAddr] = true
m.Unlock()
server := cluster.server(addr, tcpaddr)
info, hosts, err := cluster.syncServer(server)
if err != nil {
cluster.removeServer(server)
return
}
m.Lock()
add := direct || info.Master || addIfFound[resolvedAddr]
if add {
syncKind = completeSync
} else {
notYetAdded[resolvedAddr] = pendingAdd{server, info}
}
m.Unlock()
if add {
cluster.addServer(server, info, completeSync)
}
if !direct {
for _, addr := range hosts {
spawnSync(addr, info.Master)
}
}
}()
}
knownAddrs := cluster.getKnownAddrs()
for _, addr := range knownAddrs {
spawnSync(addr, false)
}
wg.Wait()
if syncKind == completeSync {
logf("SYNC Synchronization was complete (got data from primary).")
for _, pending := range notYetAdded {
cluster.removeServer(pending.server)
}
} else {
logf("SYNC Synchronization was partial (cannot talk to primary).")
for _, pending := range notYetAdded {
cluster.addServer(pending.server, pending.info, partialSync)
}
}
cluster.Lock()
mastersLen := cluster.masters.Len()
logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
// Update dynamic seeds, but only if we have any good servers. Otherwise,
// leave them alone for better chances of a successful sync in the future.
if syncKind == completeSync {
dynaSeeds := make([]string, cluster.servers.Len())
for i, server := range cluster.servers.Slice() {
dynaSeeds[i] = server.Addr
}
cluster.dynaSeeds = dynaSeeds
debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
}
cluster.Unlock()
}
// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
// true, it will attempt to return a socket to a slave server. If it is
// false, the socket will necessarily be to a master server.
func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
var started time.Time
var syncCount uint
warnedLimit := false
for {
cluster.RLock()
for {
mastersLen := cluster.masters.Len()
slavesLen := cluster.servers.Len() - mastersLen
debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 {
break
}
if started.IsZero() {
// Initialize after fast path above.
started = time.Now()
syncCount = cluster.syncCount
} else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
cluster.RUnlock()
return nil, errors.New("no reachable servers")
}
log("Waiting for servers to synchronize...")
cluster.syncServers()
// Remember: this will release and reacquire the lock.
cluster.serverSynced.Wait()
}
var server *mongoServer
if slaveOk {
server = cluster.servers.BestFit(mode, serverTags)
} else {
server = cluster.masters.BestFit(mode, nil)
}
cluster.RUnlock()
if server == nil {
// Must have failed the requested tags. Sleep to avoid spinning.
time.Sleep(100 * time.Millisecond)
continue
}
s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
if err == errPoolLimit {
if !warnedLimit {
warnedLimit = true
log("WARNING: Per-server connection limit reached.")
}
time.Sleep(100 * time.Millisecond)
continue
}
if err != nil {
cluster.removeServer(server)
cluster.syncServers()
continue
}
if abended && !slaveOk {
var result isMasterResult
err := cluster.isMaster(s, &result)
if err != nil || !result.IsMaster {
logf("Cannot confirm server %s as master (%v)", server.Addr, err)
s.Release()
cluster.syncServers()
time.Sleep(100 * time.Millisecond)
continue
}
}
return s, nil
}
panic("unreached")
}
func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
cluster.Lock()
if cluster.cachedIndex == nil {
cluster.cachedIndex = make(map[string]bool)
}
if exists {
cluster.cachedIndex[cacheKey] = true
} else {
delete(cluster.cachedIndex, cacheKey)
}
cluster.Unlock()
}
func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
cluster.RLock()
if cluster.cachedIndex != nil {
result = cluster.cachedIndex[cacheKey]
}
cluster.RUnlock()
return
}
func (cluster *mongoCluster) ResetIndexCache() {
cluster.Lock()
cluster.cachedIndex = make(map[string]bool)
cluster.Unlock()
}

31
vendor/gopkg.in/mgo.v2/doc.go generated vendored Normal file

@ -0,0 +1,31 @@
// Package mgo offers a rich MongoDB driver for Go.
//
// Details about the mgo project (pronounced as "mango") are found
// in its web page:
//
// http://labix.org/mgo
//
// Usage of the driver revolves around the concept of sessions. To
// get started, obtain a session using the Dial function:
//
// session, err := mgo.Dial(url)
//
// This will establish one or more connections with the cluster of
// servers defined by the url parameter. From then on, the cluster
// may be queried with multiple consistency rules (see SetMode) and
// documents retrieved with statements such as:
//
// c := session.DB(database).C(collection)
// err := c.Find(query).One(&result)
//
// New sessions are typically created by calling session.Copy on the
// initial session obtained at dial time. These new sessions will share
// the same cluster information and connection pool, and may be easily
// handed into other methods and functions for organizing logic.
// Every session created must have its Close method called at the end
// of its lifetime, so its resources may be put back in the pool or
// collected, depending on the case.
//
// For more details, see the documentation for the types and methods.
//
package mgo

761
vendor/gopkg.in/mgo.v2/gridfs.go generated vendored Normal file

@ -0,0 +1,761 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"crypto/md5"
"encoding/hex"
"errors"
"hash"
"io"
"os"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
type GridFS struct {
Files *Collection
Chunks *Collection
}
type gfsFileMode int
const (
gfsClosed gfsFileMode = 0
gfsReading gfsFileMode = 1
gfsWriting gfsFileMode = 2
)
type GridFile struct {
m sync.Mutex
c sync.Cond
gfs *GridFS
mode gfsFileMode
err error
chunk int
offset int64
wpending int
wbuf []byte
wsum hash.Hash
rbuf []byte
rcache *gfsCachedChunk
doc gfsFile
}
type gfsFile struct {
Id interface{} "_id"
ChunkSize int "chunkSize"
UploadDate time.Time "uploadDate"
Length int64 ",minsize"
MD5 string
Filename string ",omitempty"
ContentType string "contentType,omitempty"
Metadata *bson.Raw ",omitempty"
}
type gfsChunk struct {
Id interface{} "_id"
FilesId interface{} "files_id"
N int
Data []byte
}
type gfsCachedChunk struct {
wait sync.Mutex
n int
data []byte
err error
}
func newGridFS(db *Database, prefix string) *GridFS {
return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
}
func (gfs *GridFS) newFile() *GridFile {
file := &GridFile{gfs: gfs}
file.c.L = &file.m
//runtime.SetFinalizer(file, finalizeFile)
return file
}
func finalizeFile(file *GridFile) {
file.Close()
}
// Create creates a new file with the provided name in the GridFS. If the file
// name already exists, a new version will be inserted with an up-to-date
// uploadDate that will cause it to be atomically visible to the Open and
// OpenId methods. If the file name is not important, an empty name may be
// provided and the file Id used instead.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// A simple example inserting a new file:
//
// func check(err error) {
// if err != nil {
//        panic(err)
// }
// }
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// n, err := file.Write([]byte("Hello world!"))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes written\n", n)
//
// The io.Writer interface is implemented by *GridFile and may be used
// when writing the file's contents. For example:
//
// file, err := db.GridFS("fs").Create("myfile.txt")
// check(err)
// messages, err := os.Open("/var/log/messages")
// check(err)
// defer messages.Close()
//     _, err = io.Copy(file, messages)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
file = gfs.newFile()
file.mode = gfsWriting
file.wsum = md5.New()
file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
return
}
// OpenId returns the file with the provided id, for reading.
// If the file isn't found, err will be set to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// func check(err error) {
// if err != nil {
//        panic(err)
// }
// }
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used
// to read its contents. As an example, the following snippet will dump
// the whole file into the standard output:
//
// file, err := db.GridFS("fs").OpenId(objid)
// check(err)
//     _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// Open returns the most recently uploaded file with the provided
// name, for reading. If the file isn't found, err will be set
// to mgo.ErrNotFound.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
//
// The following example will print the first 8192 bytes from the file:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
// b := make([]byte, 8192)
// n, err := file.Read(b)
// check(err)
// fmt.Println(string(b))
// check(err)
// err = file.Close()
// check(err)
// fmt.Printf("%d bytes read\n", n)
//
// The io.Reader interface is implemented by *GridFile and may be used
// to read its contents. As an example, the following snippet will dump
// the whole file into the standard output:
//
// file, err := db.GridFS("fs").Open("myfile.txt")
// check(err)
//     _, err = io.Copy(os.Stdout, file)
// check(err)
// err = file.Close()
// check(err)
//
func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
var doc gfsFile
err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
if err != nil {
return
}
file = gfs.newFile()
file.mode = gfsReading
file.doc = doc
return
}
// OpenNext opens the next file from iter for reading, sets *file to it,
// and returns true on the success case. If no more documents are available
// on iter or an error occurred, *file is set to nil and the result is false.
// Errors will be available via iter.Err().
//
// The iter parameter must be an iterator on the GridFS files collection.
// Using the GridFS.Find method is an easy way to obtain such an iterator,
// but any iterator on the collection will work.
//
// If the provided *file is non-nil, OpenNext will close it before attempting
// to iterate to the next element. This means that in a loop one only
// has to worry about closing files when breaking out of the loop early
// (break, return, or panic).
//
// For example:
//
// gfs := db.GridFS("fs")
// query := gfs.Find(nil).Sort("filename")
// iter := query.Iter()
// var f *mgo.GridFile
// for gfs.OpenNext(iter, &f) {
// fmt.Printf("Filename: %s\n", f.Name())
// }
// if iter.Close() != nil {
// panic(iter.Close())
// }
//
func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
if *file != nil {
// Ignoring the error here shouldn't be a big deal
// as we're reading the file and the loop iteration
// for this file is finished.
_ = (*file).Close()
}
var doc gfsFile
if !iter.Next(&doc) {
*file = nil
return false
}
f := gfs.newFile()
f.mode = gfsReading
f.doc = doc
*file = f
return true
}
// Find runs query on GridFS's files collection and returns
// the resulting Query.
//
// This logic:
//
// gfs := db.GridFS("fs")
// iter := gfs.Find(nil).Iter()
//
// Is equivalent to:
//
// files := db.C("fs" + ".files")
// iter := files.Find(nil).Iter()
//
func (gfs *GridFS) Find(query interface{}) *Query {
return gfs.Files.Find(query)
}
// RemoveId deletes the file with the provided id from the GridFS.
func (gfs *GridFS) RemoveId(id interface{}) error {
err := gfs.Files.Remove(bson.M{"_id": id})
if err != nil {
return err
}
_, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
return err
}
type gfsDocId struct {
Id interface{} "_id"
}
// Remove deletes all files with the provided name from the GridFS.
func (gfs *GridFS) Remove(name string) (err error) {
iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
var doc gfsDocId
for iter.Next(&doc) {
if e := gfs.RemoveId(doc.Id); e != nil {
err = e
}
}
if err == nil {
err = iter.Close()
}
return err
}
func (file *GridFile) assertMode(mode gfsFileMode) {
switch file.mode {
case mode:
return
case gfsWriting:
panic("GridFile is open for writing")
case gfsReading:
panic("GridFile is open for reading")
case gfsClosed:
panic("GridFile is closed")
default:
panic("internal error: missing GridFile mode")
}
}
// SetChunkSize sets size of saved chunks. Once the file is written to, it
// will be split in blocks of that size and each block saved into an
// independent chunk document. The default chunk size is 255kb.
//
// It is a runtime error to call this function once the file has started
// being written to.
func (file *GridFile) SetChunkSize(bytes int) {
file.assertMode(gfsWriting)
debugf("GridFile %p: setting chunk size to %d", file, bytes)
file.m.Lock()
file.doc.ChunkSize = bytes
file.m.Unlock()
}
// Id returns the current file Id.
func (file *GridFile) Id() interface{} {
return file.doc.Id
}
// SetId changes the current file Id.
//
// It is a runtime error to call this function once the file has started
// being written to, or when the file is not open for writing.
func (file *GridFile) SetId(id interface{}) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Id = id
file.m.Unlock()
}
// Name returns the optional file name. An empty string will be returned
// in case it is unset.
func (file *GridFile) Name() string {
return file.doc.Filename
}
// SetName changes the optional file name. An empty string may be used to
// unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetName(name string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.Filename = name
file.m.Unlock()
}
// ContentType returns the optional file content type. An empty string will be
// returned in case it is unset.
func (file *GridFile) ContentType() string {
return file.doc.ContentType
}
// SetContentType changes the optional file content type. An empty string may be
// used to unset it.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetContentType(ctype string) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.ContentType = ctype
file.m.Unlock()
}
// GetMeta unmarshals the optional "metadata" field associated with the
// file into the result parameter. The meaning of keys under that field
// is user-defined. For example:
//
// result := struct{ INode int }{}
// err = file.GetMeta(&result)
// if err != nil {
//        panic(err)
// }
// fmt.Printf("inode: %d\n", result.INode)
//
func (file *GridFile) GetMeta(result interface{}) (err error) {
file.m.Lock()
if file.doc.Metadata != nil {
err = bson.Unmarshal(file.doc.Metadata.Data, result)
}
file.m.Unlock()
return
}
// SetMeta changes the optional "metadata" field associated with the
// file. The meaning of keys under that field is user-defined.
// For example:
//
// file.SetMeta(bson.M{"inode": inode})
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetMeta(metadata interface{}) {
file.assertMode(gfsWriting)
data, err := bson.Marshal(metadata)
file.m.Lock()
if err != nil && file.err == nil {
file.err = err
} else {
file.doc.Metadata = &bson.Raw{Data: data}
}
file.m.Unlock()
}
// Size returns the file size in bytes.
func (file *GridFile) Size() (bytes int64) {
file.m.Lock()
bytes = file.doc.Length
file.m.Unlock()
return
}
// MD5 returns the file MD5 as a hex-encoded string.
func (file *GridFile) MD5() (md5 string) {
return file.doc.MD5
}
// UploadDate returns the file upload time.
func (file *GridFile) UploadDate() time.Time {
return file.doc.UploadDate
}
// SetUploadDate changes the file upload time.
//
// It is a runtime error to call this function when the file is not open
// for writing.
func (file *GridFile) SetUploadDate(t time.Time) {
file.assertMode(gfsWriting)
file.m.Lock()
file.doc.UploadDate = t
file.m.Unlock()
}
// Close flushes any pending changes in case the file is being written
// to, waits for any background operations to finish, and closes the file.
//
// It's important to Close files whether they are being written to
// or read from, and to check the err result to ensure the operation
// completed successfully.
func (file *GridFile) Close() (err error) {
file.m.Lock()
defer file.m.Unlock()
if file.mode == gfsWriting {
if len(file.wbuf) > 0 && file.err == nil {
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
file.completeWrite()
} else if file.mode == gfsReading && file.rcache != nil {
file.rcache.wait.Lock()
file.rcache = nil
}
file.mode = gfsClosed
debugf("GridFile %p: closed", file)
return file.err
}
func (file *GridFile) completeWrite() {
for file.wpending > 0 {
debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
file.c.Wait()
}
if file.err == nil {
hexsum := hex.EncodeToString(file.wsum.Sum(nil))
if file.doc.UploadDate.IsZero() {
file.doc.UploadDate = bson.Now()
}
file.doc.MD5 = hexsum
file.err = file.gfs.Files.Insert(file.doc)
}
if file.err != nil {
file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
}
if file.err == nil {
index := Index{
Key: []string{"files_id", "n"},
Unique: true,
}
file.err = file.gfs.Chunks.EnsureIndex(index)
}
}
// Abort cancels an in-progress write, preventing the file from being
// automatically created and ensuring previously written chunks are
// removed when the file is closed.
//
// It is a runtime error to call Abort when the file was not opened
// for writing.
func (file *GridFile) Abort() {
if file.mode != gfsWriting {
panic("file.Abort must be called on file opened for writing")
}
file.err = errors.New("write aborted")
}
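// An illustrative sketch of Abort composed with Close during a failed
// upload (assumes gfs is a *GridFS and payload holds the caller's data);
// after Abort, Close removes the chunks written so far and never inserts
// the file document:
//
//     file, err := gfs.Create("big.bin")
//     if err != nil {
//         return err
//     }
//     if _, err := file.Write(payload); err != nil {
//         file.Abort()
//         file.Close() // returns the abort error; partial chunks removed
//         return err
//     }
//     return file.Close()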
// Write writes the provided data to the file and returns the
// number of bytes written and an error if something went wrong.
//
// The file will internally cache the data so that all but the last
// chunk sent to the database have the size defined by SetChunkSize.
// This also means that errors may be deferred until a future call
// to Write or Close.
//
// The parameters and behavior of this function turn the file
// into an io.Writer.
func (file *GridFile) Write(data []byte) (n int, err error) {
file.assertMode(gfsWriting)
file.m.Lock()
debugf("GridFile %p: writing %d bytes", file, len(data))
defer file.m.Unlock()
if file.err != nil {
return 0, file.err
}
n = len(data)
file.doc.Length += int64(n)
chunkSize := file.doc.ChunkSize
if len(file.wbuf)+len(data) < chunkSize {
file.wbuf = append(file.wbuf, data...)
return
}
// First, flush file.wbuf complementing with data.
if len(file.wbuf) > 0 {
missing := chunkSize - len(file.wbuf)
if missing > len(data) {
missing = len(data)
}
file.wbuf = append(file.wbuf, data[:missing]...)
data = data[missing:]
file.insertChunk(file.wbuf)
file.wbuf = file.wbuf[0:0]
}
// Then, flush all chunks from data without copying.
for len(data) > chunkSize {
size := chunkSize
if size > len(data) {
size = len(data)
}
file.insertChunk(data[:size])
data = data[size:]
}
// And append the rest for a future call.
file.wbuf = append(file.wbuf, data...)
return n, file.err
}
func (file *GridFile) insertChunk(data []byte) {
n := file.chunk
file.chunk++
debugf("GridFile %p: adding to checksum: %q", file, string(data))
file.wsum.Write(data)
for file.doc.ChunkSize*file.wpending >= 1024*1024 {
// Hold on.. we got a MB pending.
file.c.Wait()
if file.err != nil {
return
}
}
file.wpending++
debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
// We may not own the memory of data, so rather than
// simply copying it, we'll marshal the document ahead of time.
data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
if err != nil {
file.err = err
return
}
go func() {
err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
file.m.Lock()
file.wpending--
if err != nil && file.err == nil {
file.err = err
}
file.c.Broadcast()
file.m.Unlock()
}()
}
// Seek sets the offset for the next Read or Write on file to
// offset, interpreted according to whence: 0 means relative to
// the origin of the file, 1 means relative to the current offset,
// and 2 means relative to the end. It returns the new offset and
// an error, if any.
func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
file.m.Lock()
debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
defer file.m.Unlock()
switch whence {
case os.SEEK_SET:
case os.SEEK_CUR:
offset += file.offset
case os.SEEK_END:
offset += file.doc.Length
default:
panic("unsupported whence value")
}
if offset > file.doc.Length {
return file.offset, errors.New("seek past end of file")
}
if offset == file.doc.Length {
// If we're seeking to the end of the file,
// no need to read anything. This enables
// a client to find the size of the file using only the
// io.ReadSeeker interface with low overhead.
file.offset = offset
return file.offset, nil
}
chunk := int(offset / int64(file.doc.ChunkSize))
if chunk+1 == file.chunk && offset >= file.offset {
file.rbuf = file.rbuf[int(offset-file.offset):]
file.offset = offset
return file.offset, nil
}
file.offset = offset
file.chunk = chunk
file.rbuf = nil
file.rbuf, err = file.getChunk()
if err == nil {
file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
}
return file.offset, err
}
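// An illustrative sketch of the seek-to-end shortcut above: it allows a
// cheap size probe through io.ReadSeeker without fetching any chunk
// (assumes file is an open *GridFile):
//
//     size, err := file.Seek(0, os.SEEK_END)
//     if err != nil {
//         return err
//     }
//     if _, err := file.Seek(0, os.SEEK_SET); err != nil {
//         return err
//     }
//     fmt.Printf("file is %d bytes\n", size)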
// Read reads into b the next available data from the file and
// returns the number of bytes read and an error if something
// went wrong. At the end of the file, n will be zero and err will
// be set to io.EOF.
//
// The parameters and behavior of this function turn the file
// into an io.Reader.
func (file *GridFile) Read(b []byte) (n int, err error) {
file.assertMode(gfsReading)
file.m.Lock()
debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
defer file.m.Unlock()
if file.offset == file.doc.Length {
return 0, io.EOF
}
for err == nil {
i := copy(b, file.rbuf)
n += i
file.offset += int64(i)
file.rbuf = file.rbuf[i:]
if i == len(b) || file.offset == file.doc.Length {
break
}
b = b[i:]
file.rbuf, err = file.getChunk()
}
return n, err
}
func (file *GridFile) getChunk() (data []byte, err error) {
cache := file.rcache
file.rcache = nil
if cache != nil && cache.n == file.chunk {
debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
cache.wait.Lock()
data, err = cache.data, cache.err
} else {
debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
var doc gfsChunk
err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
data = doc.Data
}
file.chunk++
if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
// Read the next one in background.
cache = &gfsCachedChunk{n: file.chunk}
cache.wait.Lock()
debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
// Clone the session to avoid having it closed in between.
chunks := file.gfs.Chunks
session := chunks.Database.Session.Clone()
go func(id interface{}, n int) {
defer session.Close()
chunks = chunks.With(session)
var doc gfsChunk
cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
cache.data = doc.Data
cache.wait.Unlock()
}(file.doc.Id, file.chunk)
file.rcache = cache
}
debugf("Returning err: %#v", err)
return
}

133
vendor/gopkg.in/mgo.v2/log.go generated vendored Normal file
View file

@ -0,0 +1,133 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"fmt"
"sync"
)
// ---------------------------------------------------------------------------
// Logging integration.
// Avoid importing the log type information unnecessarily. There's a small cost
// associated with using an interface rather than the type. Depending on how
// often the logger is plugged in, it would be worth using the type instead.
type log_Logger interface {
Output(calldepth int, s string) error
}
var (
globalLogger log_Logger
globalDebug bool
globalMutex sync.Mutex
)
// RACE WARNING: There are known data races when logging, which are manually
// silenced when the race detector is in use. These data races won't be
// observed in typical use, because logging is supposed to be set up once when
// the application starts. Having raceDetector as a constant, the compiler
// should elide the locks altogether in actual use.
// SetLogger specifies the *log.Logger object to which log messages should be sent.
func SetLogger(logger log_Logger) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalLogger = logger
}
// SetDebug enables the delivery of debug messages to the logger. It is
// only meaningful if a logger is also set.
func SetDebug(debug bool) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
globalDebug = debug
}
func log(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func logln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func logf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}
func debug(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprint(v...))
}
}
func debugln(v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintln(v...))
}
}
func debugf(format string, v ...interface{}) {
if raceDetector {
globalMutex.Lock()
defer globalMutex.Unlock()
}
if globalDebug && globalLogger != nil {
globalLogger.Output(2, fmt.Sprintf(format, v...))
}
}
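// An illustrative setup sketch: the standard library's *log.Logger
// already satisfies the Output(calldepth int, s string) error method
// expected above, so wiring it in is direct:
//
//     logger := log.New(os.Stderr, "[mgo] ", log.LstdFlags)
//     mgo.SetLogger(logger)
//     mgo.SetDebug(true) // only meaningful once a logger is set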

91
vendor/gopkg.in/mgo.v2/queue.go generated vendored Normal file
View file

@ -0,0 +1,91 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
type queue struct {
elems []interface{}
nelems, popi, pushi int
}
func (q *queue) Len() int {
return q.nelems
}
func (q *queue) Push(elem interface{}) {
//debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
if q.nelems == len(q.elems) {
q.expand()
}
q.elems[q.pushi] = elem
q.nelems++
q.pushi = (q.pushi + 1) % len(q.elems)
//debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
}
func (q *queue) Pop() (elem interface{}) {
//debugf("Popping(pushi=%d popi=%d cap=%d)\n",
// q.pushi, q.popi, len(q.elems))
if q.nelems == 0 {
return nil
}
elem = q.elems[q.popi]
q.elems[q.popi] = nil // Help GC.
q.nelems--
q.popi = (q.popi + 1) % len(q.elems)
//debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
// q.pushi, q.popi, len(q.elems), elem)
return elem
}
func (q *queue) expand() {
curcap := len(q.elems)
var newcap int
if curcap == 0 {
newcap = 8
} else if curcap < 1024 {
newcap = curcap * 2
} else {
newcap = curcap + (curcap / 4)
}
elems := make([]interface{}, newcap)
if q.popi == 0 {
copy(elems, q.elems)
q.pushi = curcap
} else {
newpopi := newcap - (curcap - q.popi)
copy(elems, q.elems[:q.popi])
copy(elems[newpopi:], q.elems[q.popi:])
q.popi = newpopi
}
for i := range q.elems {
q.elems[i] = nil // Help GC.
}
q.elems = elems
}
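// An illustrative in-package test sketch (the queue type is unexported,
// so this would live in package mgo and import "testing"): push 0..7 to
// fill the initial 8-slot ring, pop two, then push three more so the
// final Push triggers expand(), which rebases popi while preserving
// FIFO order.
//
//     func TestQueueWrapAround(t *testing.T) {
//         var q queue
//         for i := 0; i < 8; i++ {
//             q.Push(i)
//         }
//         q.Pop() // 0
//         q.Pop() // 1
//         q.Push(8)
//         q.Push(9)
//         q.Push(10) // ring is full again; expand() runs here
//         for want := 2; q.Len() > 0; want++ {
//             if got := q.Pop().(int); got != want {
//                 t.Fatalf("got %d, want %d", got, want)
//             }
//         }
//     }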

5
vendor/gopkg.in/mgo.v2/raceoff.go generated vendored Normal file
View file

@ -0,0 +1,5 @@
// +build !race
package mgo
const raceDetector = false

5
vendor/gopkg.in/mgo.v2/raceon.go generated vendored Normal file
View file

@ -0,0 +1,5 @@
// +build race
package mgo
const raceDetector = true

11
vendor/gopkg.in/mgo.v2/saslimpl.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
//+build sasl
package mgo
import (
"gopkg.in/mgo.v2/internal/sasl"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
}

11
vendor/gopkg.in/mgo.v2/saslstub.go generated vendored Normal file
View file

@ -0,0 +1,11 @@
//+build !sasl
package mgo
import (
"fmt"
)
func saslNew(cred Credential, host string) (saslStepper, error) {
return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
}
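// Usage note: the saslimpl.go/saslstub.go pair above is selected by the
// "sasl" build tag, so enabling SASL support is a build-time choice:
//
//     go build -tags sasl ./...
//
// (this assumes the Cyrus SASL development headers are available, as the
// internal/sasl package binds to the system SASL library via cgo).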

452
vendor/gopkg.in/mgo.v2/server.go generated vendored Normal file
View file

@ -0,0 +1,452 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"net"
"sort"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
// ---------------------------------------------------------------------------
// Mongo server encapsulation.
type mongoServer struct {
sync.RWMutex
Addr string
ResolvedAddr string
tcpaddr *net.TCPAddr
unusedSockets []*mongoSocket
liveSockets []*mongoSocket
closed bool
abended bool
sync chan bool
dial dialer
pingValue time.Duration
pingIndex int
pingCount uint32
pingWindow [6]time.Duration
info *mongoServerInfo
}
type dialer struct {
old func(addr net.Addr) (net.Conn, error)
new func(addr *ServerAddr) (net.Conn, error)
}
func (dial dialer) isSet() bool {
return dial.old != nil || dial.new != nil
}
type mongoServerInfo struct {
Master bool
Mongos bool
Tags bson.D
MaxWireVersion int
SetName string
}
var defaultServerInfo mongoServerInfo
func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
server := &mongoServer{
Addr: addr,
ResolvedAddr: tcpaddr.String(),
tcpaddr: tcpaddr,
sync: sync,
dial: dial,
info: &defaultServerInfo,
pingValue: time.Hour, // Push it back before an actual ping.
}
go server.pinger(true)
return server
}
var errPoolLimit = errors.New("per-server connection limit reached")
var errServerClosed = errors.New("server was closed")
// AcquireSocket returns a socket for communicating with the server.
// This will attempt to reuse an old connection, if one is available. Otherwise,
// it will establish a new one. The returned socket is owned by the call site,
// and will return to the cache when the socket has its Release method called
// the same number of times as AcquireSocket + Acquire were called for it.
// If the poolLimit argument is greater than zero and the number of sockets in
// use in this server is greater than the provided limit, errPoolLimit is
// returned.
func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
for {
server.Lock()
abended = server.abended
if server.closed {
server.Unlock()
return nil, abended, errServerClosed
}
n := len(server.unusedSockets)
if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit {
server.Unlock()
return nil, false, errPoolLimit
}
if n > 0 {
socket = server.unusedSockets[n-1]
server.unusedSockets[n-1] = nil // Help GC.
server.unusedSockets = server.unusedSockets[:n-1]
info := server.info
server.Unlock()
err = socket.InitialAcquire(info, timeout)
if err != nil {
continue
}
} else {
server.Unlock()
socket, err = server.Connect(timeout)
if err == nil {
server.Lock()
// We've waited for the Connect, see if we got
// closed in the meantime
if server.closed {
server.Unlock()
socket.Release()
socket.Close()
return nil, abended, errServerClosed
}
server.liveSockets = append(server.liveSockets, socket)
server.Unlock()
}
}
return
}
panic("unreachable")
}
// Connect establishes a new connection to the server. This should
// generally be done through server.AcquireSocket().
func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
server.RLock()
master := server.info.Master
dial := server.dial
server.RUnlock()
logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
var conn net.Conn
var err error
switch {
case !dial.isSet():
// Cannot do this because it lacks timeout support. :-(
//conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
if tcpconn, ok := conn.(*net.TCPConn); ok {
tcpconn.SetKeepAlive(true)
} else if err == nil {
panic("internal error: obtained TCP connection is not a *net.TCPConn!?")
}
case dial.old != nil:
conn, err = dial.old(server.tcpaddr)
case dial.new != nil:
conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
default:
panic("dialer is set, but both dial.old and dial.new are nil")
}
if err != nil {
logf("Connection to %s failed: %v", server.Addr, err.Error())
return nil, err
}
logf("Connection to %s established.", server.Addr)
stats.conn(+1, master)
return newSocket(server, conn, timeout), nil
}
// Close forces closing all sockets that are alive, whether
// they're currently in use or not.
func (server *mongoServer) Close() {
server.Lock()
server.closed = true
liveSockets := server.liveSockets
unusedSockets := server.unusedSockets
server.liveSockets = nil
server.unusedSockets = nil
server.Unlock()
logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
for i, s := range liveSockets {
s.Close()
liveSockets[i] = nil
}
for i := range unusedSockets {
unusedSockets[i] = nil
}
}
// RecycleSocket puts the socket back into the unused cache.
func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
server.Lock()
if !server.closed {
server.unusedSockets = append(server.unusedSockets, socket)
}
server.Unlock()
}
func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
for i, s := range sockets {
if s == socket {
copy(sockets[i:], sockets[i+1:])
n := len(sockets) - 1
sockets[n] = nil
sockets = sockets[:n]
break
}
}
return sockets
}
// AbendSocket notifies the server that the given socket has terminated
// abnormally, and thus should be discarded rather than cached.
func (server *mongoServer) AbendSocket(socket *mongoSocket) {
server.Lock()
server.abended = true
if server.closed {
server.Unlock()
return
}
server.liveSockets = removeSocket(server.liveSockets, socket)
server.unusedSockets = removeSocket(server.unusedSockets, socket)
server.Unlock()
// Maybe just a timeout, but suggest a cluster sync up just in case.
select {
case server.sync <- true:
default:
}
}
func (server *mongoServer) SetInfo(info *mongoServerInfo) {
server.Lock()
server.info = info
server.Unlock()
}
func (server *mongoServer) Info() *mongoServerInfo {
server.Lock()
info := server.info
server.Unlock()
return info
}
func (server *mongoServer) hasTags(serverTags []bson.D) bool {
NextTagSet:
for _, tags := range serverTags {
NextReqTag:
for _, req := range tags {
for _, has := range server.info.Tags {
if req.Name == has.Name {
if req.Value == has.Value {
continue NextReqTag
}
continue NextTagSet
}
}
continue NextTagSet
}
return true
}
return false
}
var pingDelay = 15 * time.Second
func (server *mongoServer) pinger(loop bool) {
var delay time.Duration
if raceDetector {
// This variable is only ever touched by tests.
globalMutex.Lock()
delay = pingDelay
globalMutex.Unlock()
} else {
delay = pingDelay
}
op := queryOp{
collection: "admin.$cmd",
query: bson.D{{"ping", 1}},
flags: flagSlaveOk,
limit: -1,
}
for {
if loop {
time.Sleep(delay)
}
op := op
socket, _, err := server.AcquireSocket(0, delay)
if err == nil {
start := time.Now()
_, _ = socket.SimpleQuery(&op)
delay := time.Now().Sub(start)
server.pingWindow[server.pingIndex] = delay
server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
server.pingCount++
var max time.Duration
for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
if server.pingWindow[i] > max {
max = server.pingWindow[i]
}
}
socket.Release()
server.Lock()
if server.closed {
loop = false
}
server.pingValue = max
server.Unlock()
logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
} else if err == errServerClosed {
return
}
if !loop {
return
}
}
}
type mongoServerSlice []*mongoServer
func (s mongoServerSlice) Len() int {
return len(s)
}
func (s mongoServerSlice) Less(i, j int) bool {
return s[i].ResolvedAddr < s[j].ResolvedAddr
}
func (s mongoServerSlice) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s mongoServerSlice) Sort() {
sort.Sort(s)
}
func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
n := len(s)
i = sort.Search(n, func(i int) bool {
return s[i].ResolvedAddr >= resolvedAddr
})
return i, i != n && s[i].ResolvedAddr == resolvedAddr
}
type mongoServers struct {
slice mongoServerSlice
}
func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
if i, ok := servers.slice.Search(resolvedAddr); ok {
return servers.slice[i]
}
return nil
}
func (servers *mongoServers) Add(server *mongoServer) {
servers.slice = append(servers.slice, server)
servers.slice.Sort()
}
func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
if i, found := servers.slice.Search(other.ResolvedAddr); found {
server = servers.slice[i]
copy(servers.slice[i:], servers.slice[i+1:])
n := len(servers.slice) - 1
servers.slice[n] = nil // Help GC.
servers.slice = servers.slice[:n]
}
return
}
func (servers *mongoServers) Slice() []*mongoServer {
return ([]*mongoServer)(servers.slice)
}
func (servers *mongoServers) Get(i int) *mongoServer {
return servers.slice[i]
}
func (servers *mongoServers) Len() int {
return len(servers.slice)
}
func (servers *mongoServers) Empty() bool {
return len(servers.slice) == 0
}
// BestFit returns the best candidate server for operations at this
// point in time: servers matching the requested tags are preferred,
// then the master/slave role appropriate for the mode, then lower ping
// latency (beyond a 15ms band), then fewer connections in use.
func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer {
var best *mongoServer
for _, next := range servers.slice {
if best == nil {
best = next
best.RLock()
if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
best.RUnlock()
best = nil
}
continue
}
next.RLock()
swap := false
switch {
case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
// Must have requested tags.
case next.info.Master != best.info.Master && mode != Nearest:
// Prefer slaves, unless the mode is PrimaryPreferred.
swap = (mode == PrimaryPreferred) != best.info.Master
case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
// Prefer nearest server.
swap = next.pingValue < best.pingValue
case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
// Prefer servers with less connections.
swap = true
}
if swap {
best.RUnlock()
best = next
} else {
next.RUnlock()
}
}
if best != nil {
best.RUnlock()
}
return best
}
func absDuration(d time.Duration) time.Duration {
if d < 0 {
return -d
}
return d
}
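// An illustrative restatement of the pingWindow scan in pinger() above:
// pingValue is the maximum of up to the last six round-trip samples,
// which keeps server selection pessimistic and robust to a single lucky
// fast ping.
//
//     func rollingMax(window []time.Duration, count uint32) time.Duration {
//         var max time.Duration
//         for i := 0; i < len(window) && uint32(i) < count; i++ {
//             if window[i] > max {
//                 max = window[i]
//             }
//         }
//         return max
//     }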

4722
vendor/gopkg.in/mgo.v2/session.go generated vendored Normal file

File diff suppressed because it is too large

707
vendor/gopkg.in/mgo.v2/socket.go generated vendored Normal file
View file

@ -0,0 +1,707 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"errors"
"fmt"
"net"
"sync"
"time"
"gopkg.in/mgo.v2/bson"
)
type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
type mongoSocket struct {
sync.Mutex
server *mongoServer // nil when cached
conn net.Conn
timeout time.Duration
addr string // For debugging only.
nextRequestId uint32
replyFuncs map[uint32]replyFunc
references int
creds []Credential
logout []Credential
cachedNonce string
gotNonce sync.Cond
dead error
serverInfo *mongoServerInfo
}
type queryOpFlags uint32
const (
_ queryOpFlags = 1 << iota
flagTailable
flagSlaveOk
flagLogReplay
flagNoCursorTimeout
flagAwaitData
)
type queryOp struct {
collection string
query interface{}
skip int32
limit int32
selector interface{}
flags queryOpFlags
replyFunc replyFunc
mode Mode
options queryWrapper
hasOptions bool
serverTags []bson.D
}
type queryWrapper struct {
Query interface{} "$query"
OrderBy interface{} "$orderby,omitempty"
Hint interface{} "$hint,omitempty"
Explain bool "$explain,omitempty"
Snapshot bool "$snapshot,omitempty"
ReadPreference bson.D "$readPreference,omitempty"
MaxScan int "$maxScan,omitempty"
MaxTimeMS int "$maxTimeMS,omitempty"
Comment string "$comment,omitempty"
}
func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos {
var modeName string
switch op.mode {
case Strong:
modeName = "primary"
case Monotonic, Eventual:
modeName = "secondaryPreferred"
case PrimaryPreferred:
modeName = "primaryPreferred"
case Secondary:
modeName = "secondary"
case SecondaryPreferred:
modeName = "secondaryPreferred"
case Nearest:
modeName = "nearest"
default:
panic(fmt.Sprintf("unsupported read mode: %d", op.mode))
}
op.hasOptions = true
op.options.ReadPreference = make(bson.D, 0, 2)
op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName})
if len(op.serverTags) > 0 {
op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags})
}
}
if op.hasOptions {
if op.query == nil {
var empty bson.D
op.options.Query = empty
} else {
op.options.Query = op.query
}
debugf("final query is %#v\n", &op.options)
return &op.options
}
return op.query
}
type getMoreOp struct {
collection string
limit int32
cursorId int64
replyFunc replyFunc
}
type replyOp struct {
flags uint32
cursorId int64
firstDoc int32
replyDocs int32
}
type insertOp struct {
collection string // "database.collection"
documents []interface{} // One or more documents to insert
flags uint32
}
type updateOp struct {
Collection string `bson:"-"` // "database.collection"
Selector interface{} `bson:"q"`
Update interface{} `bson:"u"`
Flags uint32 `bson:"-"`
Multi bool `bson:"multi,omitempty"`
Upsert bool `bson:"upsert,omitempty"`
}
type deleteOp struct {
Collection string `bson:"-"` // "database.collection"
Selector interface{} `bson:"q"`
Flags uint32 `bson:"-"`
Limit int `bson:"limit"`
}
type killCursorsOp struct {
cursorIds []int64
}
type requestInfo struct {
bufferPos int
replyFunc replyFunc
}
func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
socket := &mongoSocket{
conn: conn,
addr: server.Addr,
server: server,
replyFuncs: make(map[uint32]replyFunc),
}
socket.gotNonce.L = &socket.Mutex
if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
panic("newSocket: InitialAcquire returned error: " + err.Error())
}
stats.socketsAlive(+1)
debugf("Socket %p to %s: initialized", socket, socket.addr)
socket.resetNonce()
go socket.readLoop()
return socket
}
// Server returns the server that the socket is associated with.
// It returns nil while the socket is cached in its respective server.
func (socket *mongoSocket) Server() *mongoServer {
socket.Lock()
server := socket.server
socket.Unlock()
return server
}
// ServerInfo returns details for the server at the time the socket
// was initially acquired.
func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
socket.Lock()
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// InitialAcquire obtains the first reference to the socket, either
// right after the connection is made or once a recycled socket is
// being put back in use.
func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
socket.Lock()
if socket.references > 0 {
panic("Socket acquired out of cache with references")
}
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
return dead
}
socket.references++
socket.serverInfo = serverInfo
socket.timeout = timeout
stats.socketsInUse(+1)
stats.socketRefs(+1)
socket.Unlock()
return nil
}
// Acquire obtains an additional reference to the socket.
// The socket will only be recycled when it's released as many
// times as it's been acquired.
func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
socket.Lock()
if socket.references == 0 {
panic("Socket got non-initial acquire with references == 0")
}
// We'll track references to dead sockets as well.
// Caller is still supposed to release the socket.
socket.references++
stats.socketRefs(+1)
serverInfo := socket.serverInfo
socket.Unlock()
return serverInfo
}
// Release decrements a socket reference. The socket will be
// recycled once it's released as many times as it's been acquired.
func (socket *mongoSocket) Release() {
socket.Lock()
if socket.references == 0 {
panic("socket.Release() with references == 0")
}
socket.references--
stats.socketRefs(-1)
if socket.references == 0 {
stats.socketsInUse(-1)
server := socket.server
socket.Unlock()
socket.LogoutAll()
// If the socket is dead server is nil.
if server != nil {
server.RecycleSocket(socket)
}
} else {
socket.Unlock()
}
}
// SetTimeout changes the timeout used on socket operations.
func (socket *mongoSocket) SetTimeout(d time.Duration) {
socket.Lock()
socket.timeout = d
socket.Unlock()
}
type deadlineType int
const (
readDeadline deadlineType = 1
writeDeadline deadlineType = 2
)
func (socket *mongoSocket) updateDeadline(which deadlineType) {
var when time.Time
if socket.timeout > 0 {
when = time.Now().Add(socket.timeout)
}
whichstr := ""
switch which {
case readDeadline | writeDeadline:
whichstr = "read/write"
socket.conn.SetDeadline(when)
case readDeadline:
whichstr = "read"
socket.conn.SetReadDeadline(when)
case writeDeadline:
whichstr = "write"
socket.conn.SetWriteDeadline(when)
default:
panic("invalid parameter to updateDeadline")
}
debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
}
// Close terminates the socket use.
func (socket *mongoSocket) Close() {
socket.kill(errors.New("Closed explicitly"), false)
}
func (socket *mongoSocket) kill(err error, abend bool) {
socket.Lock()
if socket.dead != nil {
debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
socket.Unlock()
return
}
logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
socket.dead = err
socket.conn.Close()
stats.socketsAlive(-1)
replyFuncs := socket.replyFuncs
socket.replyFuncs = make(map[uint32]replyFunc)
server := socket.server
socket.server = nil
socket.gotNonce.Broadcast()
socket.Unlock()
for _, replyFunc := range replyFuncs {
logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
replyFunc(err, nil, -1, nil)
}
if abend {
server.AbendSocket(socket)
}
}
func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
var wait, change sync.Mutex
var replyDone bool
var replyData []byte
var replyErr error
wait.Lock()
op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
change.Lock()
if !replyDone {
replyDone = true
replyErr = err
if err == nil {
replyData = docData
}
}
change.Unlock()
wait.Unlock()
}
err = socket.Query(op)
if err != nil {
return nil, err
}
wait.Lock()
change.Lock()
data = replyData
err = replyErr
change.Unlock()
return data, err
}
func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
if lops := socket.flushLogout(); len(lops) > 0 {
ops = append(lops, ops...)
}
buf := make([]byte, 0, 256)
// Serialize operations synchronously to avoid interrupting
// other goroutines while we can't really be sending data.
// Also, record id positions so that we can compute request
// ids at once later with the lock already held.
requests := make([]requestInfo, len(ops))
requestCount := 0
for _, op := range ops {
debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
if qop, ok := op.(*queryOp); ok {
if cmd, ok := qop.query.(*findCmd); ok {
debugf("Socket %p to %s: find command: %#v", socket, socket.addr, cmd)
}
}
start := len(buf)
var replyFunc replyFunc
switch op := op.(type) {
case *updateOp:
buf = addHeader(buf, 2001)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.Collection)
buf = addInt32(buf, int32(op.Flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
buf, err = addBSON(buf, op.Selector)
if err != nil {
return err
}
debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update)
buf, err = addBSON(buf, op.Update)
if err != nil {
return err
}
case *insertOp:
buf = addHeader(buf, 2002)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
for _, doc := range op.documents {
debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
buf, err = addBSON(buf, doc)
if err != nil {
return err
}
}
case *queryOp:
buf = addHeader(buf, 2004)
buf = addInt32(buf, int32(op.flags))
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.skip)
buf = addInt32(buf, op.limit)
buf, err = addBSON(buf, op.finalQuery(socket))
if err != nil {
return err
}
if op.selector != nil {
buf, err = addBSON(buf, op.selector)
if err != nil {
return err
}
}
replyFunc = op.replyFunc
case *getMoreOp:
buf = addHeader(buf, 2005)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.collection)
buf = addInt32(buf, op.limit)
buf = addInt64(buf, op.cursorId)
replyFunc = op.replyFunc
case *deleteOp:
buf = addHeader(buf, 2006)
buf = addInt32(buf, 0) // Reserved
buf = addCString(buf, op.Collection)
buf = addInt32(buf, int32(op.Flags))
debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
buf, err = addBSON(buf, op.Selector)
if err != nil {
return err
}
case *killCursorsOp:
buf = addHeader(buf, 2007)
buf = addInt32(buf, 0) // Reserved
buf = addInt32(buf, int32(len(op.cursorIds)))
for _, cursorId := range op.cursorIds {
buf = addInt64(buf, cursorId)
}
default:
panic("internal error: unknown operation type")
}
setInt32(buf, start, int32(len(buf)-start))
if replyFunc != nil {
request := &requests[requestCount]
request.replyFunc = replyFunc
request.bufferPos = start
requestCount++
}
}
// Buffer is ready for the pipe. Lock, allocate ids, and enqueue.
socket.Lock()
if socket.dead != nil {
dead := socket.dead
socket.Unlock()
debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error())
// XXX This seems necessary in case the session is closed concurrently
// with a query being performed, but it's not yet tested:
for i := 0; i != requestCount; i++ {
request := &requests[i]
if request.replyFunc != nil {
request.replyFunc(dead, nil, -1, nil)
}
}
return dead
}
wasWaiting := len(socket.replyFuncs) > 0
// Reserve id 0 for requests which should have no responses.
requestId := socket.nextRequestId + 1
if requestId == 0 {
requestId++
}
socket.nextRequestId = requestId + uint32(requestCount)
for i := 0; i != requestCount; i++ {
request := &requests[i]
setInt32(buf, request.bufferPos+4, int32(requestId))
socket.replyFuncs[requestId] = request.replyFunc
requestId++
}
debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
stats.sentOps(len(ops))
socket.updateDeadline(writeDeadline)
_, err = socket.conn.Write(buf)
if !wasWaiting && requestCount > 0 {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
return err
}
func fill(r net.Conn, b []byte) error {
l := len(b)
n, err := r.Read(b)
for n != l && err == nil {
var ni int
ni, err = r.Read(b[n:])
n += ni
}
return err
}
// Estimated minimum cost per socket: 1 goroutine + memory for the largest
// document ever seen.
func (socket *mongoSocket) readLoop() {
p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
s := make([]byte, 4)
conn := socket.conn // No locking, conn never changes.
for {
err := fill(conn, p)
if err != nil {
socket.kill(err, true)
return
}
totalLen := getInt32(p, 0)
responseTo := getInt32(p, 8)
opCode := getInt32(p, 12)
// Don't use socket.server.Addr here. socket is not
// locked and socket.server may go away.
debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)
_ = totalLen
if opCode != 1 {
socket.kill(errors.New("opcode != 1, corrupted data?"), true)
return
}
reply := replyOp{
flags: uint32(getInt32(p, 16)),
cursorId: getInt64(p, 20),
firstDoc: getInt32(p, 28),
replyDocs: getInt32(p, 32),
}
stats.receivedOps(+1)
stats.receivedDocs(int(reply.replyDocs))
socket.Lock()
replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
if ok {
delete(socket.replyFuncs, uint32(responseTo))
}
socket.Unlock()
if replyFunc != nil && reply.replyDocs == 0 {
replyFunc(nil, &reply, -1, nil)
} else {
for i := 0; i != int(reply.replyDocs); i++ {
err := fill(conn, s)
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
b := make([]byte, int(getInt32(s, 0)))
// copy(b, s) in an efficient way.
b[0] = s[0]
b[1] = s[1]
b[2] = s[2]
b[3] = s[3]
err = fill(conn, b[4:])
if err != nil {
if replyFunc != nil {
replyFunc(err, nil, -1, nil)
}
socket.kill(err, true)
return
}
if globalDebug && globalLogger != nil {
m := bson.M{}
if err := bson.Unmarshal(b, m); err == nil {
debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
}
}
if replyFunc != nil {
replyFunc(nil, &reply, i, b)
}
// XXX Do bound checking against totalLen.
}
}
socket.Lock()
if len(socket.replyFuncs) == 0 {
// Nothing else to read for now. Disable deadline.
socket.conn.SetReadDeadline(time.Time{})
} else {
socket.updateDeadline(readDeadline)
}
socket.Unlock()
// XXX Do bound checking against totalLen.
}
}
var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
func addHeader(b []byte, opcode int) []byte {
i := len(b)
b = append(b, emptyHeader...)
// Enough for current opcodes.
b[i+12] = byte(opcode)
b[i+13] = byte(opcode >> 8)
return b
}
func addInt32(b []byte, i int32) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}
func addInt64(b []byte, i int64) []byte {
return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
}
func addCString(b []byte, s string) []byte {
b = append(b, []byte(s)...)
b = append(b, 0)
return b
}
func addBSON(b []byte, doc interface{}) ([]byte, error) {
if doc == nil {
return append(b, 5, 0, 0, 0, 0), nil
}
data, err := bson.Marshal(doc)
if err != nil {
return b, err
}
return append(b, data...), nil
}
func setInt32(b []byte, pos int, i int32) {
b[pos] = byte(i)
b[pos+1] = byte(i >> 8)
b[pos+2] = byte(i >> 16)
b[pos+3] = byte(i >> 24)
}
func getInt32(b []byte, pos int) int32 {
return (int32(b[pos+0])) |
(int32(b[pos+1]) << 8) |
(int32(b[pos+2]) << 16) |
(int32(b[pos+3]) << 24)
}
func getInt64(b []byte, pos int) int64 {
return (int64(b[pos+0])) |
(int64(b[pos+1]) << 8) |
(int64(b[pos+2]) << 16) |
(int64(b[pos+3]) << 24) |
(int64(b[pos+4]) << 32) |
(int64(b[pos+5]) << 40) |
(int64(b[pos+6]) << 48) |
(int64(b[pos+7]) << 56)
}
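// An illustrative round trip of the little-endian helpers above,
// matching the MongoDB wire protocol's byte order: addInt32 emits the
// low byte first and getInt32 reassembles it (in-package code, since
// the helpers are unexported):
//
//     buf := addInt32(nil, 0x01020304)
//     // buf == []byte{0x04, 0x03, 0x02, 0x01}
//     if getInt32(buf, 0) != 0x01020304 {
//         panic("little-endian round trip failed")
//     }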

147
vendor/gopkg.in/mgo.v2/stats.go generated vendored Normal file
View file

@ -0,0 +1,147 @@
// mgo - MongoDB driver for Go
//
// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package mgo
import (
"sync"
)
var stats *Stats
var statsMutex sync.Mutex
func SetStats(enabled bool) {
statsMutex.Lock()
if enabled {
if stats == nil {
stats = &Stats{}
}
} else {
stats = nil
}
statsMutex.Unlock()
}
func GetStats() (snapshot Stats) {
statsMutex.Lock()
snapshot = *stats
statsMutex.Unlock()
return
}
func ResetStats() {
statsMutex.Lock()
debug("Resetting stats")
old := stats
stats = &Stats{}
// These are absolute values:
stats.Clusters = old.Clusters
stats.SocketsInUse = old.SocketsInUse
stats.SocketsAlive = old.SocketsAlive
stats.SocketRefs = old.SocketRefs
statsMutex.Unlock()
return
}
type Stats struct {
Clusters int
MasterConns int
SlaveConns int
SentOps int
ReceivedOps int
ReceivedDocs int
SocketsAlive int
SocketsInUse int
SocketRefs int
}
func (stats *Stats) cluster(delta int) {
if stats != nil {
statsMutex.Lock()
stats.Clusters += delta
statsMutex.Unlock()
}
}
func (stats *Stats) conn(delta int, master bool) {
if stats != nil {
statsMutex.Lock()
if master {
stats.MasterConns += delta
} else {
stats.SlaveConns += delta
}
statsMutex.Unlock()
}
}
func (stats *Stats) sentOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SentOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedOps(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedOps += delta
statsMutex.Unlock()
}
}
func (stats *Stats) receivedDocs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.ReceivedDocs += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsInUse(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsInUse += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketsAlive(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketsAlive += delta
statsMutex.Unlock()
}
}
func (stats *Stats) socketRefs(delta int) {
if stats != nil {
statsMutex.Lock()
stats.SocketRefs += delta
statsMutex.Unlock()
}
}
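For illustration, a minimal sketch of driving this collector (not part of the vendored file; assumes the package is imported as mgo and that some session traffic happens where indicated):

package main

import (
	"fmt"

	mgo "gopkg.in/mgo.v2"
)

func main() {
	mgo.SetStats(true) // allocate the collector; counters start at zero

	// ... run some mgo session operations here ...

	s := mgo.GetStats() // snapshot taken under the stats mutex
	fmt.Printf("sent=%d received=%d sockets=%d/%d\n",
		s.SentOps, s.ReceivedOps, s.SocketsInUse, s.SocketsAlive)

	mgo.ResetStats() // zero the counters, keep the absolute gauges
}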

678
vendor/gopkg.in/ns1/ns1-go.v2/LICENSE.txt generated vendored Normal file
View file

@ -0,0 +1,678 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
APACHE HTTP SERVER SUBCOMPONENTS:
The Apache HTTP Server includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for these subcomponents is subject to the terms and
conditions of the following licenses.
For the mod_mime_magic component:
/*
* mod_mime_magic: MIME type lookup via file magic numbers
* Copyright (c) 1996-1997 Cisco Systems, Inc.
*
* This software was submitted by Cisco Systems to the Apache Group in July
* 1997. Future revisions and derivatives of this source code must
* acknowledge Cisco Systems as the original contributor of this module.
* All other licensing and usage conditions are those of the Apache Group.
*
* Some of this code is derived from the free version of the file command
* originally posted to comp.sources.unix. Copyright info for that program
* is included below as required.
* ---------------------------------------------------------------------------
* - Copyright (c) Ian F. Darwin, 1987. Written by Ian F. Darwin.
*
* This software is not subject to any license of the American Telephone and
* Telegraph Company or of the Regents of the University of California.
*
* Permission is granted to anyone to use this software for any purpose on any
* computer system, and to alter it and redistribute it freely, subject to
* the following restrictions:
*
* 1. The author is not responsible for the consequences of use of this
* software, no matter how awful, even if they arise from flaws in it.
*
* 2. The origin of this software must not be misrepresented, either by
* explicit claim or by omission. Since few users ever read sources, credits
* must appear in the documentation.
*
* 3. Altered versions must be plainly marked as such, and must not be
* misrepresented as being the original software. Since few users ever read
* sources, credits must appear in the documentation.
*
* 4. This notice may not be removed or altered.
* -------------------------------------------------------------------------
*
*/
For the modules\mappers\mod_imap.c component:
"macmartinized" polygon code copyright 1992 by Eric Haines, erich@eye.com
For the server\util_md5.c component:
/************************************************************************
* NCSA HTTPd Server
* Software Development Group
* National Center for Supercomputing Applications
* University of Illinois at Urbana-Champaign
* 605 E. Springfield, Champaign, IL 61820
* httpd@ncsa.uiuc.edu
*
* Copyright (C) 1995, Board of Trustees of the University of Illinois
*
************************************************************************
*
* md5.c: NCSA HTTPd code which uses the md5c.c RSA Code
*
* Original Code Copyright (C) 1994, Jeff Hostetler, Spyglass, Inc.
* Portions of Content-MD5 code Copyright (C) 1993, 1994 by Carnegie Mellon
* University (see Copyright below).
* Portions of Content-MD5 code Copyright (C) 1991 Bell Communications
* Research, Inc. (Bellcore) (see Copyright below).
* Portions extracted from mpack, John G. Myers - jgm+@cmu.edu
* Content-MD5 Code contributed by Martin Hamilton (martin@net.lut.ac.uk)
*
*/
/* these portions extracted from mpack, John G. Myers - jgm+@cmu.edu */
/* (C) Copyright 1993,1994 by Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without
* fee, provided that the above copyright notice appear in all copies
* and that both that copyright notice and this permission notice
* appear in supporting documentation, and that the name of Carnegie
* Mellon University not be used in advertising or publicity
* pertaining to distribution of the software without specific,
* written prior permission. Carnegie Mellon University makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied
* warranty.
*
* CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO
* THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
* FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
/*
* Copyright (c) 1991 Bell Communications Research, Inc. (Bellcore)
*
* Permission to use, copy, modify, and distribute this material
* for any purpose and without fee is hereby granted, provided
* that the above copyright notice and this permission notice
* appear in all copies, and that the name of Bellcore not be
* used in advertising or publicity pertaining to this
* material without the specific, prior written permission
* of an authorized representative of Bellcore. BELLCORE
* MAKES NO REPRESENTATIONS ABOUT THE ACCURACY OR SUITABILITY
* OF THIS MATERIAL FOR ANY PURPOSE. IT IS PROVIDED "AS IS",
* WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES.
*/
For the srclib\apr\include\apr_md5.h component:
/*
* This work is derived from material Copyright RSA Data Security, Inc.
*
* The RSA copyright statement and Licence for that original material is
* included below. This is followed by the Apache copyright statement and
* licence for the modifications made to that material.
*/
/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
For the srclib\apr\passwd\apr_md5.c component:
/*
* This work is derived from material Copyright RSA Data Security, Inc.
*
* The RSA copyright statement and Licence for that original material is
* included below. This is followed by the Apache copyright statement and
* licence for the modifications made to that material.
*/
/* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm
*/
/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
rights reserved.
License to copy and use this software is granted provided that it
is identified as the "RSA Data Security, Inc. MD5 Message-Digest
Algorithm" in all material mentioning or referencing this software
or this function.
License is also granted to make and use derivative works provided
that such works are identified as "derived from the RSA Data
Security, Inc. MD5 Message-Digest Algorithm" in all material
mentioning or referencing the derived work.
RSA Data Security, Inc. makes no representations concerning either
the merchantability of this software or the suitability of this
software for any particular purpose. It is provided "as is"
without express or implied warranty of any kind.
These notices must be retained in any copies of any part of this
documentation and/or software.
*/
/*
* The apr_md5_encode() routine uses much code obtained from the FreeBSD 3.0
* MD5 crypt() function, which is licenced as follows:
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
* <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
* can do whatever you want with this stuff. If we meet some day, and you think
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*/
For the srclib\apr-util\crypto\apr_md4.c component:
* This is derived from material copyright RSA Data Security, Inc.
* Their notice is reproduced below in its entirety.
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD4 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD4 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
For the srclib\apr-util\include\apr_md4.h component:
*
* This is derived from material copyright RSA Data Security, Inc.
* Their notice is reproduced below in its entirety.
*
* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All
* rights reserved.
*
* License to copy and use this software is granted provided that it
* is identified as the "RSA Data Security, Inc. MD4 Message-Digest
* Algorithm" in all material mentioning or referencing this software
* or this function.
*
* License is also granted to make and use derivative works provided
* that such works are identified as "derived from the RSA Data
* Security, Inc. MD4 Message-Digest Algorithm" in all material
* mentioning or referencing the derived work.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
For the srclib\apr-util\test\testdbm.c component:
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* This file came from the SDBM package (written by oz@nexus.yorku.ca).
* That package was under public domain. This file has been ported to
* APR, updated to ANSI C and other, newer idioms, and added to the Apache
* codebase under the above copyright and license.
*/
For the srclib\apr-util\test\testmd4.c component:
*
* This is derived from material copyright RSA Data Security, Inc.
* Their notice is reproduced below in its entirety.
*
* Copyright (C) 1990-2, RSA Data Security, Inc. Created 1990. All
* rights reserved.
*
* RSA Data Security, Inc. makes no representations concerning either
* the merchantability of this software or the suitability of this
* software for any particular purpose. It is provided "as is"
* without express or implied warranty of any kind.
*
* These notices must be retained in any copies of any part of this
* documentation and/or software.
*/
For the srclib\apr-util\xml\expat\conftools\install-sh component:
#
# install - install a program, script, or datafile
# This comes from X11R5 (mit/util/scripts/install.sh).
#
# Copyright 1991 by the Massachusetts Institute of Technology
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation, and that the name of M.I.T. not be used in advertising or
# publicity pertaining to distribution of the software without specific,
# written prior permission. M.I.T. makes no representations about the
# suitability of this software for any purpose. It is provided "as is"
# without express or implied warranty.
#
For the srclib\pcre\install-sh component:
#
# Copyright 1991 by the Massachusetts Institute of Technology
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation, and that the name of M.I.T. not be used in advertising or
# publicity pertaining to distribution of the software without specific,
# written prior permission. M.I.T. makes no representations about the
# suitability of this software for any purpose. It is provided "as is"
# without express or implied warranty.
For the pcre component:
PCRE LICENCE
------------
PCRE is a library of functions to support regular expressions whose syntax
and semantics are as close as possible to those of the Perl 5 language.
Written by: Philip Hazel <ph10@cam.ac.uk>
University of Cambridge Computing Service,
Cambridge, England. Phone: +44 1223 334714.
Copyright (c) 1997-2001 University of Cambridge
Permission is granted to anyone to use this software for any purpose on any
computer system, and to redistribute it freely, subject to the following
restrictions:
1. This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
2. The origin of this software must not be misrepresented, either by
explicit claim or by omission. In practice, this means that if you use
PCRE in software which you distribute to others, commercially or
otherwise, you must put a sentence like this
Regular expression support is provided by the PCRE library package,
which is open source software, written by Philip Hazel, and copyright
by the University of Cambridge, England.
somewhere reasonably visible in your documentation and in any relevant
files or online help data or similar. A reference to the ftp site for
the source, that is, to
ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/
should also be given in the documentation.
3. Altered versions must be plainly marked as such, and must not be
misrepresented as being the original software.
4. If PCRE is embedded in any software that is released under the GNU
General Purpose Licence (GPL), or Lesser General Purpose Licence (LGPL),
then the terms of that licence shall supersede any condition above with
which it is incompatible.
The documentation for PCRE, supplied in the "doc" directory, is distributed
under the same terms as the software itself.
End PCRE LICENCE
For the test\zb.c component:
/* ZeusBench V1.01
===============
This program is Copyright (C) Zeus Technology Limited 1996.
This program may be used and copied freely providing this copyright notice
is not removed.
This software is provided "as is" and any express or implied warranties,
including, but not limited to, the implied warranties of merchantability and
fitness for a particular purpose are disclaimed. In no event shall
Zeus Technology Ltd. be liable for any direct, indirect, incidental, special,
exemplary, or consequential damages (including, but not limited to,
procurement of substitute goods or services; loss of use, data, or profits;
or business interruption) however caused and on any theory of liability, whether
in contract, strict liability or tort (including negligence or otherwise)
arising in any way out of the use of this software, even if advised of the
possibility of such damage.
Written by Adam Twiss (adam@zeus.co.uk). March 1996
Thanks to the following people for their input:
Mike Belshe (mbelshe@netscape.com)
Michael Campanella (campanella@stevms.enet.dec.com)
*/
For the expat xml parser component:
Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd
and Clark Cooper
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
====================================================================

8
vendor/gopkg.in/ns1/ns1-go.v2/doc.go generated vendored Normal file
View file

@ -0,0 +1,8 @@
// Package ns1 is the NS1 Go SDK.
//
// To understand the REST models and terminology,
// please visit the ns1 web page:
//
// https://ns1.com/
//
package ns1

143
vendor/gopkg.in/ns1/ns1-go.v2/rest/account_apikey.go generated vendored Normal file
View file

@ -0,0 +1,143 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)
// APIKeysService handles 'account/apikeys' endpoint.
type APIKeysService service
// List returns all api keys in the account.
//
// NS1 API docs: https://ns1.com/api/#apikeys-get
func (s *APIKeysService) List() ([]*account.APIKey, *http.Response, error) {
req, err := s.client.NewRequest("GET", "account/apikeys", nil)
if err != nil {
return nil, nil, err
}
kl := []*account.APIKey{}
resp, err := s.client.Do(req, &kl)
if err != nil {
return nil, resp, err
}
return kl, resp, nil
}
// Get returns the details of a single API key, including permissions.
// Note: the keyID in the URL is the key's ID, not the API key string itself.
//
// NS1 API docs: https://ns1.com/api/#apikeys-id-get
func (s *APIKeysService) Get(keyID string) (*account.APIKey, *http.Response, error) {
path := fmt.Sprintf("account/apikeys/%s", keyID)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var a account.APIKey
resp, err := s.client.Do(req, &a)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "unknown api key" {
return nil, resp, ErrKeyMissing
}
// Propagate other API errors instead of silently dropping them.
return nil, resp, err
default:
return nil, resp, err
}
}
return &a, resp, nil
}
// Create takes a *APIKey and creates a new account apikey.
//
// NS1 API docs: https://ns1.com/api/#apikeys-put
func (s *APIKeysService) Create(a *account.APIKey) (*http.Response, error) {
req, err := s.client.NewRequest("PUT", "account/apikeys", &a)
if err != nil {
return nil, err
}
// Update the key fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &a)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == fmt.Sprintf("api key with name \"%s\" exists", a.Name) {
return resp, ErrKeyExists
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Update changes the name or access rights for an API Key.
//
// NS1 API docs: https://ns1.com/api/#apikeys-id-post
func (s *APIKeysService) Update(a *account.APIKey) (*http.Response, error) {
path := fmt.Sprintf("account/apikeys/%s", a.ID)
req, err := s.client.NewRequest("POST", path, &a)
if err != nil {
return nil, err
}
// Update the apikey fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &a)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "unknown api key" {
return resp, ErrKeyMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Delete deletes an apikey.
//
// NS1 API docs: https://ns1.com/api/#apikeys-id-delete
func (s *APIKeysService) Delete(keyID string) (*http.Response, error) {
path := fmt.Sprintf("account/apikeys/%s", keyID)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "unknown api key" {
return resp, ErrKeyMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
var (
// ErrKeyExists bundles PUT create error.
ErrKeyExists = errors.New("Key already exists.")
// ErrKeyMissing bundles GET/POST/DELETE error.
ErrKeyMissing = errors.New("Key does not exist.")
)
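For illustration, a sketch of the intended call pattern using the sentinel errors above (not part of the vendored file; the key ID is a placeholder, and NewClient/SetAPIKey appear in client.go later in this commit):

package main

import (
	"fmt"
	"log"

	api "gopkg.in/ns1/ns1-go.v2/rest"
)

func main() {
	client := api.NewClient(nil, api.SetAPIKey("<your NS1 api key>"))

	// Look up a key by its ID (placeholder), not by the API key string itself.
	key, _, err := client.APIKeys.Get("<key id>")
	if err == api.ErrKeyMissing {
		log.Fatal("no API key with that ID")
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key.Name, key.Permissions.DNS.ViewZones)
}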

46
vendor/gopkg.in/ns1/ns1-go.v2/rest/account_setting.go generated vendored Normal file
View file

@ -0,0 +1,46 @@
package rest
import (
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)
// SettingsService handles 'account/settings' endpoint.
type SettingsService service
// Get returns the basic contact details associated with the account.
//
// NS1 API docs: https://ns1.com/api/#settings-get
func (s *SettingsService) Get() (*account.Setting, *http.Response, error) {
req, err := s.client.NewRequest("GET", "account/settings", nil)
if err != nil {
return nil, nil, err
}
var us account.Setting
resp, err := s.client.Do(req, &us)
if err != nil {
return nil, resp, err
}
return &us, resp, nil
}
// Update changes most of the basic contact details, except customerid.
//
// NS1 API docs: https://ns1.com/api/#settings-post
func (s *SettingsService) Update(us *account.Setting) (*http.Response, error) {
req, err := s.client.NewRequest("POST", "account/settings", &us)
if err != nil {
return nil, err
}
// Update the settings fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &us)
if err != nil {
return resp, err
}
return resp, nil
}

142
vendor/gopkg.in/ns1/ns1-go.v2/rest/account_team.go generated vendored Normal file
View file

@ -0,0 +1,142 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)
// TeamsService handles 'account/teams' endpoint.
type TeamsService service
// List returns all teams in the account.
//
// NS1 API docs: https://ns1.com/api/#teams-get
func (s *TeamsService) List() ([]*account.Team, *http.Response, error) {
req, err := s.client.NewRequest("GET", "account/teams", nil)
if err != nil {
return nil, nil, err
}
tl := []*account.Team{}
resp, err := s.client.Do(req, &tl)
if err != nil {
return nil, resp, err
}
return tl, resp, nil
}
// Get returns details of a single team.
//
// NS1 API docs: https://ns1.com/api/#teams-id-get
func (s *TeamsService) Get(id string) (*account.Team, *http.Response, error) {
path := fmt.Sprintf("account/teams/%s", id)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var t account.Team
resp, err := s.client.Do(req, &t)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "Unknown team id" {
return nil, resp, ErrTeamMissing
}
// Propagate other API errors instead of silently dropping them.
return nil, resp, err
default:
return nil, resp, err
}
}
return &t, resp, nil
}
// Create takes a *Team and creates a new account team.
//
// NS1 API docs: https://ns1.com/api/#teams-put
func (s *TeamsService) Create(t *account.Team) (*http.Response, error) {
req, err := s.client.NewRequest("PUT", "account/teams", &t)
if err != nil {
return nil, err
}
// Update the team fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &t)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == fmt.Sprintf("team with name \"%s\" exists", t.Name) {
return resp, ErrTeamExists
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Update changes the name or access rights for a team.
//
// NS1 API docs: https://ns1.com/api/#teams-id-post
func (s *TeamsService) Update(t *account.Team) (*http.Response, error) {
path := fmt.Sprintf("account/teams/%s", t.ID)
req, err := s.client.NewRequest("POST", path, &t)
if err != nil {
return nil, err
}
// Update the team fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &t)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "unknown team id" {
return resp, ErrTeamMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Delete deletes a team.
//
// NS1 API docs: https://ns1.com/api/#teams-id-delete
func (s *TeamsService) Delete(id string) (*http.Response, error) {
path := fmt.Sprintf("account/teams/%s", id)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "unknown team id" {
return resp, ErrTeamMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
var (
// ErrTeamExists bundles PUT create error.
ErrTeamExists = errors.New("Team already exists.")
// ErrTeamMissing bundles GET/POST/DELETE error.
ErrTeamMissing = errors.New("Team does not exist.")
)
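For illustration, a create-then-update round trip relying on the sentinel errors above (not part of the vendored file; the team name is a placeholder):

package main

import (
	"log"

	api "gopkg.in/ns1/ns1-go.v2/rest"
	"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)

func main() {
	client := api.NewClient(nil, api.SetAPIKey("<your NS1 api key>"))

	team := &account.Team{Name: "ops"} // placeholder name
	if _, err := client.Teams.Create(team); err != nil {
		if err == api.ErrTeamExists {
			log.Fatal("a team with that name already exists")
		}
		log.Fatal(err)
	}

	// Create decodes the API response back into team, filling in the
	// server-assigned ID that Update needs to build its URL.
	team.Permissions.DNS.ViewZones = true
	if _, err := client.Teams.Update(team); err != nil {
		log.Fatal(err)
	}
}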

142
vendor/gopkg.in/ns1/ns1-go.v2/rest/account_user.go generated vendored Normal file
View file

@ -0,0 +1,142 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)
// UsersService handles 'account/users' endpoint.
type UsersService service
// List returns all users in the account.
//
// NS1 API docs: https://ns1.com/api/#users-get
func (s *UsersService) List() ([]*account.User, *http.Response, error) {
req, err := s.client.NewRequest("GET", "account/users", nil)
if err != nil {
return nil, nil, err
}
ul := []*account.User{}
resp, err := s.client.Do(req, &ul)
if err != nil {
return nil, resp, err
}
return ul, resp, nil
}
// Get returns details of a single user.
//
// NS1 API docs: https://ns1.com/api/#users-user-get
func (s *UsersService) Get(username string) (*account.User, *http.Response, error) {
path := fmt.Sprintf("account/users/%s", username)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var u account.User
resp, err := s.client.Do(req, &u)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "Unknown user" {
return nil, resp, ErrUserMissing
}
// Propagate other API errors instead of silently dropping them.
return nil, resp, err
default:
return nil, resp, err
}
}
return &u, resp, nil
}
// Create takes a *User and creates a new account user.
//
// NS1 API docs: https://ns1.com/api/#users-put
func (s *UsersService) Create(u *account.User) (*http.Response, error) {
req, err := s.client.NewRequest("PUT", "account/users", &u)
if err != nil {
return nil, err
}
// Update the user fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &u)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "request failed:Login Name is already in use." {
return resp, ErrUserExists
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Update changes contact details, notification settings, or access rights for a user.
//
// NS1 API docs: https://ns1.com/api/#users-user-post
func (s *UsersService) Update(u *account.User) (*http.Response, error) {
path := fmt.Sprintf("account/users/%s", u.Username)
req, err := s.client.NewRequest("POST", path, &u)
if err != nil {
return nil, err
}
// Update the user fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &u)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "Unknown user" {
return resp, ErrUserMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
// Delete deletes a user.
//
// NS1 API docs: https://ns1.com/api/#users-user-delete
func (s *UsersService) Delete(username string) (*http.Response, error) {
path := fmt.Sprintf("account/users/%s", username)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
switch err := err.(type) {
case *Error:
if err.Message == "Unknown user" {
return resp, ErrUserMissing
}
return resp, err
default:
return resp, err
}
}
return resp, nil
}
var (
// ErrUserExists bundles PUT create error.
ErrUserExists = errors.New("User already exists.")
// ErrUserMissing bundles GET/POST/DELETE error.
ErrUserMissing = errors.New("User does not exist.")
)

47
vendor/gopkg.in/ns1/ns1-go.v2/rest/account_warning.go generated vendored Normal file
View file

@ -0,0 +1,47 @@
package rest
import (
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/account"
)
// WarningsService handles 'account/usagewarnings' endpoint.
type WarningsService service
// Get returns toggles and thresholds used when sending overage warning
// alert messages to users with billing notifications enabled.
//
// NS1 API docs: https://ns1.com/api/#usagewarnings-get
func (s *WarningsService) Get() (*account.UsageWarning, *http.Response, error) {
req, err := s.client.NewRequest("GET", "account/usagewarnings", nil)
if err != nil {
return nil, nil, err
}
var uw account.UsageWarning
resp, err := s.client.Do(req, &uw)
if err != nil {
return nil, resp, err
}
return &uw, resp, nil
}
// Update changes alerting toggles and thresholds for overage warning alert messages.
//
// NS1 API docs: https://ns1.com/api/#usagewarnings-post
func (s *WarningsService) Update(uw *account.UsageWarning) (*http.Response, error) {
req, err := s.client.NewRequest("POST", "account/usagewarnings", &uw)
if err != nil {
return nil, err
}
// Update the usage warning fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &uw)
if err != nil {
return resp, err
}
return resp, nil
}

273
vendor/gopkg.in/ns1/ns1-go.v2/rest/client.go generated vendored Normal file
View file

@ -0,0 +1,273 @@
package rest
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"time"
)
const (
clientVersion = "2.0.0"
defaultEndpoint = "https://api.nsone.net/v1/"
defaultUserAgent = "go-ns1/" + clientVersion
headerAuth = "X-NSONE-Key"
headerRateLimit = "X-Ratelimit-Limit"
headerRateRemaining = "X-Ratelimit-Remaining"
headerRatePeriod = "X-Ratelimit-Period"
)
// Doer is a single-method interface that lets a user extend or augment the
// HTTP transport used by a Client.
// Note: *http.Client satisfies the Doer interface.
type Doer interface {
Do(*http.Request) (*http.Response, error)
}
// Client manages communication with the NS1 Rest API.
type Client struct {
// httpClient handles all rest api communication and accepts
// anything satisfying the Doer interface, such as an *http.Client.
httpClient Doer
// NS1 rest endpoint, overrides default if given.
Endpoint *url.URL
// NS1 api key (value for http request header 'X-NSONE-Key').
APIKey string
// NS1 go rest user agent (value for http request header 'User-Agent').
UserAgent string
// Func to call with the parsed rate limit after each response returned by Do.
RateLimitFunc func(RateLimit)
// From the excellent go-github client.
common service // Reuse a single struct instead of allocating one for each service on the heap.
// Services used for communicating with different components of the NS1 API.
APIKeys *APIKeysService
DataFeeds *DataFeedsService
DataSources *DataSourcesService
Jobs *JobsService
Notifications *NotificationsService
Records *RecordsService
Settings *SettingsService
Teams *TeamsService
Users *UsersService
Warnings *WarningsService
Zones *ZonesService
}
// NewClient constructs and returns a new Client, applying any given option functions.
func NewClient(httpClient Doer, options ...func(*Client)) *Client {
endpoint, _ := url.Parse(defaultEndpoint)
if httpClient == nil {
httpClient = http.DefaultClient
}
c := &Client{
httpClient: httpClient,
Endpoint: endpoint,
RateLimitFunc: defaultRateLimitFunc,
UserAgent: defaultUserAgent,
}
c.common.client = c
c.APIKeys = (*APIKeysService)(&c.common)
c.DataFeeds = (*DataFeedsService)(&c.common)
c.DataSources = (*DataSourcesService)(&c.common)
c.Jobs = (*JobsService)(&c.common)
c.Notifications = (*NotificationsService)(&c.common)
c.Records = (*RecordsService)(&c.common)
c.Settings = (*SettingsService)(&c.common)
c.Teams = (*TeamsService)(&c.common)
c.Users = (*UsersService)(&c.common)
c.Warnings = (*WarningsService)(&c.common)
c.Zones = (*ZonesService)(&c.common)
for _, option := range options {
option(c)
}
return c
}
type service struct {
client *Client
}
// SetHTTPClient sets a Client instance's httpClient.
func SetHTTPClient(httpClient Doer) func(*Client) {
return func(c *Client) { c.httpClient = httpClient }
}
// SetAPIKey sets a Client instance's APIKey.
func SetAPIKey(key string) func(*Client) {
return func(c *Client) { c.APIKey = key }
}
// SetEndpoint sets a Client instance's Endpoint.
func SetEndpoint(endpoint string) func(*Client) {
return func(c *Client) { c.Endpoint, _ = url.Parse(endpoint) }
}
// SetUserAgent sets a Client instance's user agent.
func SetUserAgent(ua string) func(*Client) {
return func(c *Client) { c.UserAgent = ua }
}
// SetRateLimitFunc sets a Client instance's RateLimitFunc.
func SetRateLimitFunc(ratefunc func(rl RateLimit)) func(*Client) {
return func(c *Client) { c.RateLimitFunc = ratefunc }
}
// Do executes the request via the underlying Doer, checks the response for
// API errors, and, when v is non-nil, decodes the JSON body into v.
func (c Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = CheckResponse(resp)
if err != nil {
return resp, err
}
rl := parseRate(resp)
c.RateLimitFunc(rl)
if v != nil {
// Try to unmarshal the body into the given type using a streaming decoder.
if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
return resp, err
}
}
return resp, err
}
// NewRequest constructs and returns an http.Request.
func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) {
rel, err := url.Parse(path)
if err != nil {
return nil, err
}
uri := c.Endpoint.ResolveReference(rel)
// Encode body as json
buf := new(bytes.Buffer)
if body != nil {
err := json.NewEncoder(buf).Encode(body)
if err != nil {
return nil, err
}
}
req, err := http.NewRequest(method, uri.String(), buf)
if err != nil {
return nil, err
}
req.Header.Add(headerAuth, c.APIKey)
req.Header.Add("User-Agent", c.UserAgent)
return req, nil
}
// Response wraps stdlib http response.
type Response struct {
*http.Response
}
// Error represents an NS1 API error, wrapping any http response outside the 2xx range.
type Error struct {
Resp *http.Response
Message string
}
// Error satisfies the standard library error interface.
func (re *Error) Error() string {
return fmt.Sprintf("%v %v: %d %v", re.Resp.Request.Method, re.Resp.Request.URL, re.Resp.StatusCode, re.Message)
}
// CheckResponse handles parsing of rest api errors. Returns nil if no error.
func CheckResponse(resp *http.Response) error {
if c := resp.StatusCode; c >= 200 && c <= 299 {
return nil
}
restErr := &Error{Resp: resp}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(b) == 0 {
return restErr
}
err = json.Unmarshal(b, restErr)
if err != nil {
return err
}
return restErr
}
// RateLimitFunc is a rate limiting strategy for the Client instance.
type RateLimitFunc func(RateLimit)
// RateLimit stores the X-Ratelimit-* headers from a response.
type RateLimit struct {
Limit int
Remaining int
Period int
}
var defaultRateLimitFunc = func(rl RateLimit) {}
// PercentageLeft returns the ratio of Remaining to Limit as a percentage.
func (rl RateLimit) PercentageLeft() int {
if rl.Limit == 0 {
return 0
}
return rl.Remaining * 100 / rl.Limit
}
// WaitTime returns the Period spread evenly across the full Limit.
func (rl RateLimit) WaitTime() time.Duration {
if rl.Limit == 0 {
return 0
}
return (time.Second * time.Duration(rl.Period)) / time.Duration(rl.Limit)
}
// WaitTimeRemaining returns the Period spread across the Remaining budget.
// When the budget is exhausted it returns the full period, avoiding a
// division by zero.
func (rl RateLimit) WaitTimeRemaining() time.Duration {
if rl.Remaining <= 0 {
return time.Second * time.Duration(rl.Period)
}
return (time.Second * time.Duration(rl.Period)) / time.Duration(rl.Remaining)
}
// RateLimitStrategySleep sets RateLimitFunc to sleep WaitTimeRemaining between requests.
func (c *Client) RateLimitStrategySleep() {
c.RateLimitFunc = func(rl RateLimit) {
remaining := rl.WaitTimeRemaining()
time.Sleep(remaining)
}
}
// parseRate parses the rate-limit related headers from an http response.
func parseRate(resp *http.Response) RateLimit {
var rl RateLimit
if limit := resp.Header.Get(headerRateLimit); limit != "" {
rl.Limit, _ = strconv.Atoi(limit)
}
if remaining := resp.Header.Get(headerRateRemaining); remaining != "" {
rl.Remaining, _ = strconv.Atoi(remaining)
}
if period := resp.Header.Get(headerRatePeriod); period != "" {
rl.Period, _ = strconv.Atoi(period)
}
return rl
}
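NewClient follows the functional-options pattern: each Set* helper returns a func(*Client) that is applied in order, and anything satisfying Doer (such as an *http.Client) can serve as the transport. As a worked example of the rate-limit arithmetic, Limit=10 over Period=60s gives a WaitTime of 6s, and with Remaining=2 WaitTimeRemaining is 30s. A wiring sketch (not part of the vendored file; the key is a placeholder):

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	api "gopkg.in/ns1/ns1-go.v2/rest"
)

func main() {
	// Any Doer works here; a plain *http.Client with a timeout is typical.
	httpClient := &http.Client{Timeout: 10 * time.Second}

	client := api.NewClient(httpClient,
		api.SetAPIKey("<your NS1 api key>"),
		api.SetUserAgent("my-tool/0.1"),
	)
	// Sleep between requests to spread out the remaining rate-limit budget.
	client.RateLimitStrategySleep()

	sources, _, err := client.DataSources.List()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(sources), "data sources")
}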

116
vendor/gopkg.in/ns1/ns1-go.v2/rest/data_feed.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package rest
import (
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/data"
)
// DataFeedsService handles 'data/feeds' endpoint.
type DataFeedsService service
// List returns all data feeds connected to a given data source.
//
// NS1 API docs: https://ns1.com/api/#feeds-get
func (s *DataFeedsService) List(sourceID string) ([]*data.Feed, *http.Response, error) {
path := fmt.Sprintf("data/feeds/%s", sourceID)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
dfl := []*data.Feed{}
resp, err := s.client.Do(req, &dfl)
if err != nil {
return nil, resp, err
}
return dfl, resp, nil
}
// Get takes a data source ID and a data feed ID and returns the details of a single data feed.
//
// NS1 API docs: https://ns1.com/api/#feeds-feed-get
func (s *DataFeedsService) Get(sourceID string, feedID string) (*data.Feed, *http.Response, error) {
path := fmt.Sprintf("data/feeds/%s/%s", sourceID, feedID)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var df data.Feed
resp, err := s.client.Do(req, &df)
if err != nil {
return nil, resp, err
}
return &df, resp, nil
}
// Create takes a *DataFeed and connects a new data feed to an existing data source.
//
// NS1 API docs: https://ns1.com/api/#feeds-put
func (s *DataFeedsService) Create(sourceID string, df *data.Feed) (*http.Response, error) {
path := fmt.Sprintf("data/feeds/%s", sourceID)
req, err := s.client.NewRequest("PUT", path, &df)
if err != nil {
return nil, err
}
// Update the data feed fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &df)
if err != nil {
return resp, err
}
return resp, nil
}
// Update takes a *Feed and modifies an existing data feed.
// Note:
// - The 'data' portion of a feed is not actually
// updated during a POST. To update a feed's
// 'data' attribute, use the Publish method.
// - Neither the 'destinations' nor the 'networks' attribute is
// updated during a POST.
//
// NS1 API docs: https://ns1.com/api/#feeds-post
func (s *DataFeedsService) Update(sourceID string, df *data.Feed) (*http.Response, error) {
path := fmt.Sprintf("data/feeds/%s/%s", sourceID, df.ID)
req, err := s.client.NewRequest("POST", path, &df)
if err != nil {
return nil, err
}
// Update the df instance fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &df)
if err != nil {
return resp, err
}
return resp, nil
}
// Delete takes a data source ID and a data feed ID, and disconnects the feed from the data source and from all attached destination metadata tables.
//
// NS1 API docs: https://ns1.com/api/#feeds-delete
func (s *DataFeedsService) Delete(sourceID string, feedID string) (*http.Response, error) {
path := fmt.Sprintf("data/feeds/%s/%s", sourceID, feedID)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
return resp, err
}
return resp, nil
}

126
vendor/gopkg.in/ns1/ns1-go.v2/rest/data_source.go generated vendored Normal file
View file

@ -0,0 +1,126 @@
package rest
import (
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/data"
)
// DataSourcesService handles 'data/sources' endpoint.
type DataSourcesService service
// List returns all connected data sources.
//
// NS1 API docs: https://ns1.com/api/#sources-get
func (s *DataSourcesService) List() ([]*data.Source, *http.Response, error) {
req, err := s.client.NewRequest("GET", "data/sources", nil)
if err != nil {
return nil, nil, err
}
dsl := []*data.Source{}
resp, err := s.client.Do(req, &dsl)
if err != nil {
return nil, resp, err
}
return dsl, resp, nil
}
// Get takes an ID and returns the details for a single data source.
//
// NS1 API docs: https://ns1.com/api/#sources-source-get
func (s *DataSourcesService) Get(id string) (*data.Source, *http.Response, error) {
path := fmt.Sprintf("data/sources/%s", id)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var ds data.Source
resp, err := s.client.Do(req, &ds)
if err != nil {
return nil, resp, err
}
return &ds, resp, nil
}
// Create takes a *DataSource and creates a new data source.
//
// NS1 API docs: https://ns1.com/api/#sources-put
func (s *DataSourcesService) Create(ds *data.Source) (*http.Response, error) {
req, err := s.client.NewRequest("PUT", "data/sources", &ds)
if err != nil {
return nil, err
}
// Update the data source fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &ds)
if err != nil {
return resp, err
}
return resp, nil
}
// Update takes a *DataSource and modifies basic details of a data source.
// NOTE: This does not 'publish' data. See the Publish method.
//
// NS1 API docs: https://ns1.com/api/#sources-post
func (s *DataSourcesService) Update(ds *data.Source) (*http.Response, error) {
path := fmt.Sprintf("data/sources/%s", ds.ID)
req, err := s.client.NewRequest("POST", path, &ds)
if err != nil {
return nil, err
}
// Update the data source instance fields with data from the API (to keep them consistent).
resp, err := s.client.Do(req, &ds)
if err != nil {
return resp, err
}
return resp, nil
}
// Delete takes an ID and removes an existing data source and all connected feeds from the source.
//
// NS1 API docs: https://ns1.com/api/#sources-delete
func (s *DataSourcesService) Delete(id string) (*http.Response, error) {
path := fmt.Sprintf("data/sources/%s", id)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
// Publish takes a data source's ID and the data to publish.
//
// NS1 API docs: https://ns1.com/api/#feed-post
func (s *DataSourcesService) Publish(dsID string, data interface{}) (*http.Response, error) {
path := fmt.Sprintf("feed/%s", dsID)
req, err := s.client.NewRequest("POST", path, &data)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
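Since Update never touches feed data, pushing values goes through Publish against the 'feed' endpoint. A sketch (not part of the vendored file; the source ID and feed label are placeholders, and the payload shape depends on the data source type):

package main

import (
	"log"

	api "gopkg.in/ns1/ns1-go.v2/rest"
)

func main() {
	client := api.NewClient(nil, api.SetAPIKey("<your NS1 api key>"))

	// Mark the answer behind the "web-us-east" feed label as up.
	payload := map[string]interface{}{
		"web-us-east": map[string]interface{}{"up": true},
	}
	if _, err := client.DataSources.Publish("<source id>", payload); err != nil {
		log.Fatal(err)
	}
}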

2
vendor/gopkg.in/ns1/ns1-go.v2/rest/doc.go generated vendored Normal file
View file

@ -0,0 +1,2 @@
// Package rest defines the api services used to communicate with NS1.
package rest

13
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/account/apikey.go generated vendored Normal file
View file

@ -0,0 +1,13 @@
package account
// APIKey wraps an NS1 /account/apikeys resource
type APIKey struct {
// Read-only fields, set by the API.
ID string `json:"id,omitempty"`
Key string `json:"key,omitempty"`
LastAccess int `json:"last_access,omitempty"`

Name string `json:"name"`
TeamIDs []string `json:"teams"`
Permissions PermissionsMap `json:"permissions"`
}

2
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/account/doc.go generated vendored Normal file
View file

@ -0,0 +1,2 @@
// Package account contains definitions for NS1 apikeys/teams/users/etc.
package account

44
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/account/permission.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
package account
// PermissionsMap wraps a User's "permissions" attribute
type PermissionsMap struct {
DNS PermissionsDNS `json:"dns"`
Data PermissionsData `json:"data"`
Account PermissionsAccount `json:"account"`
Monitoring PermissionsMonitoring `json:"monitoring"`
}
// PermissionsDNS wraps a User's "permissions.dns" attribute
type PermissionsDNS struct {
ViewZones bool `json:"view_zones"`
ManageZones bool `json:"manage_zones"`
ZonesAllowByDefault bool `json:"zones_allow_by_default"`
ZonesDeny []string `json:"zones_deny"`
ZonesAllow []string `json:"zones_allow"`
}
// PermissionsData wraps a User's "permissions.data" attribute
type PermissionsData struct {
PushToDatafeeds bool `json:"push_to_datafeeds"`
ManageDatasources bool `json:"manage_datasources"`
ManageDatafeeds bool `json:"manage_datafeeds"`
}
// PermissionsAccount wraps a User's "permissions.account" attribute
type PermissionsAccount struct {
ManageUsers bool `json:"manage_users"`
ManagePaymentMethods bool `json:"manage_payment_methods"`
ManagePlan bool `json:"manage_plan"`
ManageTeams bool `json:"manage_teams"`
ManageApikeys bool `json:"manage_apikeys"`
ManageAccountSettings bool `json:"manage_account_settings"`
ViewActivityLog bool `json:"view_activity_log"`
ViewInvoices bool `json:"view_invoices"`
}
// PermissionsMonitoring wraps a User's "permissions.monitoring" attribute
type PermissionsMonitoring struct {
ManageLists bool `json:"manage_lists"`
ManageJobs bool `json:"manage_jobs"`
ViewJobs bool `json:"view_jobs"`
}
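// Sketch: assembling a read-only PermissionsMap, e.g. for an auditing team
// or API key. The particular toggles chosen here are illustrative, not an
// API requirement.
func readOnlyPermissions() PermissionsMap {
return PermissionsMap{
DNS: PermissionsDNS{ViewZones: true, ZonesAllowByDefault: true},
Account: PermissionsAccount{ViewActivityLog: true, ViewInvoices: true},
Monitoring: PermissionsMonitoring{ViewJobs: true},
}
}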

View file

@ -0,0 +1,21 @@
package account
// Setting represents an account's contact info.
type Setting struct {
CustomerID int `json:"customerid,omitempty"`
FirstName string `json:"firstname,omitempty"`
LastName string `json:"lastname,omitempty"`
Company string `json:"company,omitempty"`
Phone string `json:"phone,omitempty"`
Email string `json:"email,omitempty"`
Address Address `json:"address,omitempty"`
}
// Address for Setting struct.
type Address struct {
Country string `json:"country,omitempty"`
Street string `json:"street,omitempty"`
State string `json:"state,omitempty"`
City string `json:"city,omitempty"`
Postal string `json:"postalcode,omitempty"`
}

View file

@ -0,0 +1,8 @@
package account
// Team wraps an NS1 /account/teams resource
type Team struct {
ID string `json:"id,omitempty"`
Name string `json:"name"`
Permissions PermissionsMap `json:"permissions"`
}

View file

@ -0,0 +1,19 @@
package account
// User wraps an NS1 /account/users resource
type User struct {
// Read-only fields
LastAccess float64 `json:"last_access"`
Name string `json:"name"`
Username string `json:"username"`
Email string `json:"email"`
TeamIDs []string `json:"teams"`
Notify NotificationSettings `json:"notify"`
Permissions PermissionsMap `json:"permissions"`
}
// NotificationSettings wraps a User's "notify" attribute
type NotificationSettings struct {
Billing bool `json:"billing"`
}

View file

@ -0,0 +1,17 @@
package account
// UsageWarning wraps an NS1 /account/usagewarnings resource
type UsageWarning struct {
Records Warning `json:"records"`
Queries Warning `json:"queries"`
}
// Warning contains alerting toggles and thresholds for overage warning alert messages.
// First thresholds must be smaller than Second ones and all thresholds
// must be percentages between 0 and 100.
type Warning struct {
Send bool `json:"send_warnings"`
First int `json:"warning_1"`
Second int `json:"warning_2"`
}

2
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/data/doc.go generated vendored Normal file
View file

@ -0,0 +1,2 @@
// Package data contains definitions for NS1 metadata/sources/feeds/etc.
package data

38
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/data/feed.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package data
// Destination is the target resource that receives data from a feed/source.
type Destination struct {
ID string `json:"destid"`
// All destinations must point to a record.
RecordID string `json:"record"`
// Type is the 'level' at which to apply the filters (on the targeted record).
// Options:
// - answer (highest precedence)
// - region
// - record (lowest precedence)
Type string `json:"desttype"`
SourceID string `json:"-"`
}
// NewDestination returns an empty feed destination.
func NewDestination() *Destination {
return &Destination{}
}
// Feed wraps an NS1 /data/feeds resource
type Feed struct {
ID string `json:"id,omitempty"`
Name string `json:"name"`
Config Config `json:"config,omitempty"`
Data Meta `json:"data,omitempty"`
SourceID string
}
// NewFeed returns a data feed with given name and config.
func NewFeed(name string, cfg Config) *Feed {
return &Feed{Name: name, Config: cfg}
}
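// Sketch: wiring a feed to a source and aiming it at a record's answers.
// The IDs are placeholders; in practice they come from earlier API calls.
func exampleFeedSetup(sourceID, recordID string) (*Feed, *Destination) {
feed := NewFeed("server-1 status", Config{"label": "server-1"})
feed.SourceID = sourceID
dest := NewDestination()
dest.RecordID = recordID
dest.Type = "answer" // apply feed data at the answer level (highest precedence)
dest.SourceID = sourceID
return feed, dest
}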

127
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/data/meta.go generated vendored Normal file
View file

@ -0,0 +1,127 @@
package data
// FeedPtr represents the dynamic metadata value in which a feed is providing the value.
type FeedPtr struct {
FeedID string `json:"feed,omitempty"`
}
// Meta contains information for an entity's metadata table. Metadata key/value
// pairs are used by a record's filter pipeline during a DNS query.
// Any value can also be a feed ID, indicating that the value is updated in real time.
// Structure/precedence of metadata tables:
// - Record
// - Meta <- lowest precedence in filter chain
// - Region(s)
// - Meta <- middle precedence in filter chain
// - ...
// - Answer(s)
// - Meta <- highest precedence in filter chain
// - ...
// - ...
type Meta struct {
// STATUS
// Indicates whether or not the entity is considered 'up'.
// bool or FeedPtr.
Up interface{} `json:"up,omitempty"`
// Indicates the number of active connections.
// Values must be positive.
// int or FeedPtr.
Connections interface{} `json:"connections,omitempty"`
// Indicates the number of active requests (HTTP or otherwise).
// Values must be positive.
// int or FeedPtr.
Requests interface{} `json:"requests,omitempty"`
// Indicates the "load average".
// Values must be positive, and will be rounded to the nearest tenth.
// float64 or FeedPtr.
LoadAvg interface{} `json:"loadavg,omitempty"`
// The Job ID of a Pulsar telemetry gathering job and routing granularities
// to associate with.
// string or FeedPtr.
Pulsar interface{} `json:"pulsar,omitempty"`
// GEOGRAPHICAL
// Must be between -90.0 and +90.0 where negative
// indicates South and positive indicates North.
// e.g., the latitude of the datacenter where a server resides.
// float64 or FeedPtr.
Latitude interface{} `json:"latitude,omitempty"`
// Must be between -180.0 and +180.0 where negative
// indicates West and positive indicates East.
// e.g., the longitude of the datacenter where a server resides.
// float64 or FeedPtr.
Longitude interface{} `json:"longitude,omitempty"`
// Valid geographic regions are: 'US-EAST', 'US-CENTRAL', 'US-WEST',
// 'EUROPE', 'ASIAPAC', 'SOUTH-AMERICA', 'AFRICA'.
// e.g., the rough geographic location of the Datacenter where a server resides.
// []string or FeedPtr.
Georegion interface{} `json:"georegion,omitempty"`
// Country(ies) must be specified as ISO 3166 2-character country code(s).
// []string or FeedPtr.
Country interface{} `json:"country,omitempty"`
// State(s) must be specified as standard 2-character state code(s).
// []string or FeedPtr.
USState interface{} `json:"us_state,omitempty"`
// Canadian Province(s) must be specified as standard 2-character province
// code(s).
// []string or FeedPtr.
CAProvince interface{} `json:"ca_province,omitempty"`
// INFORMATIONAL
// Notes to indicate any necessary details for operators.
// Up to 256 characters in length.
// string or FeedPtr.
Note interface{} `json:"note,omitempty"`
// NETWORK
// IP (v4 and v6) prefixes in CIDR format ("a.b.c.d/mask").
// May include up to 1000 prefixes.
// e.g., "1.2.3.4/24"
// []string or FeedPtr.
IPPrefixes interface{} `json:"ip_prefixes,omitempty"`
// Autonomous System (AS) number(s).
// May include up to 1000 AS numbers.
// []string or FeedPtr.
ASN interface{} `json:"asn,omitempty"`
// TRAFFIC
// Indicates the "priority tier".
// Lower values indicate higher priority.
// Values must be positive.
// int or FeedPtr.
Priority interface{} `json:"priority,omitempty"`
// Indicates a weight.
// Filters that use weights normalize them.
// Any positive values are allowed.
// Values between 0 and 100 are recommended for simplicity's sake.
// float64 or FeedPtr.
Weight interface{} `json:"weight,omitempty"`
// Indicates a "low watermark" to use for load shedding.
// The value should depend on the metric used to determine
// load (e.g., loadavg, connections, etc).
// int or FeedPtr.
LowWatermark interface{} `json:"low_watermark,omitempty"`
// Indicates a "high watermark" to use for load shedding.
// The value should depend on the metric used to determine
// load (e.g., loadavg, connections, etc).
// int or FeedPtr.
HighWatermark interface{} `json:"high_watermark,omitempty"`
}
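// Sketch: a metadata table mixing static values with a feed-driven one. A
// FeedPtr marshals as {"feed":"<id>"}, telling NS1 to resolve that value
// from the data feed in real time.
func exampleMeta(feedID string) Meta {
return Meta{
Up: FeedPtr{FeedID: feedID}, // resolved in real time from a feed
Weight: 10.0, // static weight
Georegion: []string{"US-EAST"}, // static region list
}
}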

View file

@ -0,0 +1,10 @@
package data
// Region is a metadata table with a name/key.
// Can be thought of as metadata groupings.
type Region struct {
Meta Meta `json:"meta,omitempty"`
}
// Regions is simply a mapping of Regions inside a record.
type Regions map[string]Region

View file

@ -0,0 +1,28 @@
package data
// Config is a flat mapping where values are simple (no slices/maps).
type Config map[string]interface{}
// Source wraps an NS1 /data/sources resource
type Source struct {
ID string `json:"id,omitempty"`
// Human readable name of the source.
Name string `json:"name"`
Type string `json:"sourcetype"`
Config Config `json:"config,omitempty"`
Status string `json:"status,omitempty"`
Feeds []*Feed `json:"feeds,omitempty"`
}
// NewSource takes a name and a source type t and returns an initialized *Source.
func NewSource(name string, t string) *Source {
return &Source{
Name: name,
Type: t,
Config: Config{},
Feeds: []*Feed{},
}
}

101
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/dns/answer.go generated vendored Normal file
View file

@ -0,0 +1,101 @@
package dns
import (
"fmt"
"strconv"
"strings"
"gopkg.in/ns1/ns1-go.v2/rest/model/data"
)
// Answer wraps the values of a Record's "filters" attribute
type Answer struct {
Meta *data.Meta `json:"meta,omitempty"`
// Answer response data, e.g.:
// Av4: ["1.1.1.1"]
// Av6: ["2001:db8:85a3::8a2e:370:7334"]
// MX: [10, "2.2.2.2"]
Rdata []string `json:"answer"`
// Region (grouping) that the answer belongs to.
RegionName string `json:"region,omitempty"`
}
func (a Answer) String() string {
return strings.Trim(fmt.Sprint(a.Rdata), "[]")
}
// SetRegion associates a region with this answer.
func (a *Answer) SetRegion(name string) {
a.RegionName = name
}
// NewAnswer creates a generic Answer with given rdata.
func NewAnswer(rdata []string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: rdata,
}
}
// NewAv4Answer creates an Answer for A record.
func NewAv4Answer(host string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{host},
}
}
// NewAv6Answer creates an Answer for AAAA record.
func NewAv6Answer(host string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{host},
}
}
// NewALIASAnswer creates an Answer for ALIAS record.
func NewALIASAnswer(host string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{host},
}
}
// NewCNAMEAnswer creates an Answer for CNAME record.
func NewCNAMEAnswer(name string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{name},
}
}
// NewTXTAnswer creates an Answer for TXT record.
func NewTXTAnswer(text string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{text},
}
}
// NewMXAnswer creates an Answer for MX record.
func NewMXAnswer(pri int, host string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{strconv.Itoa(pri), host},
}
}
// NewSRVAnswer creates an Answer for SRV record.
func NewSRVAnswer(priority, weight, port int, target string) *Answer {
return &Answer{
Meta: &data.Meta{},
Rdata: []string{
strconv.Itoa(priority),
strconv.Itoa(weight),
strconv.Itoa(port),
target,
},
}
}
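// Sketch: building typed answers and grouping one into a region.
func exampleAnswers() []*Answer {
a := NewAv4Answer("1.2.3.4")
a.SetRegion("us-east") // ties the answer to a region's metadata table
mx := NewMXAnswer(10, "mail.example.com")
return []*Answer{a, mx}
}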

2
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/dns/doc.go generated vendored Normal file
View file

@ -0,0 +1,2 @@
// Package dns contains definitions for NS1 zones/records/answers/etc.
package dns

76
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/dns/record.go generated vendored Normal file
View file

@ -0,0 +1,76 @@
package dns
import (
"fmt"
"strings"
"gopkg.in/ns1/ns1-go.v2/rest/model/data"
"gopkg.in/ns1/ns1-go.v2/rest/model/filter"
)
// Record wraps an NS1 /zone/{zone}/{domain}/{type} resource
type Record struct {
Meta *data.Meta `json:"meta,omitempty"`
ID string `json:"id,omitempty"`
Zone string `json:"zone"`
Domain string `json:"domain"`
Type string `json:"type"`
Link string `json:"link,omitempty"`
TTL int `json:"ttl,omitempty"`
UseClientSubnet *bool `json:"use_client_subnet,omitempty"`
// Answers must all be of the same type as the record.
Answers []*Answer `json:"answers"`
// The record's filter chain.
Filters []*filter.Filter `json:"filters,omitempty"`
// The record's regions.
Regions data.Regions `json:"regions,omitempty"`
}
func (r Record) String() string {
return fmt.Sprintf("%s %s", r.Domain, r.Type)
}
// NewRecord takes a zone, domain and record type t and creates a *Record with
// empty Answers.
func NewRecord(zone string, domain string, t string) *Record {
if !strings.HasSuffix(domain, zone) {
domain = fmt.Sprintf("%s.%s", domain, zone)
}
return &Record{
Meta: &data.Meta{},
Zone: zone,
Domain: domain,
Type: t,
Answers: []*Answer{},
Regions: data.Regions{},
}
}
// LinkTo sets a Record Link to an FQDN.
// to is the FQDN of the target record whose config should be used. Does
// not have to be in the same zone.
func (r *Record) LinkTo(to string) {
r.Meta = nil
r.Answers = []*Answer{}
r.Link = to
}
// AddAnswer adds an answer to the record.
func (r *Record) AddAnswer(ans *Answer) {
if r.Answers == nil {
r.Answers = []*Answer{}
}
r.Answers = append(r.Answers, ans)
}
// AddFilter adds a filter to the record's filter chain (ordering of filters matters).
func (r *Record) AddFilter(fil *filter.Filter) {
if r.Filters == nil {
r.Filters = []*filter.Filter{}
}
r.Filters = append(r.Filters, fil)
}
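// Sketch: assembling a record whose answers are filtered by health, then
// shuffled by weight. Filter order matters: NewUp prunes down answers
// before the weighted shuffle runs.
func exampleRecord() *Record {
r := NewRecord("example.com", "www", "A")
r.AddAnswer(NewAv4Answer("1.2.3.4"))
r.AddAnswer(NewAv4Answer("5.6.7.8"))
r.AddFilter(filter.NewUp())
r.AddFilter(filter.NewWeightedShuffle())
return r
}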

157
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/dns/zone.go generated vendored Normal file
View file

@ -0,0 +1,157 @@
package dns
import "gopkg.in/ns1/ns1-go.v2/rest/model/data"
// Zone wraps an NS1 /zone resource
type Zone struct {
// Zones have metadata tables, but no filters act on 'zone-level' meta.
Meta *data.Meta `json:"meta,omitempty"`
// Read-only fields
DNSServers []string `json:"dns_servers,omitempty"`
NetworkPools []string `json:"network_pools,omitempty"`
Pool string `json:"pool,omitempty"` // Deprecated
ID string `json:"id,omitempty"`
Zone string `json:"zone,omitempty"`
TTL int `json:"ttl,omitempty"`
NxTTL int `json:"nx_ttl,omitempty"`
Retry int `json:"retry,omitempty"`
Serial int `json:"serial,omitempty"`
Refresh int `json:"refresh,omitempty"`
Expiry int `json:"expiry,omitempty"`
Hostmaster string `json:"hostmaster,omitempty"`
// If this is a linked zone, Link points to an existing standard zone,
// reusing its configuration and records. Link is a zone's domain name.
Link *string `json:"link,omitempty"`
// NetworkIDs contains the IDs of the networks in which the zone is available.
// Most zones will be in the NSONE Global Network (which has ID 0).
NetworkIDs []int `json:"networks,omitempty"`
Records []*ZoneRecord `json:"records,omitempty"`
// Primary contains info to enable slaving of the zone by third-party DNS servers.
Primary *ZonePrimary `json:"primary,omitempty"`
// Secondary contains info for slaving the zone to a primary DNS server.
Secondary *ZoneSecondary `json:"secondary,omitempty"`
}
func (z Zone) String() string {
return z.Zone
}
// ZoneRecord wraps Zone's "records" attribute
type ZoneRecord struct {
Domain string `json:"Domain,omitempty"`
ID string `json:"id,omitempty"`
Link string `json:"link,omitempty"`
ShortAns []string `json:"short_answers,omitempty"`
Tier int `json:"tier,omitempty"`
TTL int `json:"ttl,omitempty"`
Type string `json:"type,omitempty"`
}
// ZonePrimary wraps a Zone's "primary" attribute
type ZonePrimary struct {
// Enabled determines whether AXFR queries (and optionally NOTIFY messages)
// will be enabled for the zone.
Enabled bool `json:"enabled"`
Secondaries []ZoneSecondaryServer `json:"secondaries"`
}
// ZoneSecondaryServer wraps elements of a Zone's "primary.secondaries" attribute
type ZoneSecondaryServer struct {
// Read-Only
NetworkIDs []int `json:"networks,omitempty"`
IP string `json:"ip"`
Port int `json:"port,omitempty"`
Notify bool `json:"notify"`
}
// ZoneSecondary wraps a Zone's "secondary" attribute
type ZoneSecondary struct {
// Read-Only fields
Expired bool `json:"expired,omitempty"`
LastXfr int `json:"last_xfr,omitempty"`
Status string `json:"status,omitempty"`
Error *string `json:"error"`
PrimaryIP string `json:"primary_ip,omitempty"`
PrimaryPort int `json:"primary_port,omitempty"`
Enabled bool `json:"enabled"`
TSIG *TSIG `json:"tsig"`
}
// TSIG is a zone's transaction signature.
type TSIG struct {
// Key is the encrypted TSIG key (read-only).
Key string `json:"key,omitempty"`
// Whether TSIG is enabled for a secondary zone.
Enabled bool `json:"enabled,omitempty"`
// Hash is the hashing algorithm used.
Hash string `json:"hash,omitempty"`
// Name of the TSIG key.
Name string `json:"name,omitempty"`
}
// NewZone takes a zone domain name and creates a new zone.
func NewZone(zone string) *Zone {
z := Zone{
Zone: zone,
}
return &z
}
// MakePrimary enables Primary, disables Secondary, and sets the primary's
// Secondaries to all provided ZoneSecondaryServers.
func (z *Zone) MakePrimary(secondaries ...ZoneSecondaryServer) {
z.Secondary = nil
z.Primary = &ZonePrimary{
Enabled: true,
Secondaries: secondaries,
}
if z.Primary.Secondaries == nil {
z.Primary.Secondaries = make([]ZoneSecondaryServer, 0)
}
}
// MakeSecondary enables Secondary, disables Primary, and sets the secondary's
// PrimaryIP to the provided IP.
func (z *Zone) MakeSecondary(ip string) {
z.Secondary = &ZoneSecondary{
Enabled: true,
PrimaryIP: ip,
PrimaryPort: 53,
}
z.Primary = &ZonePrimary{
Enabled: false,
Secondaries: make([]ZoneSecondaryServer, 0),
}
}
// LinkTo sets Link to a target zone domain name and unsets all other configuration properties.
// No other zone configuration properties (such as refresh, retry, etc) may be specified,
// since they are all pulled from the target zone. Linked zones, once created, cannot be
// configured at all and cannot have records added to them. They may only be deleted, which
// does not affect the target zone at all.
func (z *Zone) LinkTo(to string) {
z.Meta = nil
z.TTL = 0
z.NxTTL = 0
z.Retry = 0
z.Refresh = 0
z.Expiry = 0
z.Primary = nil
z.DNSServers = nil
z.NetworkIDs = nil
z.NetworkPools = nil
z.Hostmaster = ""
z.Pool = ""
z.Secondary = nil
z.Link = &to
}
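// Sketch: a primary zone that allows AXFR to one secondary server, plus a
// zone slaved from an external primary. The IPs are placeholders.
func exampleZones() (*Zone, *Zone) {
primary := NewZone("example.com")
primary.MakePrimary(ZoneSecondaryServer{IP: "203.0.113.10", Notify: true})
secondary := NewZone("example.org")
secondary.MakeSecondary("203.0.113.20")
return primary, secondary
}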

View file

@ -0,0 +1,2 @@
// Package filter contains definitions for NS1 filter chains.
package filter

View file

@ -0,0 +1,182 @@
package filter
// Filter wraps the values of a Record's "filters" attribute
type Filter struct {
Type string `json:"filter"`
Disabled bool `json:"disabled,omitempty"`
Config Config `json:"config"`
}
// Enable a filter.
func (f *Filter) Enable() {
f.Disabled = false
}
// Disable a filter.
func (f *Filter) Disable() {
f.Disabled = true
}
// Config is a flat mapping where values are simple (no slices/maps).
type Config map[string]interface{}
// NewSelFirstN returns a filter that eliminates all but the
// first N answers from the list.
func NewSelFirstN(n int) *Filter {
return &Filter{
Type: "select_first_n",
Config: Config{"N": n},
}
}
// NewShuffle returns a filter that randomly sorts the answers.
func NewShuffle() *Filter {
return &Filter{Type: "shuffle", Config: Config{}}
}
// GEOGRAPHICAL FILTERS
// NewSelFirstRegion returns a filter that keeps only the answers
// that are in the same region as the first answer.
func NewSelFirstRegion() *Filter {
return &Filter{Type: "select_first_n", Config: Config{}}
}
// NewStickyRegion first sorts regions uniquely depending on the IP
// address of the requester, and then groups all answers together by
// region. The same requester always gets the same ordering of regions,
// but answers within each region may be in any order. byNetwork indicates
// whether to apply the 'stickiness' by subnet (not individual IP).
func NewStickyRegion(byNetwork bool) *Filter {
return &Filter{
Type: "sticky_region",
Config: Config{"sticky_by_network": byNetwork},
}
}
// NewGeofenceCountry returns a filter that fences using "country",
// "us_state", and "ca_province" metadata fields in answers. Only
// answers in the same country/state/province as the user (or
// answers with no specified location) are returned. rmNoLoc determines
// whether to remove answers without location on any match.
func NewGeofenceCountry(rmNoLoc bool) *Filter {
return &Filter{
Type: "geofence_country",
Config: Config{"remove_no_location": rmNoLoc},
}
}
// NewGeofenceRegional returns a filter that restricts to answers in the
// same geographical region as the requester. rmNoGeo determines whether
// to remove answers without georegion on any match.
func NewGeofenceRegional(rmNoGeo bool) *Filter {
return &Filter{
Type: "geofence_regional",
Config: Config{"remove_no_georegion": rmNoGeo},
}
}
// NewGeotargetCountry returns a filter that sorts answers by distance
// to requester by country, US state, and/or Canadian province.
func NewGeotargetCountry() *Filter {
return &Filter{Type: "geofence_country", Config: Config{}}
}
// NewGeotargetLatLong returns a filter that sorts answers by distance
// to user using lat/long.
func NewGeotargetLatLong() *Filter {
return &Filter{Type: "geotarget_latlong", Config: Config{}}
}
// NewGeotargetRegional returns a filter that sorts answers by distance
// to user by geographical region.
func NewGeotargetRegional() *Filter {
return &Filter{Type: "geotarget_regional", Config: Config{}}
}
// NETWORK FILTERS
// NewSticky returns a filter that sorts answers uniquely depending
// on the IP address of the requester. The same requester always
// gets the same ordering of answers. byNetwork indicates whether
// to apply the 'stickiness' by subnet (not individual IP).
func NewSticky(byNetwork bool) *Filter {
return &Filter{
Type: "sticky",
Config: Config{"sticky_by_network": byNetwork},
}
}
// NewWeightedSticky returns a filter that shuffles answers randomly
// per-requester based on weight. byNetwork indicates whether to
// apply the 'stickiness' by subnet (not individual IP).
func NewWeightedSticky(byNetwork bool) *Filter {
return &Filter{
Type: "weighted_sticky",
Config: Config{"sticky_by_network": byNetwork},
}
}
// NewIPv4PrefixShuffle returns a filter that randomly selects
// IPv4 addresses from a prefix list. This filter can only be used
// on A records. n is the number of IPs to randomly select per answer.
func NewIPv4PrefixShuffle(n int) *Filter {
return &Filter{
Type: "ipv4_prefix_shuffle",
Config: Config{"N": n},
}
}
// NewNetfenceASN returns a filter that restricts to answers where
// the ASN of the requester's IP matches the ASN list. rmNoASN determines
// whether to remove answers without an ASN list on any match.
func NewNetfenceASN(rmNoASN bool) *Filter {
return &Filter{
Type: "netfence_asn",
Config: Config{"remove_no_asn": rmNoASN},
}
}
// NewNetfencePrefix returns a filter that restricts to answers where
// the requester's IP matches the prefix list. rmNoIPPrefix determines
// whether to remove answers without IP prefixes on any match.
func NewNetfencePrefix(rmNoIPPrefix bool) *Filter {
return &Filter{
Type: "netfence_prefix",
Config: Config{"remove_no_ip_prefixes": rmNoIPPrefix},
}
}
// STATUS FILTERS
// NewUp returns a filter that eliminates all answers where
// the 'up' metadata field is not true.
func NewUp() *Filter {
return &Filter{Type: "up", Config: Config{}}
}
// NewPriority returns a filter that fails over according to
// prioritized answer tiers.
func NewPriority() *Filter {
return &Filter{Type: "priority", Config: Config{}}
}
// NewShedLoad returns a filter that "sheds" traffic to answers
// based on load, using one of several load metrics. You must set
// values for low_watermark, high_watermark, and the configured
// load metric, for each answer you intend to subject to load
// shedding.
func NewShedLoad(metric string) *Filter {
return &Filter{
Type: "shed_load",
Config: Config{"metric": metric},
}
}
// TRAFFIC FILTERS
// NewWeightedShuffle returns a filter that shuffles answers
// randomly based on their weight.
func NewWeightedShuffle() *Filter {
return &Filter{Type: "weighted_shuffle", Config: Config{}}
}
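// Sketch: a typical health-plus-proximity chain. Order is significant:
// prune unhealthy answers first, sort the remainder by distance, then
// truncate to the single closest answer.
func exampleChain() []*Filter {
return []*Filter{
NewUp(),
NewGeotargetLatLong(),
NewSelFirstN(1),
}
}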

View file

@ -0,0 +1,4 @@
package monitor
// Config is a flat mapping where values are simple (no slices/maps).
type Config map[string]interface{}

View file

@ -0,0 +1,2 @@
// Package monitor contains definitions for NS1 monitoring jobs.
package monitor

172
vendor/gopkg.in/ns1/ns1-go.v2/rest/model/monitor/job.go generated vendored Normal file
View file

@ -0,0 +1,172 @@
package monitor
// Job wraps an NS1 /monitoring/jobs resource
type Job struct {
ID string `json:"id,omitempty"`
// The id of the notification list to send notifications to.
NotifyListID string `json:"notify_list"`
// Type of monitor to be run.
// Available job types:
// - http: Do an HTTP request against a webserver
// - dns: Do a DNS lookup against a nameserver
// - tcp: Connect to a TCP port on a host
// - ping: Ping a host using ICMP packets
Type string `json:"job_type"`
// Configuration dictionary (keys/values depend on the job's type).
Config Config `json:"config"`
// The current status of the monitor.
Status map[string]Status `json:"status,omitempty"`
// Rules for determining failure conditions.
Rules []*Rule `json:"rules"`
// List of regions in which to run the monitor.
// eg, ["dal", "sin", "sjc", "lga", "ams"]
Regions []string `json:"regions"`
// Indicates if the job is active or temporarily disabled.
Active bool `json:"active"`
// Frequency (in seconds) at which to run the monitor.
Frequency int `json:"frequency"`
// The policy for determining the monitor's global status based
// on the status of the job in all regions.
// Available policies:
// - quorum: Status changes when the majority of regions agree
// - all: Status change only when all regions are in agreement
// - one: Status change if any region changes
Policy string `json:"policy"`
// Controls behavior of how the job is assigned to monitoring regions.
// Currently this must be "fixed", indicating monitoring regions are explicitly chosen.
RegionScope string `json:"region_scope"`
// Freeform notes to be included in any notifications about this job,
// e.g., instructions for operators who will receive the notifications.
Notes string `json:"notes,omitempty"`
// A free-form display name for the monitoring job.
Name string `json:"name"`
// Time (in seconds) between repeat notifications of a failed job.
// Set to 0 to disable repeating notifications.
NotifyRepeat int `json:"notify_repeat"`
// If true, on any apparent state change, the job is quickly re-run after
// one second to confirm the state change before notification.
RapidRecheck bool `json:"rapid_recheck"`
// Time (in seconds) after a failure to wait before sending a notification.
NotifyDelay int `json:"notify_delay"`
// If true, notifications are sent for any regional failure (and failback if desired),
// in addition to global state notifications.
NotifyRegional bool `json:"notify_regional"`
// If true, a notification is sent when a job returns to an "up" state.
NotifyFailback bool `json:"notify_failback"`
}
// Activate a monitoring job.
func (j *Job) Activate() {
j.Active = true
}
// Deactivate a monitoring job.
func (j *Job) Deactivate() {
j.Active = false
}
// Result wraps an element of a JobType's "results" attribute
type Result struct {
Comparators []string `json:"comparators"`
Metric bool `json:"metric"`
Validator string `json:"validator"`
ShortDesc string `json:"shortdesc"`
Type string `json:"type"`
Desc string `json:"desc"`
}
// Status wraps a value of a Job's "status" attribute
type Status struct {
Since int `json:"since"`
Status string `json:"status"`
}
// Rule wraps an element of a Job's "rules" attribute
type Rule struct {
Key string `json:"key"`
Value interface{} `json:"value"`
Comparison string `json:"comparison"`
}
// NewHTTPConfig constructs/returns a job configuration for HTTP type jobs.
// url is the URL to query. (Required)
// method is the HTTP method (valid methods are HEAD, GET, and POST).
// ua is the user agent text in the request header.
// auth is the authorization header to use in the request.
// connTimeout is the timeout (in sec) to wait for query output.
func NewHTTPConfig(url, method, ua, auth string, connTimeout int) *Config {
return &Config{
"url": url, // Required
"method": method,
"user_agent": ua,
"auth": auth,
"connection_timeout": connTimeout,
}
}
// NewDNSConfig constructs/returns a job configuration for DNS type jobs.
// host is the IP address or hostname of the nameserver to query. (Required)
// domain is the domain name to query. (Required)
// port is the DNS port to query on host.
// t is the DNS record type to query.
// respTimeout is the timeout (in ms) after sending the query to wait for output.
func NewDNSConfig(host, domain string, port int, t string, respTimeout int) *Config {
return &Config{
"host": host, // Required
"domain": domain, // Required
"port": port,
"type": t,
"response_timeout": respTimeout,
}
}
// NewTCPConfig constructs/returns a job configuration for TCP type jobs.
// host is the IP address or hostname to connect to. (Required)
// port is the tcp port to connect to on host. (Required)
// connTimeout is the timeout (in ms) before giving up on trying to connect.
// respTimeout is the timeout (in sec) after connecting to wait for output.
// send is the string to send to the host upon connecting.
// ssl determines whether to attempt negotiating an SSL connection.
func NewTCPConfig(host string, port, connTimeout, respTimeout int, send string, ssl bool) *Config {
return &Config{
"host": host, // Required
"port": port, // Required
"connection_timeout": connTimeout,
"response_timeout": respTimeout,
"send": send,
"ssl": ssl,
}
}
// NewPINGConfig constructs/returns a job configuration for PING type jobs.
// host is the IP address or hostname to ping. (Required)
// timeout is the timeout (in ms) before marking the host as failed.
// count is the number of packets to send.
// interval is the minimum time (in ms) to wait between sending each packet.
func NewPINGConfig(host string, timeout, count, interval int) *Config {
return &Config{
"host": host, // Required
"timeout": timeout,
"count": count,
"interval": interval,
}
}
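// Sketch: an HTTP monitoring job with a status-code rule. The rule key
// "status_code" and the comparison syntax are illustrative of the NS1
// job-type result conventions; consult the API docs for the exact keys.
func exampleHTTPJob(notifyListID string) *Job {
return &Job{
Name: "www health",
Type: "http",
Config: *NewHTTPConfig("https://www.example.com", "GET", "ns1-monitor", "", 5),
Regions: []string{"lga", "sjc"},
Frequency: 60,
Policy: "quorum",
RegionScope: "fixed",
Active: true,
NotifyListID: notifyListID,
Rules: []*Rule{{Key: "status_code", Value: "200", Comparison: "=="}},
}
}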

View file

@ -0,0 +1,72 @@
package monitor
// NotifyList wraps an NS1 notification list (/lists resource).
type NotifyList struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Notifications []*Notification `json:"notify_list,omitempty"`
}
// Notification represents an endpoint to alert.
type Notification struct {
Type string `json:"type,omitempty"`
Config Config `json:"config,omitempty"`
}
// NewNotifyList returns a notify list that alerts via the given notifications.
func NewNotifyList(name string, nl ...*Notification) *NotifyList {
if nl == nil {
nl = []*Notification{}
}
return &NotifyList{Name: name, Notifications: nl}
}
// NewUserNotification returns a notification that alerts via user.
func NewUserNotification(username string) *Notification {
return &Notification{
Type: "user",
Config: Config{"user": username}}
}
// NewEmailNotification returns a notification that alerts via email.
func NewEmailNotification(email string) *Notification {
return &Notification{
Type: "email",
Config: Config{"email": email}}
}
// NewFeedNotification returns a notification that alerts via datafeed.
func NewFeedNotification(sourceID string) *Notification {
return &Notification{
Type: "datafeed",
Config: Config{"sourceid": sourceID}}
}
// NewWebNotification returns a notification that alerts via webhook.
func NewWebNotification(url string) *Notification {
return &Notification{
Type: "webhook",
Config: Config{"url": url}}
}
// NewPagerDutyNotification returns a notification that alerts via pagerduty.
func NewPagerDutyNotification(key string) *Notification {
return &Notification{
Type: "pagerduty",
Config: Config{"service_key": key}}
}
// NewHipChatNotification returns a notification that alerts via hipchat.
func NewHipChatNotification(token, room string) *Notification {
return &Notification{
Type: "hipchat",
Config: Config{"token": token, "room": room}}
}
// NewSlackNotification returns a notification that alerts via slack.
func NewSlackNotification(url, username, channel string) *Notification {
return &Notification{
Type: "slack",
Config: Config{"url": url, "username": username, "channel": channel}}
}
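// Sketch: a list that alerts an email address and a webhook together.
func exampleList() *NotifyList {
return NewNotifyList("ops alerts",
NewEmailNotification("ops@example.com"),
NewWebNotification("https://hooks.example.com/ns1"),
)
}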

108
vendor/gopkg.in/ns1/ns1-go.v2/rest/monitor_job.go generated vendored Normal file
View file

@ -0,0 +1,108 @@
package rest
import (
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/monitor"
)
// JobsService handles 'monitoring/jobs' endpoint.
type JobsService service
// List returns all monitoring jobs for the account.
//
// NS1 API docs: https://ns1.com/api/#jobs-get
func (s *JobsService) List() ([]*monitor.Job, *http.Response, error) {
req, err := s.client.NewRequest("GET", "monitoring/jobs", nil)
if err != nil {
return nil, nil, err
}
mjl := []*monitor.Job{}
resp, err := s.client.Do(req, &mjl)
if err != nil {
return nil, resp, err
}
return mjl, resp, nil
}
// Get takes an ID and returns details for a specific monitoring job.
//
// NS1 API docs: https://ns1.com/api/#jobs-jobid-get
func (s *JobsService) Get(id string) (*monitor.Job, *http.Response, error) {
path := fmt.Sprintf("%s/%s", "monitoring/jobs", id)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var mj monitor.Job
resp, err := s.client.Do(req, &mj)
if err != nil {
return nil, resp, err
}
return &mj, resp, nil
}
// Create takes a *monitor.Job and creates a new monitoring job.
//
// NS1 API docs: https://ns1.com/api/#jobs-put
func (s *JobsService) Create(mj *monitor.Job) (*http.Response, error) {
path := fmt.Sprintf("%s/%s", "monitoring/jobs", mj.ID)
req, err := s.client.NewRequest("PUT", path, &mj)
if err != nil {
return nil, err
}
// Update the monitoring job's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &mj)
if err != nil {
return resp, err
}
return resp, nil
}
// Update takes a *monitor.Job and changes the configuration details of an existing monitoring job.
//
// NS1 API docs: https://ns1.com/api/#jobs-jobid-post
func (s *JobsService) Update(mj *monitor.Job) (*http.Response, error) {
path := fmt.Sprintf("%s/%s", "monitoring/jobs", mj.ID)
req, err := s.client.NewRequest("POST", path, &mj)
if err != nil {
return nil, err
}
// Update the monitoring job's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &mj)
if err != nil {
return resp, err
}
return resp, nil
}
// Delete takes an ID and immediately terminates and deletes an existing monitoring job.
//
// NS1 API docs: https://ns1.com/api/#jobs-jobid-delete
func (s *JobsService) Delete(id string) (*http.Response, error) {
path := fmt.Sprintf("%s/%s", "monitoring/jobs", id)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
return resp, err
}
return resp, nil
}

128
vendor/gopkg.in/ns1/ns1-go.v2/rest/monitor_notify.go generated vendored Normal file
View file

@ -0,0 +1,128 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/monitor"
)
// NotificationsService handles 'monitoring/lists' endpoint.
type NotificationsService service
// List returns all configured notification lists.
//
// NS1 API docs: https://ns1.com/api/#lists-get
func (s *NotificationsService) List() ([]*monitor.NotifyList, *http.Response, error) {
req, err := s.client.NewRequest("GET", "lists", nil)
if err != nil {
return nil, nil, err
}
nl := []*monitor.NotifyList{}
resp, err := s.client.Do(req, &nl)
if err != nil {
return nil, resp, err
}
return nl, resp, nil
}
// Get returns the details and notifiers associated with a specific notification list.
//
// NS1 API docs: https://ns1.com/api/#lists-listid-get
func (s *NotificationsService) Get(listID string) (*monitor.NotifyList, *http.Response, error) {
path := fmt.Sprintf("%s/%s", "lists", listID)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var nl monitor.NotifyList
resp, err := s.client.Do(req, &nl)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "unknown notification list" {
return nil, resp, ErrListMissing
}
// Propagate any other error rather than falling through to a nil error.
return nil, resp, err
}
return &nl, resp, nil
}
// Create takes a *NotifyList and creates a new notify list.
//
// NS1 API docs: https://ns1.com/api/#lists-put
func (s *NotificationsService) Create(nl *monitor.NotifyList) (*http.Response, error) {
req, err := s.client.NewRequest("PUT", "lists", &nl)
if err != nil {
return nil, err
}
// Update the notify list's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &nl)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == fmt.Sprintf("notification list with name \"%s\" exists", nl.Name) {
return resp, ErrListExists
}
return resp, err
}
return resp, nil
}
// Update adds or removes entries or otherwise updates a notification list.
//
// NS1 API docs: https://ns1.com/api/#list-listid-post
func (s *NotificationsService) Update(nl *monitor.NotifyList) (*http.Response, error) {
path := fmt.Sprintf("%s/%s", "lists", nl.ID)
req, err := s.client.NewRequest("POST", path, &nl)
if err != nil {
return nil, err
}
// Update the notify list's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &nl)
if err != nil {
return resp, err
}
return resp, nil
}
// Delete immediately deletes an existing notification list.
//
// NS1 API docs: https://ns1.com/api/#lists-listid-delete
func (s *NotificationsService) Delete(listID string) (*http.Response, error) {
path := fmt.Sprintf("%s/%s", "lists", listID)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
return resp, err
}
return resp, nil
}
var (
// ErrListExists bundles PUT create error.
ErrListExists = errors.New("Notify List already exists.")
// ErrListMissing bundles GET/POST/DELETE error.
ErrListMissing = errors.New("Notify List does not exist.")
)

134
vendor/gopkg.in/ns1/ns1-go.v2/rest/record.go generated vendored Normal file
View file

@ -0,0 +1,134 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/dns"
)
// RecordsService handles 'zones/ZONE/DOMAIN/TYPE' endpoint.
type RecordsService service
// Get takes a zone, domain and record type t and returns full configuration for a DNS record.
//
// NS1 API docs: https://ns1.com/api/#record-get
func (s *RecordsService) Get(zone, domain, t string) (*dns.Record, *http.Response, error) {
path := fmt.Sprintf("zones/%s/%s/%s", zone, domain, t)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var r dns.Record
resp, err := s.client.Do(req, &r)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "record not found" {
return nil, resp, ErrRecordMissing
}
return nil, resp, err
}
return &r, resp, nil
}
// Create takes a *Record and creates a new DNS record in the specified zone, for the specified domain, of the given record type.
//
// The given record must have at least one answer.
// NS1 API docs: https://ns1.com/api/#record-put
func (s *RecordsService) Create(r *dns.Record) (*http.Response, error) {
path := fmt.Sprintf("zones/%s/%s/%s", r.Zone, r.Domain, r.Type)
req, err := s.client.NewRequest("PUT", path, &r)
if err != nil {
return nil, err
}
// Update the record's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &r)
if err != nil {
if e, ok := err.(*Error); ok {
switch e.Message {
case "zone not found":
return resp, ErrZoneMissing
case "record already exists":
return resp, ErrRecordExists
}
}
return resp, err
}
return resp, nil
}
// Update takes a *Record and modifies configuration details for an existing DNS record.
//
// Only the fields to be updated are required in the given record.
// NS1 API docs: https://ns1.com/api/#record-post
func (s *RecordsService) Update(r *dns.Record) (*http.Response, error) {
path := fmt.Sprintf("zones/%s/%s/%s", r.Zone, r.Domain, r.Type)
req, err := s.client.NewRequest("POST", path, &r)
if err != nil {
return nil, err
}
// Update the record's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &r)
if err != nil {
if e, ok := err.(*Error); ok {
switch e.Message {
case "zone not found":
return resp, ErrZoneMissing
case "record already exists":
return resp, ErrRecordExists
}
}
return resp, err
}
return resp, nil
}
// Delete takes a zone, domain and record type t and removes an existing record and all associated answers and configuration details.
//
// NS1 API docs: https://ns1.com/api/#record-delete
func (s *RecordsService) Delete(zone string, domain string, t string) (*http.Response, error) {
path := fmt.Sprintf("zones/%s/%s/%s", zone, domain, t)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "record not found" {
return resp, ErrRecordMissing
}
return resp, err
}
return resp, nil
}
var (
// ErrRecordExists bundles PUT create error.
ErrRecordExists = errors.New("Record already exists.")
// ErrRecordMissing bundles GET/POST/DELETE error.
ErrRecordMissing = errors.New("Record does not exist.")
)

15
vendor/gopkg.in/ns1/ns1-go.v2/rest/stat.go generated vendored Normal file
View file

@ -0,0 +1,15 @@
package rest
// // GetQPSStats returns current queries per second (QPS) for the account
// func (c APIClient) GetQPSStats() (v float64, err error) {
// var s map[string]float64
// _, err = c.doHTTPUnmarshal("GET", "https://api.nsone.net/v1/stats/qps", nil, &s)
// if err != nil {
// return v, err
// }
// v, found := s["qps"]
// if !found {
// return v, errors.New("Could not find 'qps' key in returned data")
// }
// return v, nil
// }

42
vendor/gopkg.in/ns1/ns1-go.v2/rest/util.go generated vendored Normal file
View file

@ -0,0 +1,42 @@
package rest
import (
"log"
"net/http"
)
// DoerFunc satisfies the Doer interface. DoerFuncs are useful for adding
// logging/instrumentation to the http.Client that is used
// within the rest.APIClient.
type DoerFunc func(*http.Request) (*http.Response, error)
// Do is an implementation of the Doer interface. It calls the
// wrapped function on the given http.Request.
func (f DoerFunc) Do(r *http.Request) (*http.Response, error) {
return f(r)
}
// A Decorator wraps a Doer with extra behavior, and doesn't
// affect the behavior of other instances of the same type.
type Decorator func(Doer) Doer
// Decorate decorates a Doer d with all the given Decorators, in order.
// d is the core object (Doer instance) to which the layers (the Decorator slice) are applied.
func Decorate(d Doer, ds ...Decorator) Doer {
decorated := d
for _, decorate := range ds {
decorated = decorate(decorated)
}
return decorated
}
// Logging returns a Decorator that logs a Doer's requests.
// The logger instance is injected via the closure's environment.
func Logging(l *log.Logger) Decorator {
return func(d Doer) Doer {
return DoerFunc(func(r *http.Request) (*http.Response, error) {
l.Printf("%s: %s %s", r.UserAgent(), r.Method, r.URL)
return d.Do(r)
})
}
}
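// Sketch: composing a logged Doer. Any *http.Client satisfies Doer (an
// assumption based on the Doer signature used above), so the decorated
// value can be handed to the client constructor wherever a Doer is accepted.
func loggedDoer(l *log.Logger) Doer {
return Decorate(http.DefaultClient, Logging(l))
}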

144
vendor/gopkg.in/ns1/ns1-go.v2/rest/zone.go generated vendored Normal file
View file

@ -0,0 +1,144 @@
package rest
import (
"errors"
"fmt"
"net/http"
"gopkg.in/ns1/ns1-go.v2/rest/model/dns"
)
// ZonesService handles 'zones' endpoint.
type ZonesService service
// List returns all active zones and basic zone configuration details for each.
//
// NS1 API docs: https://ns1.com/api/#zones-get
func (s *ZonesService) List() ([]*dns.Zone, *http.Response, error) {
req, err := s.client.NewRequest("GET", "zones", nil)
if err != nil {
return nil, nil, err
}
zl := []*dns.Zone{}
resp, err := s.client.Do(req, &zl)
if err != nil {
return nil, resp, err
}
return zl, resp, nil
}
// Get takes a zone name and returns a single active zone and its basic configuration details.
//
// NS1 API docs: https://ns1.com/api/#zones-zone-get
func (s *ZonesService) Get(zone string) (*dns.Zone, *http.Response, error) {
path := fmt.Sprintf("zones/%s", zone)
req, err := s.client.NewRequest("GET", path, nil)
if err != nil {
return nil, nil, err
}
var z dns.Zone
resp, err := s.client.Do(req, &z)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "zone not found" {
return nil, resp, ErrZoneMissing
}
return nil, resp, err
}
return &z, resp, nil
}
// Create takes a *Zone and creates a new DNS zone.
//
// NS1 API docs: https://ns1.com/api/#zones-put
func (s *ZonesService) Create(z *dns.Zone) (*http.Response, error) {
path := fmt.Sprintf("zones/%s", z.Zone)
req, err := s.client.NewRequest("PUT", path, &z)
if err != nil {
return nil, err
}
// Update the zone's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &z)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "zone already exists" {
return resp, ErrZoneExists
}
return resp, err
}
return resp, nil
}
// Update takes a *Zone and modifies basic details of a DNS zone.
//
// NS1 API docs: https://ns1.com/api/#zones-post
func (s *ZonesService) Update(z *dns.Zone) (*http.Response, error) {
path := fmt.Sprintf("zones/%s", z.Zone)
req, err := s.client.NewRequest("POST", path, &z)
if err != nil {
return nil, err
}
// Update the zone's fields with data from the API (ensures consistency).
resp, err := s.client.Do(req, &z)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "zone not found" {
return resp, ErrZoneMissing
}
return resp, err
}
return resp, nil
}
// Delete takes a zone and destroys an existing DNS zone and all records in the zone.
//
// NS1 API docs: https://ns1.com/api/#zones-delete
func (s *ZonesService) Delete(zone string) (*http.Response, error) {
path := fmt.Sprintf("zones/%s", zone)
req, err := s.client.NewRequest("DELETE", path, nil)
if err != nil {
return nil, err
}
resp, err := s.client.Do(req, nil)
if err != nil {
if e, ok := err.(*Error); ok && e.Message == "zone not found" {
return resp, ErrZoneMissing
}
return resp, err
}
return resp, nil
}
var (
// ErrZoneExists bundles PUT create error.
ErrZoneExists = errors.New("Zone already exists.")
// ErrZoneMissing bundles GET/POST/DELETE error.
ErrZoneMissing = errors.New("Zone does not exist.")
)
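// Sketch: idempotent zone creation built on the sentinel errors above. The
// ZonesService is normally reached through the package's client (not shown
// in this file).
func ensureZone(zones *ZonesService, name string) (*dns.Zone, error) {
z := dns.NewZone(name)
if _, err := zones.Create(z); err != nil && err != ErrZoneExists {
return nil, err
}
// Created or already present; either way fetch the authoritative state.
created, _, err := zones.Get(name)
return created, err
}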

202
vendor/gopkg.in/square/go-jose.v1/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

498
vendor/gopkg.in/square/go-jose.v1/asymmetric.go generated vendored Normal file
View file

@ -0,0 +1,498 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto"
"crypto/aes"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"errors"
"fmt"
"math/big"
"gopkg.in/square/go-jose.v1/cipher"
)
// A generic RSA-based encrypter/verifier
type rsaEncrypterVerifier struct {
publicKey *rsa.PublicKey
}
// A generic RSA-based decrypter/signer
type rsaDecrypterSigner struct {
privateKey *rsa.PrivateKey
}
// A generic EC-based encrypter/verifier
type ecEncrypterVerifier struct {
publicKey *ecdsa.PublicKey
}
// A key generator for ECDH-ES
type ecKeyGenerator struct {
size int
algID string
publicKey *ecdsa.PublicKey
}
// A generic EC-based decrypter/signer
type ecDecrypterSigner struct {
privateKey *ecdsa.PrivateKey
}
// newRSARecipient creates recipientKeyInfo based on the given key.
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch keyAlg {
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
default:
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &rsaEncrypterVerifier{
publicKey: publicKey,
},
}, nil
}
// newRSASigner creates a recipientSigInfo based on the given key.
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch sigAlg {
case RS256, RS384, RS512, PS256, PS384, PS512:
default:
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: &JsonWebKey{
Key: &privateKey.PublicKey,
},
signer: &rsaDecrypterSigner{
privateKey: privateKey,
},
}, nil
}
// newECDHRecipient creates recipientKeyInfo based on the given key.
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch keyAlg {
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
default:
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
}
return recipientKeyInfo{
keyAlg: keyAlg,
keyEncrypter: &ecEncrypterVerifier{
publicKey: publicKey,
},
}, nil
}
// newECDSASigner creates a recipientSigInfo based on the given key.
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
// Verify that key management algorithm is supported by this encrypter
switch sigAlg {
case ES256, ES384, ES512:
default:
return recipientSigInfo{}, ErrUnsupportedAlgorithm
}
return recipientSigInfo{
sigAlg: sigAlg,
publicKey: &JsonWebKey{
Key: &privateKey.PublicKey,
},
signer: &ecDecrypterSigner{
privateKey: privateKey,
},
}, nil
}
// Encrypt the given payload and update the object.
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
encryptedKey, err := ctx.encrypt(cek, alg)
if err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: encryptedKey,
header: &rawHeader{},
}, nil
}
// Encrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
switch alg {
case RSA1_5:
return rsa.EncryptPKCS1v15(randReader, ctx.publicKey, cek)
case RSA_OAEP:
return rsa.EncryptOAEP(sha1.New(), randReader, ctx.publicKey, cek, []byte{})
case RSA_OAEP_256:
return rsa.EncryptOAEP(sha256.New(), randReader, ctx.publicKey, cek, []byte{})
}
return nil, ErrUnsupportedAlgorithm
}
// Decrypt the given payload and return the content encryption key.
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
return ctx.decrypt(recipient.encryptedKey, KeyAlgorithm(headers.Alg), generator)
}
// Decrypt the given payload. Based on the key encryption algorithm,
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
// Note: The random reader on decrypt operations is only used for blinding,
// so stubbing is meaningless (hence the direct use of rand.Reader).
switch alg {
case RSA1_5:
defer func() {
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
// because of an index out of bounds error, which we want to ignore.
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
// only exists for preventing crashes with unpatched versions.
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
_ = recover()
}()
// Perform some input validation.
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
if keyBytes != len(jek) {
// Input size is incorrect; the encrypted payload should always match
// the size of the public modulus (e.g. using a 2048 bit key will
// produce 256 bytes of output). Reject this since it's invalid input.
return nil, ErrCryptoFailure
}
cek, _, err := generator.genKey()
if err != nil {
return nil, ErrCryptoFailure
}
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
// the Million Message Attack on Cryptographic Message Syntax". We are
// therefore deliberately ignoring errors here.
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
return cek, nil
case RSA_OAEP:
// Use rand.Reader for RSA blinding
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
case RSA_OAEP_256:
// Use rand.Reader for RSA blinding
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
}
return nil, ErrUnsupportedAlgorithm
}
// Sign the given payload
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
var hash crypto.Hash
switch alg {
case RS256, PS256:
hash = crypto.SHA256
case RS384, PS384:
hash = crypto.SHA384
case RS512, PS512:
hash = crypto.SHA512
default:
return Signature{}, ErrUnsupportedAlgorithm
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
var out []byte
var err error
switch alg {
case RS256, RS384, RS512:
out, err = rsa.SignPKCS1v15(randReader, ctx.privateKey, hash, hashed)
case PS256, PS384, PS512:
out, err = rsa.SignPSS(randReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
})
}
if err != nil {
return Signature{}, err
}
return Signature{
Signature: out,
protected: &rawHeader{},
}, nil
}
// Verify the given payload
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
var hash crypto.Hash
switch alg {
case RS256, PS256:
hash = crypto.SHA256
case RS384, PS384:
hash = crypto.SHA384
case RS512, PS512:
hash = crypto.SHA512
default:
return ErrUnsupportedAlgorithm
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
switch alg {
case RS256, RS384, RS512:
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
case PS256, PS384, PS512:
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
}
return ErrUnsupportedAlgorithm
}
// Encrypt the given payload and update the object.
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
switch alg {
case ECDH_ES:
// ECDH-ES mode doesn't wrap a key; the shared secret is used directly as the key.
return recipientInfo{
header: &rawHeader{},
}, nil
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
default:
return recipientInfo{}, ErrUnsupportedAlgorithm
}
generator := ecKeyGenerator{
algID: string(alg),
publicKey: ctx.publicKey,
}
switch alg {
case ECDH_ES_A128KW:
generator.size = 16
case ECDH_ES_A192KW:
generator.size = 24
case ECDH_ES_A256KW:
generator.size = 32
}
kek, header, err := generator.genKey()
if err != nil {
return recipientInfo{}, err
}
block, err := aes.NewCipher(kek)
if err != nil {
return recipientInfo{}, err
}
jek, err := josecipher.KeyWrap(block, cek)
if err != nil {
return recipientInfo{}, err
}
return recipientInfo{
encryptedKey: jek,
header: &header,
}, nil
}
// Get key size for EC key generator
func (ctx ecKeyGenerator) keySize() int {
return ctx.size
}
// Get a content encryption key for ECDH-ES
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, randReader)
if err != nil {
return nil, rawHeader{}, err
}
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
headers := rawHeader{
Epk: &JsonWebKey{
Key: &priv.PublicKey,
},
}
return out, headers, nil
}
// Decrypt the given payload and return the content encryption key.
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
if headers.Epk == nil {
return nil, errors.New("square/go-jose: missing epk header")
}
publicKey, ok := headers.Epk.Key.(*ecdsa.PublicKey)
if publicKey == nil || !ok {
return nil, errors.New("square/go-jose: invalid epk header")
}
apuData := headers.Apu.bytes()
apvData := headers.Apv.bytes()
deriveKey := func(algID string, size int) []byte {
return josecipher.DeriveECDHES(algID, apuData, apvData, ctx.privateKey, publicKey, size)
}
var keySize int
switch KeyAlgorithm(headers.Alg) {
case ECDH_ES:
// ECDH-ES uses direct key agreement; no key unwrapping is necessary.
return deriveKey(string(headers.Enc), generator.keySize()), nil
case ECDH_ES_A128KW:
keySize = 16
case ECDH_ES_A192KW:
keySize = 24
case ECDH_ES_A256KW:
keySize = 32
default:
return nil, ErrUnsupportedAlgorithm
}
key := deriveKey(headers.Alg, keySize)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
}
// Sign the given payload
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
var expectedBitSize int
var hash crypto.Hash
switch alg {
case ES256:
expectedBitSize = 256
hash = crypto.SHA256
case ES384:
expectedBitSize = 384
hash = crypto.SHA384
case ES512:
expectedBitSize = 521
hash = crypto.SHA512
}
curveBits := ctx.privateKey.Curve.Params().BitSize
if expectedBitSize != curveBits {
return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
r, s, err := ecdsa.Sign(randReader, ctx.privateKey, hashed)
if err != nil {
return Signature{}, err
}
keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes += 1
}
// We serialize the outputs (r and s) into big-endian byte arrays and pad
// them with zeros on the left to make sure the sizes work out. Both arrays
// must be keyBytes long, and the output must be 2*keyBytes long.
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
sBytes := s.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
out := append(rBytesPadded, sBytesPadded...)
return Signature{
Signature: out,
protected: &rawHeader{},
}, nil
}
// Verify the given payload
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
var keySize int
var hash crypto.Hash
switch alg {
case ES256:
keySize = 32
hash = crypto.SHA256
case ES384:
keySize = 48
hash = crypto.SHA384
case ES512:
keySize = 66
hash = crypto.SHA512
}
if len(signature) != 2*keySize {
return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
}
hasher := hash.New()
// According to documentation, Write() on hash never fails
_, _ = hasher.Write(payload)
hashed := hasher.Sum(nil)
r := big.NewInt(0).SetBytes(signature[:keySize])
s := big.NewInt(0).SetBytes(signature[keySize:])
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
if !match {
return errors.New("square/go-jose: ecdsa signature failed to verify")
}
return nil
}
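// A minimal standalone sketch of the fixed-width r||s serialization that
// signPayload performs above. This is a hypothetical helper, not part of
// this package's API. For P-256 (curveBits = 256) keyBytes is 32, so an
// ES256 signature is always exactly 64 bytes regardless of leading zeros
// in r or s.
func serializeECDSASignature(r, s *big.Int, curveBits int) []byte {
	keyBytes := curveBits / 8
	if curveBits%8 > 0 {
		keyBytes++
	}
	out := make([]byte, 2*keyBytes)
	rBytes := r.Bytes()
	sBytes := s.Bytes()
	// Left-pad each half with zeros so r and s each occupy exactly keyBytes.
	copy(out[keyBytes-len(rBytes):keyBytes], rBytes)
	copy(out[2*keyBytes-len(sBytes):], sBytes)
	return out
}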

196
vendor/gopkg.in/square/go-jose.v1/cipher/cbc_hmac.go generated vendored Normal file
View file

@ -0,0 +1,196 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"bytes"
"crypto/cipher"
"crypto/hmac"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/binary"
"errors"
"hash"
)
const (
nonceBytes = 16
)
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
keySize := len(key) / 2
integrityKey := key[:keySize]
encryptionKey := key[keySize:]
blockCipher, err := newBlockCipher(encryptionKey)
if err != nil {
return nil, err
}
var hash func() hash.Hash
switch keySize {
case 16:
hash = sha256.New
case 24:
hash = sha512.New384
case 32:
hash = sha512.New
}
return &cbcAEAD{
hash: hash,
blockCipher: blockCipher,
authtagBytes: keySize,
integrityKey: integrityKey,
}, nil
}
// An AEAD based on CBC+HMAC
type cbcAEAD struct {
hash func() hash.Hash
authtagBytes int
integrityKey []byte
blockCipher cipher.Block
}
func (ctx *cbcAEAD) NonceSize() int {
return nonceBytes
}
func (ctx *cbcAEAD) Overhead() int {
// Maximum overhead is block size (for padding) plus auth tag length, where
// the length of the auth tag is equivalent to the key size.
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
}
// Seal encrypts and authenticates the plaintext.
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
// Output buffer -- must take care not to mangle plaintext input.
ciphertext := make([]byte, len(plaintext)+ctx.Overhead())[:len(plaintext)]
copy(ciphertext, plaintext)
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
cbc.CryptBlocks(ciphertext, ciphertext)
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
ret, out := resize(dst, len(dst)+len(ciphertext)+len(authtag))
copy(out, ciphertext)
copy(out[len(ciphertext):], authtag)
return ret
}
// Open decrypts and authenticates the ciphertext.
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
if len(ciphertext) < ctx.authtagBytes {
return nil, errors.New("square/go-jose: invalid ciphertext (too short)")
}
offset := len(ciphertext) - ctx.authtagBytes
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
if match != 1 {
return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)")
}
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
// Make a copy of the ciphertext buffer; we don't want to modify it in place.
buffer := append([]byte{}, []byte(ciphertext[:offset])...)
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)")
}
cbc.CryptBlocks(buffer, buffer)
// Remove padding
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
if err != nil {
return nil, err
}
ret, out := resize(dst, len(dst)+len(plaintext))
copy(out, plaintext)
return ret, nil
}
// Compute an authentication tag
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
buffer := make([]byte, len(aad)+len(nonce)+len(ciphertext)+8)
n := 0
n += copy(buffer, aad)
n += copy(buffer[n:], nonce)
n += copy(buffer[n:], ciphertext)
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad)*8))
// According to documentation, Write() on hash.Hash never fails.
hmac := hmac.New(ctx.hash, ctx.integrityKey)
_, _ = hmac.Write(buffer)
return hmac.Sum(nil)[:ctx.authtagBytes]
}
// resize ensures that the given slice has a capacity of at least n bytes.
// If the capacity of the slice is less than n, a new slice is allocated
// and the existing data will be copied.
func resize(in []byte, n int) (head, tail []byte) {
if cap(in) >= n {
head = in[:n]
} else {
head = make([]byte, n)
copy(head, in)
}
tail = head[len(in):]
return
}
// Apply padding
func padBuffer(buffer []byte, blockSize int) []byte {
missing := blockSize - (len(buffer) % blockSize)
ret, out := resize(buffer, len(buffer)+missing)
padding := bytes.Repeat([]byte{byte(missing)}, missing)
copy(out, padding)
return ret
}
// Remove padding
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
if len(buffer)%blockSize != 0 {
return nil, errors.New("square/go-jose: invalid padding")
}
last := buffer[len(buffer)-1]
count := int(last)
if count == 0 || count > blockSize || count > len(buffer) {
return nil, errors.New("square/go-jose: invalid padding")
}
padding := bytes.Repeat([]byte{last}, count)
if !bytes.HasSuffix(buffer, padding) {
return nil, errors.New("square/go-jose: invalid padding")
}
return buffer[:len(buffer)-count], nil
}
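// A minimal usage sketch for NewCBCHMAC, assuming a 32-byte key as used by
// A128CBC-HS256 (first half is the HMAC-SHA-256 integrity key, second half
// the AES-128 encryption key) and assuming additional imports of crypto/aes
// and crypto/rand. Illustrative only, not part of this package's API.
func exampleCBCHMAC(plaintext, aad []byte) ([]byte, error) {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	aead, err := NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, aead.NonceSize()) // 16 bytes, used as the CBC IV
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	ciphertext := aead.Seal(nil, nonce, plaintext, aad)
	// Open verifies the auth tag in constant time before decrypting.
	return aead.Open(nil, nonce, ciphertext, aad)
}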

75
vendor/gopkg.in/square/go-jose.v1/cipher/concat_kdf.go generated vendored Normal file
View file

@ -0,0 +1,75 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto"
"encoding/binary"
"hash"
"io"
)
type concatKDF struct {
z, info []byte
i uint32
cache []byte
hasher hash.Hash
}
// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
buffer := make([]byte, len(algID)+len(ptyUInfo)+len(ptyVInfo)+len(supPubInfo)+len(supPrivInfo))
n := 0
n += copy(buffer, algID)
n += copy(buffer[n:], ptyUInfo)
n += copy(buffer[n:], ptyVInfo)
n += copy(buffer[n:], supPubInfo)
copy(buffer[n:], supPrivInfo)
hasher := hash.New()
return &concatKDF{
z: z,
info: buffer,
hasher: hasher,
cache: []byte{},
i: 1,
}
}
func (ctx *concatKDF) Read(out []byte) (int, error) {
copied := copy(out, ctx.cache)
ctx.cache = ctx.cache[copied:]
for copied < len(out) {
ctx.hasher.Reset()
// Write on a hash.Hash never fails
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
_, _ = ctx.hasher.Write(ctx.z)
_, _ = ctx.hasher.Write(ctx.info)
hash := ctx.hasher.Sum(nil)
chunkCopied := copy(out[copied:], hash)
copied += chunkCopied
ctx.cache = hash[chunkCopied:]
ctx.i++
}
return copied, nil
}
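// A minimal usage sketch for NewConcatKDF, assuming SHA-256 and purely
// illustrative inputs: an algorithm ID and a 128-bit output length encoded
// big-endian as supPubInfo. Read always fills the buffer, so the error from
// io.ReadFull can be ignored here. Not part of this package's API.
func exampleConcatKDF(z []byte) []byte {
	supPubInfo := []byte{0x00, 0x00, 0x00, 0x80} // 128 bits
	kdf := NewConcatKDF(crypto.SHA256, z, []byte("A128GCM"), nil, nil, supPubInfo, nil)
	key := make([]byte, 16)
	_, _ = io.ReadFull(kdf, key)
	return key
}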

51
vendor/gopkg.in/square/go-jose.v1/cipher/ecdh_es.go generated vendored Normal file
View file

@ -0,0 +1,51 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto"
"crypto/ecdsa"
"encoding/binary"
)
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
// algID, partyUInfo and partyVInfo inputs must be prefixed with their length
algID := lengthPrefixed([]byte(alg))
ptyUInfo := lengthPrefixed(apuData)
ptyVInfo := lengthPrefixed(apvData)
// supPubInfo is the output key size in bits, encoded as a big-endian 32-bit integer
supPubInfo := make([]byte, 4)
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
z, _ := priv.PublicKey.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
reader := NewConcatKDF(crypto.SHA256, z.Bytes(), algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
key := make([]byte, size)
// Read on the KDF will never fail
_, _ = reader.Read(key)
return key
}
func lengthPrefixed(data []byte) []byte {
out := make([]byte, len(data)+4)
binary.BigEndian.PutUint32(out, uint32(len(data)))
copy(out[4:], data)
return out
}
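// A minimal round-trip sketch for DeriveECDHES, assuming two freshly
// generated P-256 keys, an illustrative algorithm ID, and additional
// imports of bytes, crypto/elliptic and crypto/rand; both sides derive the
// same 16-byte key. Not part of this package's API.
func exampleDeriveECDHES() (bool, error) {
	alice, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return false, err
	}
	bob, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return false, err
	}
	// Each party combines its own private key with the other's public key.
	k1 := DeriveECDHES("A128KW", nil, nil, alice, &bob.PublicKey, 16)
	k2 := DeriveECDHES("A128KW", nil, nil, bob, &alice.PublicKey, 16)
	return bytes.Equal(k1, k2), nil
}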

109
vendor/gopkg.in/square/go-jose.v1/cipher/key_wrap.go generated vendored Normal file
View file

@ -0,0 +1,109 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto/cipher"
"crypto/subtle"
"encoding/binary"
"errors"
)
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
if len(cek)%8 != 0 {
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
}
n := len(cek) / 8
r := make([][]byte, n)
for i := range r {
r[i] = make([]byte, 8)
copy(r[i], cek[i*8:])
}
buffer := make([]byte, 16)
tBytes := make([]byte, 8)
copy(buffer, defaultIV)
for t := 0; t < 6*n; t++ {
copy(buffer[8:], r[t%n])
block.Encrypt(buffer, buffer)
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
for i := 0; i < 8; i++ {
buffer[i] = buffer[i] ^ tBytes[i]
}
copy(r[t%n], buffer[8:])
}
out := make([]byte, (n+1)*8)
copy(out, buffer[:8])
for i := range r {
copy(out[(i+1)*8:], r[i])
}
return out, nil
}
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
if len(ciphertext)%8 != 0 {
return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks")
}
n := (len(ciphertext) / 8) - 1
r := make([][]byte, n)
for i := range r {
r[i] = make([]byte, 8)
copy(r[i], ciphertext[(i+1)*8:])
}
buffer := make([]byte, 16)
tBytes := make([]byte, 8)
copy(buffer[:8], ciphertext[:8])
for t := 6*n - 1; t >= 0; t-- {
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
for i := 0; i < 8; i++ {
buffer[i] = buffer[i] ^ tBytes[i]
}
copy(buffer[8:], r[t%n])
block.Decrypt(buffer, buffer)
copy(r[t%n], buffer[8:])
}
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
return nil, errors.New("square/go-jose: failed to unwrap key")
}
out := make([]byte, n*8)
for i := range r {
copy(out[i*8:], r[i])
}
return out, nil
}
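// A minimal RFC 3394 round-trip sketch for KeyWrap/KeyUnwrap, assuming a
// 16-byte KEK, a 16-byte CEK, and additional imports of bytes, crypto/aes
// and crypto/rand; the wrapped output is 8 bytes longer than the input.
// Not part of this package's API.
func exampleKeyWrap() (bool, error) {
	kek := make([]byte, 16)
	cek := make([]byte, 16)
	if _, err := rand.Read(kek); err != nil {
		return false, err
	}
	if _, err := rand.Read(cek); err != nil {
		return false, err
	}
	block, err := aes.NewCipher(kek)
	if err != nil {
		return false, err
	}
	wrapped, err := KeyWrap(block, cek) // 24 bytes: 8-byte check value + payload
	if err != nil {
		return false, err
	}
	unwrapped, err := KeyUnwrap(block, wrapped)
	if err != nil {
		return false, err
	}
	return bytes.Equal(cek, unwrapped), nil
}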

349
vendor/gopkg.in/square/go-jose.v1/crypter.go generated vendored Normal file
View file

@ -0,0 +1,349 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/ecdsa"
"crypto/rsa"
"fmt"
"reflect"
)
// Encrypter represents an encrypter which produces an encrypted JWE object.
type Encrypter interface {
Encrypt(plaintext []byte) (*JsonWebEncryption, error)
EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
SetCompression(alg CompressionAlgorithm)
}
// MultiEncrypter represents an encrypter which supports multiple recipients.
type MultiEncrypter interface {
Encrypt(plaintext []byte) (*JsonWebEncryption, error)
EncryptWithAuthData(plaintext []byte, aad []byte) (*JsonWebEncryption, error)
SetCompression(alg CompressionAlgorithm)
AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) error
}
// A generic content cipher
type contentCipher interface {
keySize() int
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
}
// A key generator (for generating/getting a CEK)
type keyGenerator interface {
keySize() int
genKey() ([]byte, rawHeader, error)
}
// A generic key encrypter
type keyEncrypter interface {
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
}
// A generic key decrypter
type keyDecrypter interface {
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
}
// A generic encrypter based on the given key encrypter and content cipher.
type genericEncrypter struct {
contentAlg ContentEncryption
compressionAlg CompressionAlgorithm
cipher contentCipher
recipients []recipientKeyInfo
keyGenerator keyGenerator
}
type recipientKeyInfo struct {
keyID string
keyAlg KeyAlgorithm
keyEncrypter keyEncrypter
}
// SetCompression sets a compression algorithm to be applied before encryption.
func (ctx *genericEncrypter) SetCompression(compressionAlg CompressionAlgorithm) {
ctx.compressionAlg = compressionAlg
}
// NewEncrypter creates an appropriate encrypter based on the key type
func NewEncrypter(alg KeyAlgorithm, enc ContentEncryption, encryptionKey interface{}) (Encrypter, error) {
encrypter := &genericEncrypter{
contentAlg: enc,
compressionAlg: NONE,
recipients: []recipientKeyInfo{},
cipher: getContentCipher(enc),
}
if encrypter.cipher == nil {
return nil, ErrUnsupportedAlgorithm
}
var keyID string
var rawKey interface{}
switch encryptionKey := encryptionKey.(type) {
case *JsonWebKey:
keyID = encryptionKey.KeyID
rawKey = encryptionKey.Key
default:
rawKey = encryptionKey
}
switch alg {
case DIRECT:
// Direct encryption mode must be treated differently
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
return nil, ErrUnsupportedKeyType
}
encrypter.keyGenerator = staticKeyGenerator{
key: rawKey.([]byte),
}
recipient, _ := newSymmetricRecipient(alg, rawKey.([]byte))
if keyID != "" {
recipient.keyID = keyID
}
encrypter.recipients = []recipientKeyInfo{recipient}
return encrypter, nil
case ECDH_ES:
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
typeOf := reflect.TypeOf(rawKey)
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
return nil, ErrUnsupportedKeyType
}
encrypter.keyGenerator = ecKeyGenerator{
size: encrypter.cipher.keySize(),
algID: string(enc),
publicKey: rawKey.(*ecdsa.PublicKey),
}
recipient, _ := newECDHRecipient(alg, rawKey.(*ecdsa.PublicKey))
if keyID != "" {
recipient.keyID = keyID
}
encrypter.recipients = []recipientKeyInfo{recipient}
return encrypter, nil
default:
// Can just add a standard recipient
encrypter.keyGenerator = randomKeyGenerator{
size: encrypter.cipher.keySize(),
}
err := encrypter.AddRecipient(alg, encryptionKey)
return encrypter, err
}
}
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
func NewMultiEncrypter(enc ContentEncryption) (MultiEncrypter, error) {
cipher := getContentCipher(enc)
if cipher == nil {
return nil, ErrUnsupportedAlgorithm
}
encrypter := &genericEncrypter{
contentAlg: enc,
compressionAlg: NONE,
recipients: []recipientKeyInfo{},
cipher: cipher,
keyGenerator: randomKeyGenerator{
size: cipher.keySize(),
},
}
return encrypter, nil
}
func (ctx *genericEncrypter) AddRecipient(alg KeyAlgorithm, encryptionKey interface{}) (err error) {
var recipient recipientKeyInfo
switch alg {
case DIRECT, ECDH_ES:
return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", alg)
}
recipient, err = makeJWERecipient(alg, encryptionKey)
if err == nil {
ctx.recipients = append(ctx.recipients, recipient)
}
return err
}
func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
switch encryptionKey := encryptionKey.(type) {
case *rsa.PublicKey:
return newRSARecipient(alg, encryptionKey)
case *ecdsa.PublicKey:
return newECDHRecipient(alg, encryptionKey)
case []byte:
return newSymmetricRecipient(alg, encryptionKey)
case *JsonWebKey:
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
if err == nil && encryptionKey.KeyID != "" {
recipient.keyID = encryptionKey.KeyID
}
return recipient, err
default:
return recipientKeyInfo{}, ErrUnsupportedKeyType
}
}
// newDecrypter creates an appropriate decrypter based on the key type
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
switch decryptionKey := decryptionKey.(type) {
case *rsa.PrivateKey:
return &rsaDecrypterSigner{
privateKey: decryptionKey,
}, nil
case *ecdsa.PrivateKey:
return &ecDecrypterSigner{
privateKey: decryptionKey,
}, nil
case []byte:
return &symmetricKeyCipher{
key: decryptionKey,
}, nil
case *JsonWebKey:
return newDecrypter(decryptionKey.Key)
default:
return nil, ErrUnsupportedKeyType
}
}
// Implementation of encrypt method producing a JWE object.
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JsonWebEncryption, error) {
return ctx.EncryptWithAuthData(plaintext, nil)
}
// Implementation of encrypt method producing a JWE object.
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JsonWebEncryption, error) {
obj := &JsonWebEncryption{}
obj.aad = aad
obj.protected = &rawHeader{
Enc: ctx.contentAlg,
}
obj.recipients = make([]recipientInfo, len(ctx.recipients))
if len(ctx.recipients) == 0 {
return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to")
}
cek, headers, err := ctx.keyGenerator.genKey()
if err != nil {
return nil, err
}
obj.protected.merge(&headers)
for i, info := range ctx.recipients {
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
if err != nil {
return nil, err
}
recipient.header.Alg = string(info.keyAlg)
if info.keyID != "" {
recipient.header.Kid = info.keyID
}
obj.recipients[i] = recipient
}
if len(ctx.recipients) == 1 {
// Move per-recipient headers into main protected header if there's
// only a single recipient.
obj.protected.merge(obj.recipients[0].header)
obj.recipients[0].header = nil
}
if ctx.compressionAlg != NONE {
plaintext, err = compress(ctx.compressionAlg, plaintext)
if err != nil {
return nil, err
}
obj.protected.Zip = ctx.compressionAlg
}
authData := obj.computeAuthData()
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
if err != nil {
return nil, err
}
obj.iv = parts.iv
obj.ciphertext = parts.ciphertext
obj.tag = parts.tag
return obj, nil
}
// Decrypt and validate the object and return the plaintext.
func (obj JsonWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
headers := obj.mergedHeaders(nil)
if len(headers.Crit) > 0 {
return nil, fmt.Errorf("square/go-jose: unsupported crit header")
}
decrypter, err := newDecrypter(decryptionKey)
if err != nil {
return nil, err
}
cipher := getContentCipher(headers.Enc)
if cipher == nil {
return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.Enc))
}
generator := randomKeyGenerator{
size: cipher.keySize(),
}
parts := &aeadParts{
iv: obj.iv,
ciphertext: obj.ciphertext,
tag: obj.tag,
}
authData := obj.computeAuthData()
var plaintext []byte
for _, recipient := range obj.recipients {
recipientHeaders := obj.mergedHeaders(&recipient)
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
if err == nil {
// Found a valid CEK -- let's try to decrypt.
plaintext, err = cipher.decrypt(cek, authData, parts)
if err == nil {
break
}
}
}
if plaintext == nil {
return nil, ErrCryptoFailure
}
// The "zip" header paramter may only be present in the protected header.
if obj.protected.Zip != "" {
plaintext, err = decompress(obj.protected.Zip, plaintext)
}
return plaintext, err
}
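// A minimal multi-recipient sketch using the types above, assuming two RSA
// public keys and the A128GCM content-encryption constant defined elsewhere
// in this package; either corresponding private key can Decrypt the
// resulting object. Illustrative only, not part of this package's API.
func exampleMultiRecipient(keyA, keyB *rsa.PublicKey, plaintext []byte) (*JsonWebEncryption, error) {
	encrypter, err := NewMultiEncrypter(A128GCM)
	if err != nil {
		return nil, err
	}
	if err := encrypter.AddRecipient(RSA_OAEP, keyA); err != nil {
		return nil, err
	}
	if err := encrypter.AddRecipient(RSA_OAEP, keyB); err != nil {
		return nil, err
	}
	return encrypter.Encrypt(plaintext)
}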

26
vendor/gopkg.in/square/go-jose.v1/doc.go generated vendored Normal file
View file

@ -0,0 +1,26 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. For the moment, it mainly focuses on
encryption and signing based on the JSON Web Encryption and JSON Web Signature
standards. The library supports both the compact and full serialization
formats, and has optional support for multiple recipients.
*/
package jose // import "gopkg.in/square/go-jose.v1"
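// A minimal end-to-end sketch of the API described above, assuming an RSA
// key pair, the A128GCM content-encryption constant defined elsewhere in
// this package, and additional imports of crypto/rand and crypto/rsa.
// Illustrative only, not part of this package's API.
func exampleRoundTrip() ([]byte, error) {
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	encrypter, err := NewEncrypter(RSA_OAEP, A128GCM, &privateKey.PublicKey)
	if err != nil {
		return nil, err
	}
	object, err := encrypter.Encrypt([]byte("Lorem ipsum dolor sit amet"))
	if err != nil {
		return nil, err
	}
	return object.Decrypt(privateKey)
}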

191
vendor/gopkg.in/square/go-jose.v1/encoding.go generated vendored Normal file
View file

@ -0,0 +1,191 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"bytes"
"compress/flate"
"encoding/base64"
"encoding/binary"
"io"
"math/big"
"regexp"
"strings"
)
var stripWhitespaceRegex = regexp.MustCompile("\\s")
// Url-safe base64 encode that strips padding
func base64URLEncode(data []byte) string {
var result = base64.URLEncoding.EncodeToString(data)
return strings.TrimRight(result, "=")
}
// Url-safe base64 decoder that adds padding
func base64URLDecode(data string) ([]byte, error) {
var missing = (4 - len(data)%4) % 4
data += strings.Repeat("=", missing)
return base64.URLEncoding.DecodeString(data)
}
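// A small round-trip sketch for the two helpers above: "fo" encodes to
// "Zm8" (standard base64 "Zm8=" with the padding stripped), and
// base64URLDecode restores the padding before decoding. Not part of this
// package's API.
func exampleBase64URL() bool {
	encoded := base64URLEncode([]byte("fo")) // "Zm8"
	decoded, err := base64URLDecode(encoded)
	return err == nil && string(decoded) == "fo"
}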
// Helper function to serialize known-good objects.
// Precondition: value is not a nil pointer.
func mustSerializeJSON(value interface{}) []byte {
out, err := MarshalJSON(value)
if err != nil {
panic(err)
}
// We never want to serialize the top-level value "null," since it's not a
// valid JOSE message. But if a caller passes in a nil pointer to this method,
// MarshalJSON will happily serialize it as the top-level value "null". If
// that value is then embedded in another operation, for instance by being
// base64-encoded and fed as input to a signing algorithm
// (https://github.com/square/go-jose/issues/22), the result will be
// incorrect. Because this method is intended for known-good objects, and a nil
// pointer is not a known-good object, we are free to panic in this case.
// Note: It's not possible to directly check whether the data pointed at by an
// interface is a nil pointer, so we do this hacky workaround.
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
if string(out) == "null" {
panic("Tried to serialize a nil pointer.")
}
return out
}
// Strip all newlines and whitespace
func stripWhitespace(data string) string {
return stripWhitespaceRegex.ReplaceAllString(data, "")
}
// Perform compression based on algorithm
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
switch algorithm {
case DEFLATE:
return deflate(input)
default:
return nil, ErrUnsupportedAlgorithm
}
}
// Perform decompression based on algorithm
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
switch algorithm {
case DEFLATE:
return inflate(input)
default:
return nil, ErrUnsupportedAlgorithm
}
}
// Compress with DEFLATE
func deflate(input []byte) ([]byte, error) {
output := new(bytes.Buffer)
// Writing to byte buffer, err is always nil
writer, _ := flate.NewWriter(output, 1)
_, _ = io.Copy(writer, bytes.NewBuffer(input))
err := writer.Close()
return output.Bytes(), err
}
// Decompress with DEFLATE
func inflate(input []byte) ([]byte, error) {
output := new(bytes.Buffer)
reader := flate.NewReader(bytes.NewBuffer(input))
_, err := io.Copy(output, reader)
if err != nil {
return nil, err
}
err = reader.Close()
return output.Bytes(), err
}
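// A minimal round-trip sketch for the helpers above: compress followed by
// decompress with the DEFLATE constant handled by the switches above
// returns the original input. Not part of this package's API.
func exampleDeflateRoundTrip(input []byte) ([]byte, error) {
	compressed, err := compress(DEFLATE, input)
	if err != nil {
		return nil, err
	}
	return decompress(DEFLATE, compressed)
}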
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
type byteBuffer struct {
data []byte
}
func newBuffer(data []byte) *byteBuffer {
if data == nil {
return nil
}
return &byteBuffer{
data: data,
}
}
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
if len(data) > length {
panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
}
pad := make([]byte, length-len(data))
return newBuffer(append(pad, data...))
}
func newBufferFromInt(num uint64) *byteBuffer {
data := make([]byte, 8)
binary.BigEndian.PutUint64(data, num)
return newBuffer(bytes.TrimLeft(data, "\x00"))
}
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
return MarshalJSON(b.base64())
}
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
var encoded string
err := UnmarshalJSON(data, &encoded)
if err != nil {
return err
}
if encoded == "" {
return nil
}
decoded, err := base64URLDecode(encoded)
if err != nil {
return err
}
*b = *newBuffer(decoded)
return nil
}
func (b *byteBuffer) base64() string {
return base64URLEncode(b.data)
}
func (b *byteBuffer) bytes() []byte {
// Handling nil here allows us to transparently handle nil slices when serializing.
if b == nil {
return nil
}
return b.data
}
func (b byteBuffer) bigInt() *big.Int {
return new(big.Int).SetBytes(b.data)
}
func (b byteBuffer) toInt() int {
return int(b.bigInt().Int64())
}

27
vendor/gopkg.in/square/go-jose.v1/json/LICENSE generated vendored Normal file
View file

@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

1183
vendor/gopkg.in/square/go-jose.v1/json/decode.go generated vendored Normal file

File diff suppressed because it is too large

1197
vendor/gopkg.in/square/go-jose.v1/json/encode.go generated vendored Normal file

File diff suppressed because it is too large

141
vendor/gopkg.in/square/go-jose.v1/json/indent.go generated vendored Normal file
View file

@ -0,0 +1,141 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
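// A minimal usage sketch for Compact and Indent above; both append to a
// *bytes.Buffer destination. Illustrative only, not part of this package's
// API.
func exampleIndent() (string, error) {
	src := []byte(`{"a":[1,2,3]}`)
	var dst bytes.Buffer
	if err := Indent(&dst, src, "", "\t"); err != nil {
		return "", err
	}
	return dst.String(), nil
}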

623
vendor/gopkg.in/square/go-jose.v1/json/scanner.go generated vendored Normal file
View file

@ -0,0 +1,623 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
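// A minimal sketch of driving checkValid, reusing one scanner value across
// inputs; this is exactly the allocation-avoidance pattern the comment
// above describes, and it is safe because checkValid resets the scanner
// first. Not part of this package's API.
func exampleCheckValid(inputs [][]byte) error {
	var scan scanner
	for _, input := range inputs {
		if err := checkValid(input, &scan); err != nil {
			return err
		}
	}
	return nil
}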
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
// numbers
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
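The scanner above is a function-pointer state machine: each state consumes exactly one byte, installs its successor in s.step, and returns a scan code. A minimal standalone sketch of the same pattern (toy names, \u escapes omitted; this is not the package's internals) that validates a quoted string:

package main

import "fmt"

// toyScanner mirrors the step-function pattern: the current state is a
// function pointer that consumes one byte and installs its successor.
type toyScanner struct {
    step func(*toyScanner, byte) bool // returns false on syntax error
    done bool
}

// inString is the state inside the quotes, outside any escape.
func inString(s *toyScanner, c byte) bool {
    switch {
    case c == '"':
        s.done = true
    case c == '\\':
        s.step = inEscape
    case c < 0x20:
        return false // control characters are not allowed in strings
    }
    return true
}

// inEscape is the state after a backslash; \u escapes omitted for brevity.
func inEscape(s *toyScanner, c byte) bool {
    switch c {
    case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
        s.step = inString
        return true
    }
    return false
}

// validString reports whether input is a complete quoted string.
func validString(input string) bool {
    if len(input) == 0 || input[0] != '"' {
        return false
    }
    s := &toyScanner{step: inString}
    for i := 1; i < len(input); i++ {
        if !s.step(s, input[i]) {
            return false
        }
        if s.done {
            return i == len(input)-1 // closing quote must be the last byte
        }
    }
    return false // unterminated
}

func main() {
    fmt.Println(validString(`"a\nb"`)) // true
    fmt.Println(validString(`"a`))     // false
}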

480
vendor/gopkg.in/square/go-jose.v1/json/stream.go generated vendored Normal file
View file

@ -0,0 +1,480 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState()
err := e.marshal(v)
if err != nil {
return err
}
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenState from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/
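This file tracks the standard library's streaming API, so Decode/More/Token behave like encoding/json's. A sketch of both loops against the standard package, which this fork mirrors:

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

func main() {
    // Decode a stream of whitespace-separated top-level values.
    dec := json.NewDecoder(strings.NewReader(`{"n":1} {"n":2}`))
    for dec.More() {
        var v struct {
            N int `json:"n"`
        }
        if err := dec.Decode(&v); err != nil {
            fmt.Println("decode:", err)
            return
        }
        fmt.Println(v.N) // 1, then 2
    }

    // Walk input token by token: Delims for { } [ ], basic Go values
    // for everything else; commas and colons are elided.
    dec = json.NewDecoder(strings.NewReader(`["a", true, 3]`))
    for {
        tok, err := dec.Token()
        if err != nil {
            break // io.EOF at the end of the stream
        }
        fmt.Printf("%T %v\n", tok, tok)
    }
}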

44
vendor/gopkg.in/square/go-jose.v1/json/tags.go generated vendored Normal file
View file

@ -0,0 +1,44 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains the given optionName. The name must appear as a whole,
// comma-delimited entry, not merely as a substring.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
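parseTag and tagOptions are unexported, but their behavior is easy to see in an equivalent standalone sketch: the name is everything before the first comma, and options are matched only as whole comma-delimited entries.

package main

import (
    "fmt"
    "strings"
)

// splitTag mirrors parseTag: name before the first comma, options after.
func splitTag(tag string) (string, string) {
    if name, opts, ok := strings.Cut(tag, ","); ok {
        return name, opts
    }
    return tag, ""
}

// hasOption mirrors tagOptions.Contains: an option must match a whole
// comma-delimited segment, never a substring.
func hasOption(opts, want string) bool {
    for opts != "" {
        var cur string
        cur, opts, _ = strings.Cut(opts, ",")
        if cur == want {
            return true
        }
    }
    return false
}

func main() {
    name, opts := splitTag("field_name,omitempty,string")
    fmt.Println(name)                         // field_name
    fmt.Println(hasOption(opts, "omitempty")) // true
    fmt.Println(hasOption(opts, "omit"))      // false: no substring match
}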

31
vendor/gopkg.in/square/go-jose.v1/json_fork.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
// +build !std_json
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"gopkg.in/square/go-jose.v1/json"
)
func MarshalJSON(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func UnmarshalJSON(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}

31
vendor/gopkg.in/square/go-jose.v1/json_std.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
// +build std_json
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"encoding/json"
)
func MarshalJSON(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func UnmarshalJSON(data []byte, v interface{}) error {
return json.Unmarshal(data, v)
}
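json_fork.go and json_std.go above are mutually exclusive: both define MarshalJSON/UnmarshalJSON for package jose, and the std_json build tag selects which one compiles. By default the vendored fork is built; adding the tag swaps in encoding/json behind the same wrappers, e.g.:

    go build ./...                  # default: vendored json fork
    go build -tags std_json ./...   # standard encoding/json instead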

278
vendor/gopkg.in/square/go-jose.v1/jwe.go generated vendored Normal file
View file

@ -0,0 +1,278 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"fmt"
"strings"
)
// rawJsonWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
type rawJsonWebEncryption struct {
Protected *byteBuffer `json:"protected,omitempty"`
Unprotected *rawHeader `json:"unprotected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Recipients []rawRecipientInfo `json:"recipients,omitempty"`
Aad *byteBuffer `json:"aad,omitempty"`
EncryptedKey *byteBuffer `json:"encrypted_key,omitempty"`
Iv *byteBuffer `json:"iv,omitempty"`
Ciphertext *byteBuffer `json:"ciphertext,omitempty"`
Tag *byteBuffer `json:"tag,omitempty"`
}
// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
type rawRecipientInfo struct {
Header *rawHeader `json:"header,omitempty"`
EncryptedKey string `json:"encrypted_key,omitempty"`
}
// JsonWebEncryption represents an encrypted JWE object after parsing.
type JsonWebEncryption struct {
Header JoseHeader
protected, unprotected *rawHeader
recipients []recipientInfo
aad, iv, ciphertext, tag []byte
original *rawJsonWebEncryption
}
// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
type recipientInfo struct {
header *rawHeader
encryptedKey []byte
}
// GetAuthData retrieves the (optional) authenticated data attached to the object.
func (obj JsonWebEncryption) GetAuthData() []byte {
if obj.aad != nil {
out := make([]byte, len(obj.aad))
copy(out, obj.aad)
return out
}
return nil
}
// Get the merged header values
func (obj JsonWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
out := rawHeader{}
out.merge(obj.protected)
out.merge(obj.unprotected)
if recipient != nil {
out.merge(recipient.header)
}
return out
}
// Compute the additional authenticated data (serialized protected header,
// plus any explicit AAD) for a JWE object.
func (obj JsonWebEncryption) computeAuthData() []byte {
var protected string
if obj.original != nil {
protected = obj.original.Protected.base64()
} else {
protected = base64URLEncode(mustSerializeJSON(obj.protected))
}
output := []byte(protected)
if obj.aad != nil {
output = append(output, '.')
output = append(output, []byte(base64URLEncode(obj.aad))...)
}
return output
}
// ParseEncrypted parses an encrypted message in compact or full serialization format.
func ParseEncrypted(input string) (*JsonWebEncryption, error) {
input = stripWhitespace(input)
if strings.HasPrefix(input, "{") {
return parseEncryptedFull(input)
}
return parseEncryptedCompact(input)
}
// parseEncryptedFull parses a message in full serialization format.
func parseEncryptedFull(input string) (*JsonWebEncryption, error) {
var parsed rawJsonWebEncryption
err := UnmarshalJSON([]byte(input), &parsed)
if err != nil {
return nil, err
}
return parsed.sanitized()
}
// sanitized produces a cleaned-up JWE object from the raw JSON.
func (parsed *rawJsonWebEncryption) sanitized() (*JsonWebEncryption, error) {
obj := &JsonWebEncryption{
original: parsed,
unprotected: parsed.Unprotected,
}
// Check that there is not a nonce in the unprotected headers
if (parsed.Unprotected != nil && parsed.Unprotected.Nonce != "") ||
(parsed.Header != nil && parsed.Header.Nonce != "") {
return nil, ErrUnprotectedNonce
}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
err := UnmarshalJSON(parsed.Protected.bytes(), &obj.protected)
if err != nil {
return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
}
}
// Note: this must be called _after_ we parse the protected header,
// otherwise fields from the protected header will not get picked up.
obj.Header = obj.mergedHeaders(nil).sanitized()
if len(parsed.Recipients) == 0 {
obj.recipients = []recipientInfo{{
header: parsed.Header,
encryptedKey: parsed.EncryptedKey.bytes(),
}}
} else {
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
for r := range parsed.Recipients {
encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
if err != nil {
return nil, err
}
// Check that there is not a nonce in the unprotected header
if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.Nonce != "" {
return nil, ErrUnprotectedNonce
}
obj.recipients[r].header = parsed.Recipients[r].Header
obj.recipients[r].encryptedKey = encryptedKey
}
}
for _, recipient := range obj.recipients {
headers := obj.mergedHeaders(&recipient)
if headers.Alg == "" || headers.Enc == "" {
return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers")
}
}
obj.iv = parsed.Iv.bytes()
obj.ciphertext = parsed.Ciphertext.bytes()
obj.tag = parsed.Tag.bytes()
obj.aad = parsed.Aad.bytes()
return obj, nil
}
// parseEncryptedCompact parses a message in compact format.
func parseEncryptedCompact(input string) (*JsonWebEncryption, error) {
parts := strings.Split(input, ".")
if len(parts) != 5 {
return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts")
}
rawProtected, err := base64URLDecode(parts[0])
if err != nil {
return nil, err
}
encryptedKey, err := base64URLDecode(parts[1])
if err != nil {
return nil, err
}
iv, err := base64URLDecode(parts[2])
if err != nil {
return nil, err
}
ciphertext, err := base64URLDecode(parts[3])
if err != nil {
return nil, err
}
tag, err := base64URLDecode(parts[4])
if err != nil {
return nil, err
}
raw := &rawJsonWebEncryption{
Protected: newBuffer(rawProtected),
EncryptedKey: newBuffer(encryptedKey),
Iv: newBuffer(iv),
Ciphertext: newBuffer(ciphertext),
Tag: newBuffer(tag),
}
return raw.sanitized()
}
// CompactSerialize serializes an object using the compact serialization format.
func (obj JsonWebEncryption) CompactSerialize() (string, error) {
if len(obj.recipients) != 1 || obj.unprotected != nil ||
obj.protected == nil || obj.recipients[0].header != nil {
return "", ErrNotSupported
}
serializedProtected := mustSerializeJSON(obj.protected)
return fmt.Sprintf(
"%s.%s.%s.%s.%s",
base64URLEncode(serializedProtected),
base64URLEncode(obj.recipients[0].encryptedKey),
base64URLEncode(obj.iv),
base64URLEncode(obj.ciphertext),
base64URLEncode(obj.tag)), nil
}
// FullSerialize serializes an object using the full JSON serialization format.
func (obj JsonWebEncryption) FullSerialize() string {
raw := rawJsonWebEncryption{
Unprotected: obj.unprotected,
Iv: newBuffer(obj.iv),
Ciphertext: newBuffer(obj.ciphertext),
// encrypted_key appears at the top level only in the flattened form;
// it is set in the single-recipient branch below.
Tag: newBuffer(obj.tag),
Aad: newBuffer(obj.aad),
Recipients: []rawRecipientInfo{},
}
if len(obj.recipients) > 1 {
for _, recipient := range obj.recipients {
info := rawRecipientInfo{
Header: recipient.header,
EncryptedKey: base64URLEncode(recipient.encryptedKey),
}
raw.Recipients = append(raw.Recipients, info)
}
} else {
// Use flattened serialization
raw.Header = obj.recipients[0].header
raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
}
if obj.protected != nil {
raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
}
return string(mustSerializeJSON(raw))
}
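A sketch of the parsing entry points above: ParseEncrypted dispatches on a leading "{" between full and compact form, and a structurally valid five-part token round-trips through CompactSerialize. The bytes below are placeholders (no real encryption), and the import path is taken from the vendor directory; this exercises parsing only, not decryption.

package main

import (
    "encoding/base64"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    b64 := base64.RawURLEncoding.EncodeToString
    // protected.encrypted_key.iv.ciphertext.tag -- the five compact parts.
    // Parsing checks structure, not cryptographic validity.
    token := b64([]byte(`{"alg":"dir","enc":"A128GCM"}`)) +
        "." + // empty encrypted key, as with direct encryption
        "." + b64([]byte("000000000000")) +
        "." + b64([]byte("not-real-ciphertext")) +
        "." + b64([]byte("0000000000000000"))

    obj, err := jose.ParseEncrypted(token)
    if err != nil {
        fmt.Println("parse:", err)
        return
    }
    fmt.Println(obj.Header.Algorithm)   // dir
    fmt.Println(obj.CompactSerialize()) // the same five parts re-encoded
}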

408
vendor/gopkg.in/square/go-jose.v1/jwk.go generated vendored Normal file
View file

@ -0,0 +1,408 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"fmt"
"math/big"
"reflect"
"strings"
)
// rawJsonWebKey represents a public or private key in JWK format, used for parsing/serializing.
type rawJsonWebKey struct {
Use string `json:"use,omitempty"`
Kty string `json:"kty,omitempty"`
Kid string `json:"kid,omitempty"`
Crv string `json:"crv,omitempty"`
Alg string `json:"alg,omitempty"`
K *byteBuffer `json:"k,omitempty"`
X *byteBuffer `json:"x,omitempty"`
Y *byteBuffer `json:"y,omitempty"`
N *byteBuffer `json:"n,omitempty"`
E *byteBuffer `json:"e,omitempty"`
// -- Following fields are only used for private keys --
// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
// we have a private key whereas D == nil means we have only a public key.
D *byteBuffer `json:"d,omitempty"`
P *byteBuffer `json:"p,omitempty"`
Q *byteBuffer `json:"q,omitempty"`
Dp *byteBuffer `json:"dp,omitempty"`
Dq *byteBuffer `json:"dq,omitempty"`
Qi *byteBuffer `json:"qi,omitempty"`
}
// JsonWebKey represents a public or private key in JWK format.
type JsonWebKey struct {
Key interface{}
KeyID string
Algorithm string
Use string
}
// MarshalJSON serializes the given key to its JSON representation.
func (k JsonWebKey) MarshalJSON() ([]byte, error) {
var raw *rawJsonWebKey
var err error
switch key := k.Key.(type) {
case *ecdsa.PublicKey:
raw, err = fromEcPublicKey(key)
case *rsa.PublicKey:
raw = fromRsaPublicKey(key)
case *ecdsa.PrivateKey:
raw, err = fromEcPrivateKey(key)
case *rsa.PrivateKey:
raw, err = fromRsaPrivateKey(key)
case []byte:
raw, err = fromSymmetricKey(key)
default:
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
}
if err != nil {
return nil, err
}
raw.Kid = k.KeyID
raw.Alg = k.Algorithm
raw.Use = k.Use
return MarshalJSON(raw)
}
// UnmarshalJSON reads a key from its JSON representation.
func (k *JsonWebKey) UnmarshalJSON(data []byte) (err error) {
var raw rawJsonWebKey
err = UnmarshalJSON(data, &raw)
if err != nil {
return err
}
var key interface{}
switch raw.Kty {
case "EC":
if raw.D != nil {
key, err = raw.ecPrivateKey()
} else {
key, err = raw.ecPublicKey()
}
case "RSA":
if raw.D != nil {
key, err = raw.rsaPrivateKey()
} else {
key, err = raw.rsaPublicKey()
}
case "oct":
key, err = raw.symmetricKey()
default:
err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", raw.Kty)
}
if err == nil {
*k = JsonWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use}
}
return
}
// JsonWebKeySet represents a JWK Set object.
type JsonWebKeySet struct {
Keys []JsonWebKey `json:"keys"`
}
// Key returns the keys in the set that match the given key ID. The
// specification states that a JWK Set "SHOULD" use distinct key IDs but
// allows for duplicates, so this method returns a slice of JsonWebKeys.
func (s *JsonWebKeySet) Key(kid string) []JsonWebKey {
var keys []JsonWebKey
for _, key := range s.Keys {
if key.KeyID == kid {
keys = append(keys, key)
}
}
return keys
}
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
coordLength := curveSize(curve)
crv, err := curveName(curve)
if err != nil {
return "", err
}
return fmt.Sprintf(ecThumbprintTemplate, crv,
newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
}
func rsaThumbprintInput(n *big.Int, e int) (string, error) {
return fmt.Sprintf(rsaThumbprintTemplate,
newBufferFromInt(uint64(e)).base64(),
newBuffer(n.Bytes()).base64()), nil
}
// Thumbprint computes the JWK Thumbprint of a key using the
// indicated hash algorithm.
func (k *JsonWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
var input string
var err error
switch key := k.Key.(type) {
case *ecdsa.PublicKey:
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
case *ecdsa.PrivateKey:
input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
case *rsa.PublicKey:
input, err = rsaThumbprintInput(key.N, key.E)
case *rsa.PrivateKey:
input, err = rsaThumbprintInput(key.N, key.E)
default:
return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key))
}
if err != nil {
return nil, err
}
h := hash.New()
h.Write([]byte(input))
return h.Sum(nil), nil
}
// Valid checks that the key contains the expected parameters
func (k *JsonWebKey) Valid() bool {
if k.Key == nil {
return false
}
switch key := k.Key.(type) {
case *ecdsa.PublicKey:
if key.Curve == nil || key.X == nil || key.Y == nil {
return false
}
case *ecdsa.PrivateKey:
if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
return false
}
case *rsa.PublicKey:
if key.N == nil || key.E == 0 {
return false
}
case *rsa.PrivateKey:
if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
return false
}
default:
return false
}
return true
}
func (key rawJsonWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
if key.N == nil || key.E == nil {
return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values")
}
return &rsa.PublicKey{
N: key.N.bigInt(),
E: key.E.toInt(),
}, nil
}
func fromRsaPublicKey(pub *rsa.PublicKey) *rawJsonWebKey {
return &rawJsonWebKey{
Kty: "RSA",
N: newBuffer(pub.N.Bytes()),
E: newBufferFromInt(uint64(pub.E)),
}
}
func (key rawJsonWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
var curve elliptic.Curve
switch key.Crv {
case "P-256":
curve = elliptic.P256()
case "P-384":
curve = elliptic.P384()
case "P-521":
curve = elliptic.P521()
default:
return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
if key.X == nil || key.Y == nil {
return nil, fmt.Errorf("square/go-jose: invalid EC key, missing x/y values")
}
return &ecdsa.PublicKey{
Curve: curve,
X: key.X.bigInt(),
Y: key.Y.bigInt(),
}, nil
}
func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJsonWebKey, error) {
if pub == nil || pub.X == nil || pub.Y == nil {
return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)")
}
name, err := curveName(pub.Curve)
if err != nil {
return nil, err
}
size := curveSize(pub.Curve)
xBytes := pub.X.Bytes()
yBytes := pub.Y.Bytes()
if len(xBytes) > size || len(yBytes) > size {
return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)")
}
key := &rawJsonWebKey{
Kty: "EC",
Crv: name,
X: newFixedSizeBuffer(xBytes, size),
Y: newFixedSizeBuffer(yBytes, size),
}
return key, nil
}
func (key rawJsonWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
var missing []string
// Check each field independently so the error names every missing
// value (a switch would stop at the first matching case).
if key.N == nil {
missing = append(missing, "N")
}
if key.E == nil {
missing = append(missing, "E")
}
if key.D == nil {
missing = append(missing, "D")
}
if key.P == nil {
missing = append(missing, "P")
}
if key.Q == nil {
missing = append(missing, "Q")
}
if len(missing) > 0 {
return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
}
rv := &rsa.PrivateKey{
PublicKey: rsa.PublicKey{
N: key.N.bigInt(),
E: key.E.toInt(),
},
D: key.D.bigInt(),
Primes: []*big.Int{
key.P.bigInt(),
key.Q.bigInt(),
},
}
if key.Dp != nil {
rv.Precomputed.Dp = key.Dp.bigInt()
}
if key.Dq != nil {
rv.Precomputed.Dq = key.Dq.bigInt()
}
if key.Qi != nil {
rv.Precomputed.Qinv = key.Qi.bigInt()
}
err := rv.Validate()
return rv, err
}
func fromRsaPrivateKey(priv *rsa.PrivateKey) (*rawJsonWebKey, error) {
if len(priv.Primes) != 2 {
return nil, ErrUnsupportedKeyType
}
raw := fromRsaPublicKey(&priv.PublicKey)
raw.D = newBuffer(priv.D.Bytes())
raw.P = newBuffer(priv.Primes[0].Bytes())
raw.Q = newBuffer(priv.Primes[1].Bytes())
return raw, nil
}
func (key rawJsonWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
var curve elliptic.Curve
switch key.Crv {
case "P-256":
curve = elliptic.P256()
case "P-384":
curve = elliptic.P384()
case "P-521":
curve = elliptic.P521()
default:
return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
if key.X == nil || key.Y == nil || key.D == nil {
return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values")
}
return &ecdsa.PrivateKey{
PublicKey: ecdsa.PublicKey{
Curve: curve,
X: key.X.bigInt(),
Y: key.Y.bigInt(),
},
D: key.D.bigInt(),
}, nil
}
func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJsonWebKey, error) {
raw, err := fromEcPublicKey(&ec.PublicKey)
if err != nil {
return nil, err
}
if ec.D == nil {
return nil, fmt.Errorf("square/go-jose: invalid EC private key")
}
raw.D = newBuffer(ec.D.Bytes())
return raw, nil
}
func fromSymmetricKey(key []byte) (*rawJsonWebKey, error) {
return &rawJsonWebKey{
Kty: "oct",
K: newBuffer(key),
}, nil
}
func (key rawJsonWebKey) symmetricKey() ([]byte, error) {
if key.K == nil {
return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value")
}
return key.K.bytes(), nil
}
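Since JsonWebKey implements json.Marshaler and json.Unmarshaler, keys round-trip through ordinary JSON, and Thumbprint hashes the canonical RFC 7638 subset of members. A sketch with a freshly generated EC key (import path taken from the vendor directory; key ID and algorithm are illustrative):

package main

import (
    "crypto"
    "crypto/ecdsa"
    "crypto/elliptic"
    "crypto/rand"
    _ "crypto/sha256" // registers SHA-256 for crypto.SHA256.New
    "encoding/json"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
    if err != nil {
        panic(err)
    }
    jwk := jose.JsonWebKey{Key: priv, KeyID: "example-1", Algorithm: "ES256"}

    // MarshalJSON above emits kty/crv/x/y plus d for the private part.
    data, err := json.Marshal(jwk)
    if err != nil {
        panic(err)
    }

    var parsed jose.JsonWebKey
    if err := json.Unmarshal(data, &parsed); err != nil {
        panic(err)
    }
    fmt.Println(parsed.Valid(), parsed.KeyID) // true example-1

    // RFC 7638 thumbprint over {"crv","kty","x","y"}.
    tp, err := jwk.Thumbprint(crypto.SHA256)
    fmt.Printf("%x %v\n", tp, err)
}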

252
vendor/gopkg.in/square/go-jose.v1/jws.go generated vendored Normal file
View file

@ -0,0 +1,252 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"fmt"
"strings"
)
// rawJsonWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
type rawJsonWebSignature struct {
Payload *byteBuffer `json:"payload,omitempty"`
Signatures []rawSignatureInfo `json:"signatures,omitempty"`
Protected *byteBuffer `json:"protected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Signature *byteBuffer `json:"signature,omitempty"`
}
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
type rawSignatureInfo struct {
Protected *byteBuffer `json:"protected,omitempty"`
Header *rawHeader `json:"header,omitempty"`
Signature *byteBuffer `json:"signature,omitempty"`
}
// JsonWebSignature represents a signed JWS object after parsing.
type JsonWebSignature struct {
payload []byte
Signatures []Signature
}
// Signature represents a single signature over the JWS payload and protected header.
type Signature struct {
// Header fields, such as the signature algorithm
Header JoseHeader
// The actual signature value
Signature []byte
protected *rawHeader
header *rawHeader
original *rawSignatureInfo
}
// ParseSigned parses a signed message in compact or full serialization format.
func ParseSigned(input string) (*JsonWebSignature, error) {
input = stripWhitespace(input)
if strings.HasPrefix(input, "{") {
return parseSignedFull(input)
}
return parseSignedCompact(input)
}
// Get the merged header values for this signature
func (sig Signature) mergedHeaders() rawHeader {
out := rawHeader{}
out.merge(sig.protected)
out.merge(sig.header)
return out
}
// Compute data to be signed
func (obj JsonWebSignature) computeAuthData(signature *Signature) []byte {
var serializedProtected string
if signature.original != nil && signature.original.Protected != nil {
serializedProtected = signature.original.Protected.base64()
} else if signature.protected != nil {
serializedProtected = base64URLEncode(mustSerializeJSON(signature.protected))
} else {
serializedProtected = ""
}
return []byte(fmt.Sprintf("%s.%s",
serializedProtected,
base64URLEncode(obj.payload)))
}
// parseSignedFull parses a message in full format.
func parseSignedFull(input string) (*JsonWebSignature, error) {
var parsed rawJsonWebSignature
err := UnmarshalJSON([]byte(input), &parsed)
if err != nil {
return nil, err
}
return parsed.sanitized()
}
// sanitized produces a cleaned-up JWS object from the raw JSON.
func (parsed *rawJsonWebSignature) sanitized() (*JsonWebSignature, error) {
if parsed.Payload == nil {
return nil, fmt.Errorf("square/go-jose: missing payload in JWS message")
}
obj := &JsonWebSignature{
payload: parsed.Payload.bytes(),
Signatures: make([]Signature, len(parsed.Signatures)),
}
if len(parsed.Signatures) == 0 {
// No signatures array, must be flattened serialization
signature := Signature{}
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
signature.protected = &rawHeader{}
err := UnmarshalJSON(parsed.Protected.bytes(), signature.protected)
if err != nil {
return nil, err
}
}
if parsed.Header != nil && parsed.Header.Nonce != "" {
return nil, ErrUnprotectedNonce
}
signature.header = parsed.Header
signature.Signature = parsed.Signature.bytes()
// Make a fake "original" rawSignatureInfo to store the unprocessed
// Protected header. This is necessary because the Protected header can
// contain arbitrary fields not registered as part of the spec. See
// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
// If we unmarshal Protected into a rawHeader with its explicit list of fields,
// we cannot marshal losslessly. So we have to keep around the original bytes.
// This is used in computeAuthData, which will first attempt to use
// the original bytes of a protected header, and fall back on marshaling the
// header struct only if those bytes are not available.
signature.original = &rawSignatureInfo{
Protected: parsed.Protected,
Header: parsed.Header,
Signature: parsed.Signature,
}
signature.Header = signature.mergedHeaders().sanitized()
obj.Signatures = append(obj.Signatures, signature)
}
for i, sig := range parsed.Signatures {
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
obj.Signatures[i].protected = &rawHeader{}
err := UnmarshalJSON(sig.Protected.bytes(), obj.Signatures[i].protected)
if err != nil {
return nil, err
}
}
// Check that there is not a nonce in the unprotected header
if sig.Header != nil && sig.Header.Nonce != "" {
return nil, ErrUnprotectedNonce
}
obj.Signatures[i].Signature = sig.Signature.bytes()
// Copy value of sig
original := sig
obj.Signatures[i].header = sig.Header
obj.Signatures[i].original = &original
obj.Signatures[i].Header = obj.Signatures[i].mergedHeaders().sanitized()
}
return obj, nil
}
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(input string) (*JsonWebSignature, error) {
parts := strings.Split(input, ".")
if len(parts) != 3 {
return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts")
}
rawProtected, err := base64URLDecode(parts[0])
if err != nil {
return nil, err
}
payload, err := base64URLDecode(parts[1])
if err != nil {
return nil, err
}
signature, err := base64URLDecode(parts[2])
if err != nil {
return nil, err
}
raw := &rawJsonWebSignature{
Payload: newBuffer(payload),
Protected: newBuffer(rawProtected),
Signature: newBuffer(signature),
}
return raw.sanitized()
}
// CompactSerialize serializes an object using the compact serialization format.
func (obj JsonWebSignature) CompactSerialize() (string, error) {
if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
return "", ErrNotSupported
}
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
return fmt.Sprintf(
"%s.%s.%s",
base64URLEncode(serializedProtected),
base64URLEncode(obj.payload),
base64URLEncode(obj.Signatures[0].Signature)), nil
}
// FullSerialize serializes an object using the full JSON serialization format.
func (obj JsonWebSignature) FullSerialize() string {
raw := rawJsonWebSignature{
Payload: newBuffer(obj.payload),
}
if len(obj.Signatures) == 1 {
if obj.Signatures[0].protected != nil {
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
raw.Protected = newBuffer(serializedProtected)
}
raw.Header = obj.Signatures[0].header
raw.Signature = newBuffer(obj.Signatures[0].Signature)
} else {
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
for i, signature := range obj.Signatures {
raw.Signatures[i] = rawSignatureInfo{
Header: signature.header,
Signature: newBuffer(signature.Signature),
}
if signature.protected != nil {
raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
}
}
}
return string(mustSerializeJSON(raw))
}
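The same shape for JWS: ParseSigned accepts the three-part compact form, and FullSerialize re-emits it as flattened JSON. The signature bytes below are placeholders; parsing never verifies them, so real use needs a separate verification step.

package main

import (
    "encoding/base64"
    "fmt"

    jose "gopkg.in/square/go-jose.v1"
)

func main() {
    b64 := base64.RawURLEncoding.EncodeToString
    // protected.payload.signature -- structurally valid, not verifiable.
    token := b64([]byte(`{"alg":"HS256"}`)) +
        "." + b64([]byte(`{"msg":"hello"}`)) +
        "." + b64([]byte("not-a-real-signature"))

    obj, err := jose.ParseSigned(token)
    if err != nil {
        fmt.Println("parse:", err)
        return
    }
    fmt.Println(len(obj.Signatures), obj.Signatures[0].Header.Algorithm) // 1 HS256
    fmt.Println(obj.FullSerialize()) // flattened payload/protected/signature JSON
}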

224
vendor/gopkg.in/square/go-jose.v1/shared.go generated vendored Normal file
View file

@ -0,0 +1,224 @@
/*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jose
import (
"crypto/elliptic"
"errors"
"fmt"
)
// KeyAlgorithm represents a key management algorithm.
type KeyAlgorithm string
// SignatureAlgorithm represents a signature (or MAC) algorithm.
type SignatureAlgorithm string
// ContentEncryption represents a content encryption algorithm.
type ContentEncryption string
// CompressionAlgorithm represents an algorithm used for plaintext compression.
type CompressionAlgorithm string
var (
// ErrCryptoFailure represents an error in cryptographic primitive. This
// occurs when, for example, a message had an invalid authentication tag or
// could not be decrypted.
ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive")
// ErrUnsupportedAlgorithm indicates that a selected algorithm is not
// supported. This occurs when trying to instantiate an encrypter for an
// algorithm that is not yet implemented.
ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm")
// ErrUnsupportedKeyType indicates that the given key type/format is not
// supported. This occurs when trying to instantiate an encrypter and passing
// it a key of an unrecognized type or with unsupported parameters, such as
// an RSA private key with more than two primes.
ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format")
// ErrNotSupported indicates that the requested serialization of an
// object is not supported. This occurs when trying to compact-serialize
// an object which can't be represented in compact form.
ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object")
// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
// nonce header parameter was included in an unprotected header object.
ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header")
)
// Key management algorithms
const (
RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
DIRECT = KeyAlgorithm("dir") // Direct encryption
ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
)
// Signature algorithms
const (
HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
)
// Content encryption algorithms
const (
A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
)
// Compression algorithms
const (
NONE = CompressionAlgorithm("") // No compression
DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
)
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
type rawHeader struct {
Alg string `json:"alg,omitempty"`
Enc ContentEncryption `json:"enc,omitempty"`
Zip CompressionAlgorithm `json:"zip,omitempty"`
Crit []string `json:"crit,omitempty"`
Apu *byteBuffer `json:"apu,omitempty"`
Apv *byteBuffer `json:"apv,omitempty"`
Epk *JsonWebKey `json:"epk,omitempty"`
Iv *byteBuffer `json:"iv,omitempty"`
Tag *byteBuffer `json:"tag,omitempty"`
Jwk *JsonWebKey `json:"jwk,omitempty"`
Kid string `json:"kid,omitempty"`
Nonce string `json:"nonce,omitempty"`
}
// JoseHeader represents the read-only JOSE header for JWE/JWS objects.
type JoseHeader struct {
KeyID string
JsonWebKey *JsonWebKey
Algorithm string
Nonce string
}
// sanitized produces a cleaned-up header object from the raw JSON.
func (parsed rawHeader) sanitized() JoseHeader {
return JoseHeader{
KeyID: parsed.Kid,
JsonWebKey: parsed.Jwk,
Algorithm: parsed.Alg,
Nonce: parsed.Nonce,
}
}
// Merge headers from src into dst, giving precedence to headers already in dst.
func (dst *rawHeader) merge(src *rawHeader) {
if src == nil {
return
}
if dst.Alg == "" {
dst.Alg = src.Alg
}
if dst.Enc == "" {
dst.Enc = src.Enc
}
if dst.Zip == "" {
dst.Zip = src.Zip
}
if dst.Crit == nil {
dst.Crit = src.Crit
}
if dst.Apu == nil {
dst.Apu = src.Apu
}
if dst.Apv == nil {
dst.Apv = src.Apv
}
if dst.Epk == nil {
dst.Epk = src.Epk
}
if dst.Iv == nil {
dst.Iv = src.Iv
}
if dst.Tag == nil {
dst.Tag = src.Tag
}
if dst.Kid == "" {
dst.Kid = src.Kid
}
if dst.Jwk == nil {
dst.Jwk = src.Jwk
}
if dst.Nonce == "" {
dst.Nonce = src.Nonce
}
}
// Get JOSE name of curve
func curveName(crv elliptic.Curve) (string, error) {
switch crv {
case elliptic.P256():
return "P-256", nil
case elliptic.P384():
return "P-384", nil
case elliptic.P521():
return "P-521", nil
default:
return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve")
}
}
// Get size of curve in bytes
func curveSize(crv elliptic.Curve) int {
bits := crv.Params().BitSize
div := bits / 8
mod := bits % 8
if mod == 0 {
return div
}
return div + 1
}
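curveSize is a ceiling division: coordinates occupy the curve's bit size rounded up to whole bytes, which only matters for P-521 (521 = 65*8 + 1, so 66 bytes). A quick standalone check of the same arithmetic:

package main

import (
    "crypto/elliptic"
    "fmt"
)

// ceilBytes mirrors curveSize above: bit size rounded up to whole bytes.
func ceilBytes(crv elliptic.Curve) int {
    return (crv.Params().BitSize + 7) / 8
}

func main() {
    fmt.Println(ceilBytes(elliptic.P256())) // 32
    fmt.Println(ceilBytes(elliptic.P384())) // 48
    fmt.Println(ceilBytes(elliptic.P521())) // 66
}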

Some files were not shown because too many files have changed in this diff.