Merge v1.2.1-master

Signed-off-by: Emile Vauge <emile@vauge.com>
Emile Vauge 2017-04-11 17:10:46 +02:00
parent a590155b0b
commit aeb17182b4
GPG key ID: D808B4C167352E59 (no known key found for this signature in database)
396 changed files with 27271 additions and 9969 deletions

vendor/github.com/ArthurHlt/go-eureka-client/LICENSE (new file, 21 lines, generated, vendored)
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Arthur Halet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,11 +1,12 @@
package eureka
import (
"encoding/xml"
"strings"
)
func (c *Client) GetApplications() (*Applications, error) {
response, err := c.Get("apps");
response, err := c.Get("apps")
if err != nil {
return nil, err
}
@ -17,7 +18,7 @@ func (c *Client) GetApplications() (*Applications, error) {
func (c *Client) GetApplication(appId string) (*Application, error) {
values := []string{"apps", appId}
path := strings.Join(values, "/")
response, err := c.Get(path);
response, err := c.Get(path)
if err != nil {
return nil, err
}
@ -25,14 +26,39 @@ func (c *Client) GetApplication(appId string) (*Application, error) {
err = xml.Unmarshal(response.Body, application)
return application, err
}
func (c *Client) GetInstance(appId, instanceId string) (*InstanceInfo, error) {
values := []string{"apps", appId, instanceId}
path := strings.Join(values, "/")
response, err := c.Get(path);
response, err := c.Get(path)
if err != nil {
return nil, err
}
var instance *InstanceInfo = new(InstanceInfo)
err = xml.Unmarshal(response.Body, instance)
return instance, err
}
}
func (c *Client) GetVIP(vipId string) (*Applications, error) {
values := []string{"vips", vipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}
func (c *Client) GetSVIP(svipId string) (*Applications, error) {
values := []string{"svips", svipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}
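The two additions above round out the read side of the vendored Eureka client. A minimal sketch of how they could be called; the NewClient constructor, the endpoint URL, and the VIP names are assumptions for illustration, not part of this change:

package main

import (
	"fmt"
	"log"

	"github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
	// Assumption: NewClient takes the list of Eureka endpoints to query.
	client := eureka.NewClient([]string{"http://127.0.0.1:8761/eureka/v2"})

	// GetVIP resolves every application registered under a VIP address.
	apps, err := client.GetVIP("my-service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", apps)

	// GetSVIP does the same for secure VIP addresses.
	secureApps, err := client.GetSVIP("my-secure-service")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", secureApps)
}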

View file

@ -67,7 +67,7 @@ type InstanceInfo struct {
type DataCenterInfo struct {
Name string `xml:"name" json:"name"`
Class string `xml:"class,attr" json:"@class"`
Metadata DataCenterMetadata `xml:"metadata,omitempty" json:"metadata,omitempty"`
Metadata *DataCenterMetadata `xml:"metadata,omitempty" json:"metadata,omitempty"`
}
type DataCenterMetadata struct {
@ -106,6 +106,8 @@ func NewRawRequest(method, relativePath string, body []byte, cancel <-chan bool)
func NewInstanceInfo(hostName, app, ip string, port int, ttl uint, isSsl bool) *InstanceInfo {
dataCenterInfo := &DataCenterInfo{
Name: "MyOwn",
Class: "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo",
Metadata: nil,
}
leaseInfo := &LeaseInfo{
EvictionDurationInSecs: ttl,

View file

@ -1,26 +1,26 @@
package gominlog
import (
"fmt"
"github.com/daviddengcn/go-colortext"
"io"
"log"
"os"
"regexp"
"fmt"
"runtime"
"github.com/fatih/color"
"regexp"
"strings"
"io"
)
type Level int
const (
Loff = Level(^uint(0) >> 1)
Lsevere = Level(1000)
Lerror = Level(900)
Loff = Level(^uint(0) >> 1)
Lsevere = Level(1000)
Lerror = Level(900)
Lwarning = Level(800)
Linfo = Level(700)
Ldebug = Level(600)
Lall = Level(-Loff - 1)
Linfo = Level(700)
Ldebug = Level(600)
Lall = Level(-Loff - 1)
)
type MinLog struct {
@ -54,6 +54,14 @@ func NewMinLog(appName string, level Level, withColor bool, flag int) *MinLog {
minLog.level = level
return minLog
}
func NewMinLogWithWriter(appName string, level Level, withColor bool, flag int, logWriter io.Writer) *MinLog {
minLog := &MinLog{}
minLog.log = log.New(logWriter, "", flag)
minLog.isColorized = withColor
minLog.packageName = appName
minLog.level = level
return minLog
}
func NewMinLogWithLogger(packageName string, level Level, withColor bool, logger *log.Logger) *MinLog {
minLog := &MinLog{}
minLog.log = logger
@ -89,10 +97,11 @@ func (this *MinLog) IsColorized() bool {
return this.isColorized
}
func (this *MinLog) GetLogger() *log.Logger {
return this.log
}
func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Color, args ...interface{}) {
func (this *MinLog) logMessage(typeLog string, colorFg color.Attribute, colorBg color.Attribute, args ...interface{}) {
var text string
msg := ""
flags := this.log.Flags()
@ -100,7 +109,7 @@ func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Colo
msg += this.trace()
this.log.SetFlags(flags - log.Lshortfile)
}
text, ok := args[0].(string)
text, ok := args[0].(string);
if !ok {
panic("Firt argument should be a string")
}
@ -113,51 +122,47 @@ func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Colo
this.writeMsgInLogger(msg, colorFg, colorBg)
this.log.SetFlags(flags)
}
func (this *MinLog) writeMsgInLogger(msg string, colorFg ct.Color, colorBg ct.Color) {
if this.isColorized && colorFg > 0 {
ct.Foreground(colorFg, false)
}
if this.isColorized && colorBg > 0 {
ct.ChangeColor(colorFg, false, colorBg, false)
func (this *MinLog) writeMsgInLogger(msg string, colorFg color.Attribute, colorBg color.Attribute) {
if this.isColorized && int(colorBg) == 0 {
msg = color.New(colorFg).Sprint(msg)
} else if this.isColorized {
msg = color.New(colorFg, colorBg).Sprint(msg)
}
this.log.Print(msg)
if this.isColorized {
ct.ResetColor()
}
}
func (this *MinLog) Error(args ...interface{}) {
if this.level > Lerror {
return
}
this.logMessage("ERROR", ct.Red, 0, args...)
this.logMessage("ERROR", color.FgRed, 0, args...)
}
func (this *MinLog) Severe(args ...interface{}) {
if this.level > Lsevere {
return
}
this.logMessage("SEVERE", ct.Red, ct.Yellow, args...)
this.logMessage("SEVERE", color.FgRed, color.BgYellow, args...)
}
func (this *MinLog) Debug(args ...interface{}) {
if this.level > Ldebug {
return
}
this.logMessage("DEBUG", ct.Blue, 0, args...)
this.logMessage("DEBUG", color.FgBlue, 0, args...)
}
func (this *MinLog) Info(args ...interface{}) {
if this.level > Linfo {
return
}
this.logMessage("INFO", ct.Cyan, 0, args...)
this.logMessage("INFO", color.FgCyan, 0, args...)
}
func (this *MinLog) Warning(args ...interface{}) {
if this.level > Lwarning {
return
}
this.logMessage("WARNING", ct.Yellow, 0, args...)
this.logMessage("WARNING", color.FgYellow, 0, args...)
}
func (this *MinLog) trace() string {
var shortFile string
@ -167,7 +172,7 @@ func (this *MinLog) trace() string {
file, line := f.FileLine(pc[2])
if this.packageName == "" {
execFileSplit := strings.Split(os.Args[0], "/")
this.packageName = execFileSplit[len(execFileSplit)-1]
this.packageName = execFileSplit[len(execFileSplit) - 1]
}
regex, err := regexp.Compile(regexp.QuoteMeta(this.packageName) + "/(.*)")
if err != nil {
@ -176,10 +181,10 @@ func (this *MinLog) trace() string {
subMatch := regex.FindStringSubmatch(file)
if len(subMatch) < 2 {
fileSplit := strings.Split(file, "/")
shortFile = fileSplit[len(fileSplit)-1]
shortFile = fileSplit[len(fileSplit) - 1]
} else {
shortFile = subMatch[1]
}
return fmt.Sprintf("/%s/%s:%d ", this.packageName, shortFile, line)
}
}
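The switch from go-colortext to fatih/color means the logger no longer mutates terminal state around each Print call; it builds a colored string up front and hands it to the standard logger. A rough standalone equivalent of what the new writeMsgInLogger does (the messages and logger setup are illustrative):

package main

import (
	"log"
	"os"

	"github.com/fatih/color"
)

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)

	// Foreground only, as writeMsgInLogger does when no background color is set.
	logger.Print(color.New(color.FgRed).Sprint("ERROR something went wrong"))

	// Foreground plus background, as used for the SEVERE level above.
	logger.Print(color.New(color.FgRed, color.BgYellow).Sprint("SEVERE something went very wrong"))
}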

View file

@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/mojombo/toml
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the

View file

@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra new line between top-level tables.
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}

View file

@ -30,24 +30,28 @@ const (
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
arrayValTerm = ','
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
@ -56,11 +60,18 @@ type lexer struct {
input string
start int
pos int
width int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
@ -88,7 +99,7 @@ func (lx *lexer) nextItem() item {
func lex(input string) *lexer {
lx := &lexer{
input: input + "\n",
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
@ -103,7 +114,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop.")
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@ -125,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) {
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.width = 0
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.pos += lx.width
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
@ -143,9 +163,20 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only once per call of next.
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
lx.pos -= lx.width
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
@ -182,7 +213,7 @@ func (lx *lexer) skip(pred func(rune) bool) {
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (new lines, tabs, etc.).
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
@ -198,7 +229,6 @@ func lexTop(lx *lexer) stateFn {
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
@ -207,7 +237,7 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("Unexpected EOF.")
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
@ -222,12 +252,12 @@ func lexTop(lx *lexer) stateFn {
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully.
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a new line for us.
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
@ -236,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn {
lx.ignore()
return lexTop
case r == eof:
lx.ignore()
return lexTop
lx.emit(itemEOF)
return nil
}
return lx.errorf("Expected a top-level item to end with a new line, "+
"comment or EOF, but got %q instead.", r)
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@ -267,8 +297,8 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("Expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r)
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@ -278,11 +308,11 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("Unexpected end of table name. (Table names cannot " +
"be empty.)")
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("Unexpected table separator. (Table names cannot " +
"be empty.)")
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
@ -317,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
"instead.", r)
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
@ -328,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("Unexpected key separator %q.", keySep)
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
@ -359,7 +389,7 @@ func lexBareKey(lx *lexer) stateFn {
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
return lx.errorf("bare keys cannot contain %q", r)
}
}
@ -372,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("Expected key separator %q, but got %q instead.",
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
@ -381,9 +411,8 @@ func lexKeyEnd(lx *lexer) stateFn {
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new
// lines.
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
@ -397,6 +426,10 @@ func lexValue(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
@ -420,7 +453,7 @@ func lexValue(lx *lexer) stateFn {
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
@ -430,11 +463,11 @@ func lexValue(lx *lexer) stateFn {
lx.backup()
return lexBool
}
return lx.errorf("Expected value but found %q instead.", r)
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored.
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
@ -443,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == arrayValTerm:
return lx.errorf("Unexpected array value terminator %q.",
arrayValTerm)
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
@ -455,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn {
return lexValue
}
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
@ -465,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == arrayValTerm:
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf("Expected an array value terminator %q or an array "+
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
@ -506,11 +598,12 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '\\':
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case r == stringEnd:
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
@ -534,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
@ -547,12 +642,13 @@ func lexRawString(lx *lexer) stateFn {
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'" has already been consumed and
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == rawStringEnd:
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
@ -605,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("Invalid escape character %q. Only the following "+
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
"\\uXXXX and \\UXXXXXXXX.", r)
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
@ -616,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
"but got '%s' instead.", lx.current())
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
@ -628,8 +723,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
"but got '%s' instead.", lx.current())
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
@ -647,9 +742,9 @@ func lexNumberOrDateStart(lx *lexer) stateFn {
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("Expected a digit but got %q.", r)
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
@ -697,9 +792,9 @@ func lexNumberStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("Expected a digit but got %q.", r)
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
@ -745,7 +840,7 @@ func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if r == eof || isWhitespace(r) || isNL(r) {
if !unicode.IsLetter(r) {
lx.backup()
break
}
@ -757,7 +852,7 @@ func lexBool(lx *lexer) stateFn {
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("Expected value but found %q instead.", s)
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
@ -769,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn {
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()

View file

@ -269,6 +269,41 @@ func (p *parser) value(it item) (interface{}, tomlType) {
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
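This new branch is what lets the decoder accept the inline tables emitted by the lexer changes above. A small decode sketch against the package's public API; the config struct and the TOML document are made up for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Illustrative types, not taken from this change.
type config struct {
	Server server `toml:"server"`
}

type server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	// An inline table: a whole table written as { ... } on a single line.
	const doc = `server = { host = "localhost", port = 8080 }`

	var cfg config
	if _, err := toml.Decode(doc, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Server.Host, cfg.Server.Port)
}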

vendor/github.com/JamesClonk/vultr/lib/applications.go (new file, 38 lines, generated, vendored)
View file

@ -0,0 +1,38 @@
package lib
import (
"sort"
"strings"
)
// Application on Vultr
type Application struct {
ID string `json:"APPID"`
Name string `json:"name"`
ShortName string `json:"short_name"`
DeployName string `json:"deploy_name"`
Surcharge float64 `json:"surcharge"`
}
type applications []Application
func (s applications) Len() int { return len(s) }
func (s applications) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s applications) Less(i, j int) bool {
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
}
// GetApplications returns a list of all available applications on Vultr
func (c *Client) GetApplications() ([]Application, error) {
var appMap map[string]Application
if err := c.get(`app/list`, &appMap); err != nil {
return nil, err
}
var appList []Application
for _, app := range appMap {
appList = append(appList, app)
}
sort.Sort(applications(appList))
return appList, nil
}
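Most of the Vultr list types in this vendor bump get the same Len/Swap/Less treatment so results come back in a stable order instead of Go's random map iteration order. For readers unfamiliar with the pattern, a tiny self-contained illustration of how sort.Sort drives such an implementation (the type and data are invented):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// A made-up type mirroring the pattern used for applications above.
type names []string

func (n names) Len() int           { return len(n) }
func (n names) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }
func (n names) Less(i, j int) bool { return strings.ToLower(n[i]) < strings.ToLower(n[j]) }

func main() {
	list := names{"Zulu", "alpha", "Mike"}
	sort.Sort(list)   // case-insensitive, like the vendored Less methods
	fmt.Println(list) // [alpha Mike Zulu]
}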

View file

@ -4,7 +4,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// BlockStorage on Vultr account
@ -19,6 +21,25 @@ type BlockStorage struct {
AttachedTo string `json:"attached_to_SUBID"`
}
type blockstorages []BlockStorage
func (b blockstorages) Len() int { return len(b) }
func (b blockstorages) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b blockstorages) Less(i, j int) bool {
// sort order: name, size, status
if strings.ToLower(b[i].Name) < strings.ToLower(b[j].Name) {
return true
} else if strings.ToLower(b[i].Name) > strings.ToLower(b[j].Name) {
return false
}
if b[i].SizeGB < b[j].SizeGB {
return true
} else if b[i].SizeGB > b[j].SizeGB {
return false
}
return b[i].Status < b[j].Status
}
// UnmarshalJSON implements json.Unmarshaller on BlockStorage.
// This is needed because the Vultr API is inconsistent in it's JSON responses.
// Some fields can change type, from JSON number to JSON string and vice-versa.
@ -87,6 +108,7 @@ func (c *Client) GetBlockStorages() (storages []BlockStorage, err error) {
if err := c.get(`block/list`, &storages); err != nil {
return nil, err
}
sort.Sort(blockstorages(storages))
return storages, nil
}

View file

@ -18,7 +18,7 @@ import (
const (
// Version of this libary
Version = "1.12.0"
Version = "1.13.0"
// APIVersion of Vultr
APIVersion = "v1"

View file

@ -3,6 +3,8 @@ package lib
import (
"fmt"
"net/url"
"sort"
"strings"
)
// DNSDomain represents a DNS domain on Vultr
@ -11,6 +13,14 @@ type DNSDomain struct {
Created string `json:"date_created"`
}
type dnsdomains []DNSDomain
func (d dnsdomains) Len() int { return len(d) }
func (d dnsdomains) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d dnsdomains) Less(i, j int) bool {
return strings.ToLower(d[i].Domain) < strings.ToLower(d[j].Domain)
}
// DNSRecord represents a DNS record on Vultr
type DNSRecord struct {
RecordID int `json:"RECORDID"`
@ -21,20 +31,41 @@ type DNSRecord struct {
TTL int `json:"ttl"`
}
type dnsrecords []DNSRecord
func (d dnsrecords) Len() int { return len(d) }
func (d dnsrecords) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d dnsrecords) Less(i, j int) bool {
// sort order: type, data, name
if d[i].Type < d[j].Type {
return true
} else if d[i].Type > d[j].Type {
return false
}
if d[i].Data < d[j].Data {
return true
} else if d[i].Data > d[j].Data {
return false
}
return strings.ToLower(d[i].Name) < strings.ToLower(d[j].Name)
}
// GetDNSDomains returns a list of available domains on Vultr account
func (c *Client) GetDNSDomains() (dnsdomains []DNSDomain, err error) {
if err := c.get(`dns/list`, &dnsdomains); err != nil {
func (c *Client) GetDNSDomains() (domains []DNSDomain, err error) {
if err := c.get(`dns/list`, &domains); err != nil {
return nil, err
}
return dnsdomains, nil
sort.Sort(dnsdomains(domains))
return domains, nil
}
// GetDNSRecords returns a list of all DNS records of a particular domain
func (c *Client) GetDNSRecords(domain string) (dnsrecords []DNSRecord, err error) {
if err := c.get(`dns/records?domain=`+domain, &dnsrecords); err != nil {
func (c *Client) GetDNSRecords(domain string) (records []DNSRecord, err error) {
if err := c.get(`dns/records?domain=`+domain, &records); err != nil {
return nil, err
}
return dnsrecords, nil
sort.Sort(dnsrecords(records))
return records, nil
}
// CreateDNSDomain creates a new DNS domain name on Vultr

View file

@ -1,6 +1,9 @@
package lib
import "net/url"
import (
"net/url"
"sort"
)
// IPv4 information of a virtual machine
type IPv4 struct {
@ -11,6 +14,20 @@ type IPv4 struct {
ReverseDNS string `json:"reverse"`
}
type ipv4s []IPv4
func (s ipv4s) Len() int { return len(s) }
func (s ipv4s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ipv4s) Less(i, j int) bool {
// sort order: type, ip
if s[i].Type < s[j].Type {
return true
} else if s[i].Type > s[j].Type {
return false
}
return s[i].IP < s[j].IP
}
// IPv6 information of a virtual machine
type IPv6 struct {
IP string `json:"ip"`
@ -19,12 +36,32 @@ type IPv6 struct {
Type string `json:"type"`
}
type ipv6s []IPv6
func (s ipv6s) Len() int { return len(s) }
func (s ipv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ipv6s) Less(i, j int) bool {
// sort order: type, ip
if s[i].Type < s[j].Type {
return true
} else if s[i].Type > s[j].Type {
return false
}
return s[i].IP < s[j].IP
}
// ReverseDNSIPv6 information of a virtual machine
type ReverseDNSIPv6 struct {
IP string `json:"ip"`
ReverseDNS string `json:"reverse"`
}
type reverseDNSIPv6s []ReverseDNSIPv6
func (s reverseDNSIPv6s) Len() int { return len(s) }
func (s reverseDNSIPv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s reverseDNSIPv6s) Less(i, j int) bool { return s[i].IP < s[j].IP }
// ListIPv4 lists the IPv4 information of a virtual machine
func (c *Client) ListIPv4(id string) (list []IPv4, err error) {
var ipMap map[string][]IPv4
@ -37,6 +74,7 @@ func (c *Client) ListIPv4(id string) (list []IPv4, err error) {
list = append(list, ip)
}
}
sort.Sort(ipv4s(list))
return list, nil
}
@ -52,6 +90,7 @@ func (c *Client) ListIPv6(id string) (list []IPv6, err error) {
list = append(list, ip)
}
}
sort.Sort(ipv6s(list))
return list, nil
}
@ -67,6 +106,7 @@ func (c *Client) ListIPv6ReverseDNS(id string) (list []ReverseDNSIPv6, err error
list = append(list, ip)
}
}
sort.Sort(reverseDNSIPv6s(list))
return list, nil
}

View file

@ -1,5 +1,10 @@
package lib
import (
"sort"
"strings"
)
// ISO image on Vultr
type ISO struct {
ID int `json:"ISOID"`
@ -9,6 +14,20 @@ type ISO struct {
MD5sum string `json:"md5sum"`
}
type isos []ISO
func (s isos) Len() int { return len(s) }
func (s isos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s isos) Less(i, j int) bool {
// sort order: filename, created
if strings.ToLower(s[i].Filename) < strings.ToLower(s[j].Filename) {
return true
} else if strings.ToLower(s[i].Filename) > strings.ToLower(s[j].Filename) {
return false
}
return s[i].Created < s[j].Created
}
// GetISO returns a list of all ISO images on Vultr account
func (c *Client) GetISO() ([]ISO, error) {
var isoMap map[string]ISO
@ -20,5 +39,6 @@ func (c *Client) GetISO() ([]ISO, error) {
for _, iso := range isoMap {
isoList = append(isoList, iso)
}
sort.Sort(isos(isoList))
return isoList, nil
}

View file

@ -1,5 +1,10 @@
package lib
import (
"sort"
"strings"
)
// OS image on Vultr
type OS struct {
ID int `json:"OSID"`
@ -10,6 +15,12 @@ type OS struct {
Surcharge string `json:"surcharge"`
}
type oses []OS
func (s oses) Len() int { return len(s) }
func (s oses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s oses) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
// GetOS returns a list of all available operating systems on Vultr
func (c *Client) GetOS() ([]OS, error) {
var osMap map[string]OS
@ -21,5 +32,6 @@ func (c *Client) GetOS() ([]OS, error) {
for _, os := range osMap {
osList = append(osList, os)
}
sort.Sort(oses(osList))
return osList, nil
}

View file

@ -1,6 +1,11 @@
package lib
import "fmt"
import (
"fmt"
"sort"
"strconv"
"strings"
)
// Plan on Vultr
type Plan struct {
@ -14,6 +19,40 @@ type Plan struct {
Regions []int `json:"available_locations"`
}
type plans []Plan
func (p plans) Len() int { return len(p) }
func (p plans) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p plans) Less(i, j int) bool {
pa, _ := strconv.ParseFloat(strings.TrimSpace(p[i].Price), 64)
pb, _ := strconv.ParseFloat(strings.TrimSpace(p[j].Price), 64)
ra, _ := strconv.ParseInt(strings.TrimSpace(p[i].RAM), 10, 64)
rb, _ := strconv.ParseInt(strings.TrimSpace(p[j].RAM), 10, 64)
da, _ := strconv.ParseInt(strings.TrimSpace(p[i].Disk), 10, 64)
db, _ := strconv.ParseInt(strings.TrimSpace(p[j].Disk), 10, 64)
// sort order: price, vcpu, ram, disk
if pa < pb {
return true
} else if pa > pb {
return false
}
if p[i].VCpus < p[j].VCpus {
return true
} else if p[i].VCpus > p[j].VCpus {
return false
}
if ra < rb {
return true
} else if ra > rb {
return false
}
return da < db
}
// GetPlans returns a list of all available plans on Vultr account
func (c *Client) GetPlans() ([]Plan, error) {
var planMap map[string]Plan
@ -21,11 +60,13 @@ func (c *Client) GetPlans() ([]Plan, error) {
return nil, err
}
var planList []Plan
var p plans
for _, plan := range planMap {
planList = append(planList, plan)
p = append(p, plan)
}
return planList, nil
sort.Sort(plans(p))
return p, nil
}
// GetAvailablePlansForRegion returns available plans for specified region

View file

@ -1,5 +1,7 @@
package lib
import "sort"
// Region on Vultr
type Region struct {
ID int `json:"DCID,string"`
@ -12,6 +14,20 @@ type Region struct {
Code string `json:"regioncode"`
}
type regions []Region
func (s regions) Len() int { return len(s) }
func (s regions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s regions) Less(i, j int) bool {
// sort order: continent, name
if s[i].Continent < s[j].Continent {
return true
} else if s[i].Continent > s[j].Continent {
return false
}
return s[i].Name < s[j].Name
}
// GetRegions returns a list of all available Vultr regions
func (c *Client) GetRegions() ([]Region, error) {
var regionMap map[string]Region
@ -23,5 +39,6 @@ func (c *Client) GetRegions() ([]Region, error) {
for _, os := range regionMap {
regionList = append(regionList, os)
}
sort.Sort(regions(regionList))
return regionList, nil
}

View file

@ -4,7 +4,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// IP on Vultr
@ -18,6 +20,25 @@ type IP struct {
AttachedTo string `json:"attached_SUBID,string"`
}
type ips []IP
func (s ips) Len() int { return len(s) }
func (s ips) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ips) Less(i, j int) bool {
// sort order: label, iptype, subnet
if strings.ToLower(s[i].Label) < strings.ToLower(s[j].Label) {
return true
} else if strings.ToLower(s[i].Label) > strings.ToLower(s[j].Label) {
return false
}
if s[i].IPType < s[j].IPType {
return true
} else if s[i].IPType > s[j].IPType {
return false
}
return s[i].Subnet < s[j].Subnet
}
// UnmarshalJSON implements json.Unmarshaller on IP.
// This is needed because the Vultr API is inconsistent in it's JSON responses.
// Some fields can change type, from JSON number to JSON string and vice-versa.
@ -89,11 +110,12 @@ func (c *Client) ListReservedIP() ([]IP, error) {
return nil, err
}
ips := make([]IP, 0)
ipList := make([]IP, 0)
for _, ip := range ipMap {
ips = append(ips, ip)
ipList = append(ipList, ip)
}
return ips, nil
sort.Sort(ips(ipList))
return ipList, nil
}
// GetReservedIP returns reserved IP with given ID

View file

@ -4,6 +4,8 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
)
// StartupScript on Vultr account
@ -14,6 +16,14 @@ type StartupScript struct {
Content string `json:"script"`
}
type startupscripts []StartupScript
func (s startupscripts) Len() int { return len(s) }
func (s startupscripts) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s startupscripts) Less(i, j int) bool {
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
}
// UnmarshalJSON implements json.Unmarshaller on StartupScript.
// Necessary because the SCRIPTID field has inconsistent types.
func (s *StartupScript) UnmarshalJSON(data []byte) (err error) {
@ -47,6 +57,7 @@ func (c *Client) GetStartupScripts() (scripts []StartupScript, err error) {
}
scripts = append(scripts, script)
}
sort.Sort(startupscripts(scripts))
return scripts, nil
}

View file

@ -5,7 +5,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// Server (virtual machine) on Vultr account
@ -36,6 +38,8 @@ type Server struct {
KVMUrl string `json:"kvm_url"`
AutoBackups string `json:"auto_backups"`
Tag string `json:"tag"`
OSID string `json:"OSID"`
AppID string `json:"APPID"`
}
// ServerOptions are optional parameters to be used during server creation
@ -52,6 +56,21 @@ type ServerOptions struct {
DontNotifyOnActivate bool
Hostname string
Tag string
AppID string
}
type servers []Server
func (s servers) Len() int { return len(s) }
func (s servers) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s servers) Less(i, j int) bool {
// sort order: name, ip
if strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) {
return true
} else if strings.ToLower(s[i].Name) > strings.ToLower(s[j].Name) {
return false
}
return s[i].MainIP < s[j].MainIP
}
// V6Network represents a IPv6 network of a Vultr server
@ -140,6 +159,18 @@ func (s *Server) UnmarshalJSON(data []byte) (err error) {
}
s.AllowedBandwidth = ab
value = fmt.Sprintf("%v", fields["OSID"])
if value == "<nil>" {
value = ""
}
s.OSID = value
value = fmt.Sprintf("%v", fields["APPID"])
if value == "<nil>" {
value = ""
}
s.AppID = value
s.ID = fmt.Sprintf("%v", fields["SUBID"])
s.Name = fmt.Sprintf("%v", fields["label"])
s.OS = fmt.Sprintf("%v", fields["os"])
@ -180,29 +211,31 @@ func (s *Server) UnmarshalJSON(data []byte) (err error) {
}
// GetServers returns a list of current virtual machines on Vultr account
func (c *Client) GetServers() (servers []Server, err error) {
func (c *Client) GetServers() (serverList []Server, err error) {
var serverMap map[string]Server
if err := c.get(`server/list`, &serverMap); err != nil {
return nil, err
}
for _, server := range serverMap {
servers = append(servers, server)
serverList = append(serverList, server)
}
return servers, nil
sort.Sort(servers(serverList))
return serverList, nil
}
// GetServersByTag returns a list of all virtual machines matching by tag
func (c *Client) GetServersByTag(tag string) (servers []Server, err error) {
func (c *Client) GetServersByTag(tag string) (serverList []Server, err error) {
var serverMap map[string]Server
if err := c.get(`server/list?tag=`+tag, &serverMap); err != nil {
return nil, err
}
for _, server := range serverMap {
servers = append(servers, server)
serverList = append(serverList, server)
}
return servers, nil
sort.Sort(servers(serverList))
return serverList, nil
}
// GetServer returns the virtual machine with the given ID
@ -274,6 +307,10 @@ func (c *Client) CreateServer(name string, regionID, planID, osID int, options *
if options.Tag != "" {
values.Add("tag", options.Tag)
}
if options.AppID != "" {
values.Add("APPID", options.AppID)
}
}
var server Server
@ -371,6 +408,7 @@ func (c *Client) ListOSforServer(id string) (os []OS, err error) {
for _, o := range osMap {
os = append(os, o)
}
sort.Sort(oses(os))
return os, nil
}
@ -446,3 +484,30 @@ func (c *Client) BandwidthOfServer(id string) (bandwidth []map[string]string, er
return bandwidth, nil
}
// ChangeApplicationofServer changes the virtual machine to a different application
func (c *Client) ChangeApplicationofServer(id string, appID string) error {
values := url.Values{
"SUBID": {id},
"APPID": {appID},
}
if err := c.post(`server/app_change`, values, nil); err != nil {
return err
}
return nil
}
// ListApplicationsforServer lists all available operating systems to which an existing virtual machine can be changed
func (c *Client) ListApplicationsforServer(id string) (apps []Application, err error) {
var appMap map[string]Application
if err := c.get(`server/app_change_list?SUBID=`+id, &appMap); err != nil {
return nil, err
}
for _, app := range appMap {
apps = append(apps, app)
}
sort.Sort(applications(apps))
return apps, nil
}
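Together these two helpers expose Vultr's app_change endpoints. A hedged usage sketch; it assumes the client is built with lib.NewClient, which is outside this diff, and the API key and SUBID are placeholders:

package main

import (
	"fmt"
	"log"

	vultr "github.com/JamesClonk/vultr/lib"
)

func main() {
	// Assumption: NewClient takes an API key plus optional settings;
	// the key and the SUBID below are placeholders.
	client := vultr.NewClient("MY-API-KEY", nil)

	apps, err := client.ListApplicationsforServer("576965")
	if err != nil {
		log.Fatal(err)
	}
	for _, app := range apps {
		fmt.Println(app.ID, app.Name)
	}

	// Switch the server to the first available one-click application.
	if len(apps) > 0 {
		if err := client.ChangeApplicationofServer("576965", apps[0].ID); err != nil {
			log.Fatal(err)
		}
	}
}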

View file

@ -1,6 +1,10 @@
package lib
import "net/url"
import (
"net/url"
"sort"
"strings"
)
// Snapshot of a virtual machine on Vultr account
type Snapshot struct {
@ -11,17 +15,32 @@ type Snapshot struct {
Created string `json:"date_created"`
}
type snapshots []Snapshot
func (s snapshots) Len() int { return len(s) }
func (s snapshots) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s snapshots) Less(i, j int) bool {
// sort order: description, created
if strings.ToLower(s[i].Description) < strings.ToLower(s[j].Description) {
return true
} else if strings.ToLower(s[i].Description) > strings.ToLower(s[j].Description) {
return false
}
return s[i].Created < s[j].Created
}
// GetSnapshots retrieves a list of all snapshots on Vultr account
func (c *Client) GetSnapshots() (snapshots []Snapshot, err error) {
func (c *Client) GetSnapshots() (snapshotList []Snapshot, err error) {
var snapshotMap map[string]Snapshot
if err := c.get(`snapshot/list`, &snapshotMap); err != nil {
return nil, err
}
for _, snapshot := range snapshotMap {
snapshots = append(snapshots, snapshot)
snapshotList = append(snapshotList, snapshot)
}
return snapshots, nil
sort.Sort(snapshots(snapshotList))
return snapshotList, nil
}
// CreateSnapshot creates a new virtual machine snapshot

View file

@ -1,6 +1,10 @@
package lib
import "net/url"
import (
"net/url"
"sort"
"strings"
)
// SSHKey on Vultr account
type SSHKey struct {
@ -10,6 +14,12 @@ type SSHKey struct {
Created string `json:"date_created"`
}
type sshkeys []SSHKey
func (s sshkeys) Len() int { return len(s) }
func (s sshkeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sshkeys) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
// GetSSHKeys returns a list of SSHKeys from Vultr account
func (c *Client) GetSSHKeys() (keys []SSHKey, err error) {
var keyMap map[string]SSHKey
@ -20,6 +30,7 @@ func (c *Client) GetSSHKeys() (keys []SSHKey, err error) {
for _, key := range keyMap {
keys = append(keys, key)
}
sort.Sort(sshkeys(keys))
return keys, nil
}

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (
@ -83,7 +85,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
return err
}
defer releaseThreadToken(token)
err = adjustPrivileges(token, privileges)
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
if err != nil {
return err
}
@ -110,6 +112,15 @@ func mapPrivileges(names []string) ([]uint64, error) {
// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}
// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, 0)
}
func enableDisableProcessPrivilege(names []string, action uint32) error {
privileges, err := mapPrivileges(names)
if err != nil {
return err
@ -123,15 +134,15 @@ func EnableProcessPrivileges(names []string) error {
}
defer token.Close()
return adjustPrivileges(token, privileges)
return adjustPrivileges(token, privileges, action)
}
func adjustPrivileges(token windows.Token, privileges []uint64) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)
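Passing the action into adjustPrivileges is what allows the new DisableProcessPrivileges to share the code path with the enable case. A Windows-only sketch of the exported helpers; the privilege name is a common example, not something this change prescribes:

// +build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Enable a privilege for the whole process, do some work, then drop it
	// again with the new DisableProcessPrivileges helper.
	const priv = "SeBackupPrivilege" // illustrative privilege name

	if err := winio.EnableProcessPrivileges([]string{priv}); err != nil {
		log.Fatal(err)
	}
	defer winio.DisableProcessPrivileges([]string{priv})

	// ... privileged work here ...

	// RunWithPrivileges scopes the elevation to a single callback instead.
	err := winio.RunWithPrivileges([]string{priv}, func() error {
		// ... privileged work here ...
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}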

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go

View file

@ -12,9 +12,9 @@ import (
var _ unsafe.Pointer
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
modwinmm = syscall.NewLazyDLL("winmm.dll")
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modwinmm = windows.NewLazySystemDLL("winmm.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")

View file

@ -4,6 +4,7 @@ import (
"bufio"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
@ -21,10 +22,16 @@ const (
type codings map[string]float64
// The default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
const DEFAULT_QVALUE = 1.0
const (
// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// DefaultMinSize defines the minimum size to reach to enable compression.
// It's 512 bytes.
DefaultMinSize = 512
)
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to covert a compression level to an index into
@ -63,35 +70,88 @@ func addLevelPool(level int) {
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip response smaller than minSize.
type GzipResponseWriter struct {
http.ResponseWriter
index int // Index for gzipWriterPools.
gw *gzip.Writer
code int // Saves the WriteHeader value.
minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
}
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// Lazily create the gzip.Writer, this allows empty bodies to be actually
// empty, for example in the case of status code 204 (no content).
if w.gw == nil {
w.init()
}
// If content type is not set.
if _, ok := w.Header()[contentType]; !ok {
// If content type is not set, infer it from the uncompressed body.
// It infer it from the uncompressed body.
w.Header().Set(contentType, http.DetectContentType(b))
}
return w.gw.Write(b)
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
}
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
w.buf = append(w.buf, b...)
// If the global writes are bigger than the minSize, compression is enable.
if len(w.buf) >= w.minSize {
err := w.startGzip()
if err != nil {
return 0, err
}
}
return len(b), nil
}
// WriteHeader will check if the gzip writer needs to be lazily initiated and
// then pass the code along to the underlying ResponseWriter.
func (w *GzipResponseWriter) WriteHeader(code int) {
if w.gw == nil &&
code != http.StatusNotModified && code != http.StatusNoContent {
w.init()
// startGzip initialize any GZIP specific informations.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
// if the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since its already set
// See: https://github.com/golang/go/issues/14975.
w.Header().Del(contentLength)
// Write the header to gzip response.
w.writeHeader()
// Initialize the GZIP response.
w.init()
// Flush the buffer into the gzip reponse.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
}
w.ResponseWriter.WriteHeader(code)
w.buf = nil
return err
}
// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
w.code = code
}
// writeHeader uses the saved code to send it to the ResponseWriter.
func (w *GzipResponseWriter) writeHeader() {
if w.code == 0 {
w.code = http.StatusOK
}
w.ResponseWriter.WriteHeader(w.code)
}
// init graps a new gzip writer from the gzipWriterPool and writes the correct
@ -102,21 +162,29 @@ func (w *GzipResponseWriter) init() {
gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
gzw.Reset(w.ResponseWriter)
w.gw = gzw
w.ResponseWriter.Header().Set(contentEncoding, "gzip")
// if the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since its already set
// See: https://github.com/golang/go/issues/14975
w.ResponseWriter.Header().Del(contentLength)
}
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
// Buffer not nil means the regular response must be returned.
if w.buf != nil {
w.writeHeader()
// Make the write into the regular response.
_, writeErr := w.ResponseWriter.Write(w.buf)
// Returns the error if any at write.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
}
// If the GZIP responseWriter is not set no needs to close it.
if w.gw == nil {
return nil
}
err := w.gw.Close()
gzipWriterPools[w.index].Put(w.gw)
w.gw = nil
return err
}
@ -162,9 +230,18 @@ func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
return NewGzipLevelAndMinSize(level, DefaultMinSize)
}
// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller
// specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
return nil, fmt.Errorf("invalid compression level requested: %d", level)
}
if minSize < 0 {
return nil, fmt.Errorf("minimum size must be more than zero")
}
return func(h http.Handler) http.Handler {
index := poolIndex(level)
@ -175,6 +252,9 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
minSize: minSize,
buf: []byte{},
}
defer gw.Close()
@ -237,7 +317,7 @@ func parseEncodings(s string) (codings, error) {
func parseCoding(s string) (coding string, qvalue float64, err error) {
for n, part := range strings.Split(s, ";") {
part = strings.TrimSpace(part)
qvalue = DEFAULT_QVALUE
qvalue = DefaultQValue
if n == 0 {
coding = strings.ToLower(part)
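With the buffering above, responses smaller than minSize are flushed uncompressed at Close, so tiny payloads no longer pay the gzip overhead. A sketch of wiring a handler through the new NewGzipLevelAndMinSize; the threshold and the handler are illustrative:

package main

import (
	"compress/gzip"
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Short bodies like this one stay uncompressed because they never
		// reach the 1024-byte threshold configured below.
		w.Write([]byte("hello"))
	})

	// Compress with the default level, but only once a response reaches
	// 1024 bytes; NewGzipLevelHandler would use DefaultMinSize (512) instead.
	wrap, err := gziphandler.NewGzipLevelAndMinSize(gzip.DefaultCompression, 1024)
	if err != nil {
		log.Fatal(err)
	}

	log.Fatal(http.ListenAndServe(":8080", wrap(hello)))
}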

vendor/github.com/NYTimes/gziphandler/gzip_go18.go (new file, 43 lines, generated, vendored)
View file

@ -0,0 +1,43 @@
// +build go1.8
package gziphandler
import "net/http"
// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
pusher, ok := w.ResponseWriter.(http.Pusher)
if ok && pusher != nil {
return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
}
return http.ErrNotSupported
}
// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
if opts == nil {
opts = &http.PushOptions{
Header: http.Header{
acceptEncoding: []string{"gzip"},
},
}
return opts
}
if opts.Header == nil {
opts.Header = http.Header{
acceptEncoding: []string{"gzip"},
}
return opts
}
if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
opts.Header.Add(acceptEncoding, "gzip")
return opts
}
return opts
}
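The Push forwarding keeps HTTP/2 server push working when a handler is wrapped by the gzip middleware. A sketch of a handler taking advantage of it; the asset path and certificate files are placeholders, and push only works over TLS with HTTP/2:

package main

import (
	"log"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func main() {
	index := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Because GzipResponseWriter forwards Push, this works even though
		// the handler only sees the gzip wrapper, not the raw writer.
		if pusher, ok := w.(http.Pusher); ok {
			if err := pusher.Push("/static/app.css", nil); err != nil {
				log.Printf("push failed: %v", err)
			}
		}
		w.Write([]byte(`<html><head><link rel="stylesheet" href="/static/app.css"></head></html>`))
	})

	// HTTP/2 (and therefore push) requires TLS; cert paths are placeholders.
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", gziphandler.GzipHandler(index)))
}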

View file

@ -1,7 +1,7 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//

View file

@ -3,11 +3,21 @@ package logrus
import (
"bytes"
"fmt"
"io"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
@ -29,6 +39,9 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), an Buffer may be set to entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
@ -39,21 +52,15 @@ func NewEntry(logger *Logger) *Entry {
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
return reader.String(), err
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
@ -81,6 +88,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
@ -90,20 +98,23 @@ func (entry Entry) log(level Level, msg string) {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for

View file

@ -5,9 +5,40 @@ import (
"fmt"
)
type fieldKey string
type FieldMap map[fieldKey]string
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for various fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyMsg: "@message",
// },
// }
FieldMap FieldMap
}
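
A short usage sketch of the new FieldMap option (the renamed keys are arbitrary, and the import path is assumed):

package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.JSONFormatter{
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	// Emits {"@level":"info","@message":"listening","@timestamp":"...","component":"api"}
	log.WithField("component", "api").Info("listening")
}
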
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
@ -29,9 +60,11 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {

View file

@ -26,8 +26,31 @@ type Logger struct {
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
Level Level
// Used to sync writing to the log.
mu sync.Mutex
// Used to sync writing to the log. Locking is enabled by default
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
@ -51,162 +74,235 @@ func New() *Logger {
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When a file is opened in append mode, it is safe to write to it
// concurrently (for messages under 4k on Linux). In that case the user
// can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
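
A sketch of the case described above, where the output is an append-mode file and the lock is switched off; the file path and import path are placeholders:

package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	f, err := os.OpenFile("/var/log/app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		logrus.Fatal(err)
	}
	defer f.Close()

	logger := logrus.New()
	logger.Out = f
	// Appending small messages is atomic on Linux, so the internal
	// mutex can be disabled to avoid contention between goroutines.
	logger.SetNoLock()
	logger.Info("lock-free logging to an append-only file")
}
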

View file

@ -0,0 +1,10 @@
// +build appengine
package logrus
import "io"
// IsTerminal always returns true on App Engine, where the writer cannot be inspected.
func IsTerminal(f io.Writer) bool {
return true
}

View file

@ -1,4 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus

View file

@ -3,6 +3,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"

View file

@ -4,18 +4,25 @@
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
func IsTerminal(f io.Writer) bool {
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
switch v := f.(type) {
case *os.File:
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
default:
return false
}
}

View file

@ -1,15 +1,21 @@
// +build solaris
// +build solaris,!appengine
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}

View file

@ -3,11 +3,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// +build windows,!appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
@ -19,9 +21,13 @@ var (
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}

View file

@ -3,9 +3,9 @@ package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"sync"
"time"
)
@ -20,16 +20,10 @@ const (
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
@ -54,10 +48,32 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// QuoteCharacter can be set to override the default quoting character "
// with something else. For example: ', or `.
QuoteCharacter string
// Whether the logger's Out is a terminal
isTerminal bool
sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if len(f.QuoteCharacter) == 0 {
f.QuoteCharacter = "\""
}
if entry.Logger != nil {
f.isTerminal = IsTerminal(entry.Logger.Out)
}
}
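
For illustration, the two new options could be combined like this (the values are arbitrary and the import path is assumed):

package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.TextFormatter{
		// Wrap empty values in quotes so they stay visible, and use a
		// backtick instead of the default " as the quoting character.
		QuoteEmptyFields: true,
		QuoteCharacter:   "`",
	}
	log.WithField("query", "").Warn("empty query received")
}
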
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var keys []string = make([]string, 0, len(entry.Data))
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
@ -65,13 +81,17 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
@ -111,18 +131,24 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func needsQuoting(text string) bool {
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
@ -138,24 +164,26 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !needsQuoting(value) {
if !f.needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
if !f.needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
fmt.Fprint(b, value)
}
b.WriteByte(' ')
}

View file

@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
printFunc = entry.Debug
case InfoLevel:
printFunc = logger.Info
printFunc = entry.Info
case WarnLevel:
printFunc = logger.Warn
printFunc = entry.Warn
case ErrorLevel:
printFunc = logger.Error
printFunc = entry.Error
case FatalLevel:
printFunc = logger.Fatal
printFunc = entry.Fatal
case PanicLevel:
printFunc = logger.Panic
printFunc = entry.Panic
default:
printFunc = logger.Print
printFunc = entry.Print
}
go logger.writerScanner(reader, printFunc)
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}

View file

@ -1,7 +1,10 @@
package auth
import "encoding/csv"
import "os"
import (
"encoding/csv"
"os"
"sync"
)
/*
SecretProvider is used by authenticators. Takes user name and realm
@ -20,6 +23,7 @@ type File struct {
Info os.FileInfo
/* must be set in inherited types during initialization */
Reload func()
mu sync.Mutex
}
func (f *File) ReloadIfNeeded() {
@ -27,6 +31,8 @@ func (f *File) ReloadIfNeeded() {
if err != nil {
panic(err)
}
f.mu.Lock()
defer f.mu.Unlock()
if f.Info == nil || f.Info.ModTime() != info.ModTime() {
f.Info = info
f.Reload()
@ -40,6 +46,7 @@ func (f *File) ReloadIfNeeded() {
type HtdigestFile struct {
File
Users map[string]map[string]string
mu sync.RWMutex
}
func reload_htdigest(hf *HtdigestFile) {
@ -57,6 +64,8 @@ func reload_htdigest(hf *HtdigestFile) {
panic(err)
}
hf.mu.Lock()
defer hf.mu.Unlock()
hf.Users = make(map[string]map[string]string)
for _, record := range records {
_, exists := hf.Users[record[1]]
@ -77,6 +86,8 @@ func HtdigestFileProvider(filename string) SecretProvider {
hf.Reload = func() { reload_htdigest(hf) }
return func(user, realm string) string {
hf.ReloadIfNeeded()
hf.mu.RLock()
defer hf.mu.RUnlock()
_, exists := hf.Users[realm]
if !exists {
return ""
@ -96,6 +107,7 @@ func HtdigestFileProvider(filename string) SecretProvider {
type HtpasswdFile struct {
File
Users map[string]string
mu sync.RWMutex
}
func reload_htpasswd(h *HtpasswdFile) {
@ -113,6 +125,8 @@ func reload_htpasswd(h *HtpasswdFile) {
panic(err)
}
h.mu.Lock()
defer h.mu.Unlock()
h.Users = make(map[string]string)
for _, record := range records {
h.Users[record[0]] = record[1]
@ -129,7 +143,9 @@ func HtpasswdFileProvider(filename string) SecretProvider {
h.Reload = func() { reload_htpasswd(h) }
return func(user, realm string) string {
h.ReloadIfNeeded()
h.mu.RLock()
password, exists := h.Users[user]
h.mu.RUnlock()
if !exists {
return ""
}
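
A sketch of how the htpasswd provider is typically wired up; the upstream import path, file location, and realm are assumptions:

package main

import (
	"fmt"
	"log"
	"net/http"

	auth "github.com/abbot/go-http-auth"
)

func main() {
	// The provider reloads the htpasswd file when its mtime changes; the
	// mutexes added above make that reload safe under concurrent requests.
	secrets := auth.HtpasswdFileProvider("/etc/httpd/.htpasswd")
	authenticator := auth.NewBasicAuthenticator("example.com", secrets)

	http.HandleFunc("/", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
		fmt.Fprintf(w, "hello, %s", r.Username)
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
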

20
vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
View file

@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
if l == 0 {
return 0
}
i := int(float64(l) * q)
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}

View file

@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -1,7 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
os.Remove(db.path + lockExt)
return err
}

View file

@ -130,9 +130,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable {
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
@ -167,9 +175,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
@ -329,6 +336,28 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
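
A minimal sketch of the new SetSequence API; the database file, bucket name, and seed value are placeholders:

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("app.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		// Seed the autoincrement counter (e.g. after a bulk import);
		// the next NextSequence call then returns 1001.
		if err := b.SetSequence(1000); err != nil {
			return err
		}
		next, err := b.NextSequence()
		log.Printf("next sequence: %d", next)
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
}
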
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {

View file

@ -552,7 +552,10 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction.
for i, t := range db.txs {
if t == tx {
db.txs = append(db.txs[:i], db.txs[i+1:]...)
last := len(db.txs) - 1
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
break
}
}
@ -952,7 +955,7 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse
diff.TxN = other.TxN - s.TxN
diff.TxN = s.TxN - other.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff
}

View file

@ -24,7 +24,12 @@ func newFreelist() *freelist {
// size returns the size of the page after serialization.
func (f *freelist) size() int {
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
@ -46,16 +51,15 @@ func (f *freelist) pending_count() int {
return count
}
// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
m := make(pgids, 0)
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
return pgids(f.ids).merge(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
@ -166,12 +170,16 @@ func (f *freelist) read(p *page) {
}
// Copy the list of page ids from the freelist.
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
@ -182,20 +190,22 @@ func (f *freelist) read(p *page) {
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
ids := f.all()
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
if len(ids) < 0xFFFF {
p.count = uint16(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
@ -230,7 +240,7 @@ func (f *freelist) reload(p *page) {
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool)
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}

View file

@ -201,6 +201,11 @@ func (n *node) write(p *page) {
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {

View file

@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -72,6 +75,9 @@ func (p *page) branchPageElement(index uint16) *branchPageElement {
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -139,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
} else if len(b) == 0 {
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// Create a list to hold all elements from both lists.
merged := make(pgids, 0, len(a)+len(b))
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
@ -166,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
}
// Append what's left in follow.
merged = append(merged, follow...)
return merged
_ = append(merged, follow...)
}

View file

@ -381,7 +381,9 @@ func (tx *Tx) Check() <-chan error {
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
for _, id := range tx.db.freelist.all() {
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}

60
vendor/github.com/cenk/backoff/context.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
package backoff
import (
"time"
"golang.org/x/net/context"
)
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface {
BackOff
Context() context.Context
}
type backOffContext struct {
BackOff
ctx context.Context
}
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext {
if ctx == nil {
panic("nil context")
}
if b, ok := b.(*backOffContext); ok {
return &backOffContext{
BackOff: b.BackOff,
ctx: ctx,
}
}
return &backOffContext{
BackOff: b,
ctx: ctx,
}
}
func ensureContext(b BackOff) BackOffContext {
if cb, ok := b.(BackOffContext); ok {
return cb
}
return WithContext(b, context.Background())
}
func (b *backOffContext) Context() context.Context {
return b.ctx
}
func (b *backOffContext) NextBackOff() time.Duration {
select {
case <-b.Context().Done():
return Stop
default:
return b.BackOff.NextBackOff()
}
}
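
For context, a sketch of tying retries to a context via the new BackOffContext; the failing operation is a stand-in, and note that this vendored copy uses golang.org/x/net/context:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenk/backoff"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Retries stop either when the exponential policy gives up or when ctx is done.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	err := backoff.Retry(func() error {
		return errors.New("service not ready yet") // stand-in transient failure
	}, b)
	if err != nil {
		log.Printf("gave up: %v", err)
	}
}
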

View file

@ -89,11 +89,6 @@ func NewExponentialBackOff() *ExponentialBackOff {
MaxElapsedTime: DefaultMaxElapsedTime,
Clock: SystemClock,
}
if b.RandomizationFactor < 0 {
b.RandomizationFactor = 0
} else if b.RandomizationFactor > 1 {
b.RandomizationFactor = 1
}
b.Reset()
return b
}

View file

@ -17,6 +17,9 @@ type Notify func(error, time.Duration)
// o is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
@ -27,12 +30,18 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
var err error
var next time.Duration
cb := ensureContext(b)
b.Reset()
for {
if err = operation(); err == nil {
return nil
}
if permanent, ok := err.(*PermanentError); ok {
return permanent.Err
}
if next = b.NextBackOff(); next == Stop {
return err
}
@ -41,6 +50,29 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
notify(err, next)
}
time.Sleep(next)
t := time.NewTimer(next)
select {
case <-cb.Context().Done():
t.Stop()
return err
case <-t.C:
}
}
}
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
func (e *PermanentError) Error() string {
return e.Err.Error()
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) *PermanentError {
return &PermanentError{
Err: err,
}
}
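
And a sketch of short-circuiting retries with the new Permanent wrapper; the HTTP call and endpoint are illustrative:

package main

import (
	"errors"
	"log"
	"net/http"

	"github.com/cenk/backoff"
)

func main() {
	err := backoff.Retry(func() error {
		resp, err := http.Get("https://example.com/api") // illustrative endpoint
		if err != nil {
			return err // transient: retried with backoff
		}
		defer resp.Body.Close()
		if resp.StatusCode == http.StatusUnauthorized {
			// Retrying cannot help here, so wrap the error and stop immediately.
			return backoff.Permanent(errors.New("unauthorized"))
		}
		return nil
	}, backoff.NewExponentialBackOff())
	if err != nil {
		log.Printf("request failed: %v", err)
	}
}
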

View file

@ -13,7 +13,7 @@ import (
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
b BackOffContext
stop chan struct{}
stopOnce sync.Once
}
@ -26,7 +26,7 @@ func NewTicker(b BackOff) *Ticker {
t := &Ticker{
C: c,
c: c,
b: b,
b: ensureContext(b),
stop: make(chan struct{}),
}
go t.run()
@ -58,6 +58,8 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.b.Context().Done():
return
}
}
}

View file

@ -1,29 +1,80 @@
package negroni
import (
"bytes"
"log"
"net/http"
"os"
"text/template"
"time"
)
// LoggerEntry is the structure
// passed to the template.
type LoggerEntry struct {
StartTime string
Status int
Duration time.Duration
Hostname string
Method string
Path string
}
// LoggerDefaultFormat is the format
// used by the default Logger instance.
var LoggerDefaultFormat = "{{.StartTime}} | {{.Status}} | \t {{.Duration}} | {{.Hostname}} | {{.Method}} {{.Path}} \n"
// LoggerDefaultDateFormat is the date format
// used by the default Logger instance.
var LoggerDefaultDateFormat = time.RFC3339
// ALogger interface
type ALogger interface {
Println(v ...interface{})
Printf(format string, v ...interface{})
}
// Logger is a middleware handler that logs the request as it goes in and the response as it goes out.
type Logger struct {
// Logger inherits from log.Logger used to log messages with the Logger middleware
*log.Logger
// ALogger implements just enough log.Logger interface to be compatible with other implementations
ALogger
dateFormat string
template *template.Template
}
// NewLogger returns a new Logger instance
func NewLogger() *Logger {
return &Logger{log.New(os.Stdout, "[negroni] ", 0)}
logger := &Logger{ALogger: log.New(os.Stdout, "[negroni] ", 0), dateFormat: LoggerDefaultDateFormat}
logger.SetFormat(LoggerDefaultFormat)
return logger
}
func (l *Logger) SetFormat(format string) {
l.template = template.Must(template.New("negroni_parser").Parse(format))
}
func (l *Logger) SetDateFormat(format string) {
l.dateFormat = format
}
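
A sketch of configuring the template-based logger; the format string, date format, and import path are assumptions:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/urfave/negroni"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})

	l := negroni.NewLogger()
	// Any field of LoggerEntry can be referenced in the template.
	l.SetFormat("{{.Method}} {{.Path}} -> {{.Status}} in {{.Duration}}")
	l.SetDateFormat(time.RFC1123)

	n := negroni.New(negroni.NewRecovery(), l)
	n.UseHandler(mux)
	n.Run(":3000")
}
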
func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
start := time.Now()
l.Printf("Started %s %s", r.Method, r.URL.Path)
next(rw, r)
res := rw.(ResponseWriter)
l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start))
log := LoggerEntry{
StartTime: start.Format(l.dateFormat),
Status: res.Status(),
Duration: time.Since(start),
Hostname: r.Host,
Method: r.Method,
Path: r.URL.Path,
}
buff := &bytes.Buffer{}
l.template.Execute(buff, log)
l.Printf(buff.String())
}

View file

@ -59,6 +59,14 @@ func New(handlers ...Handler) *Negroni {
}
}
// With returns a new Negroni instance that is a combination of the negroni
// receiver's handlers and the provided handlers.
func (n *Negroni) With(handlers ...Handler) *Negroni {
return New(
append(n.handlers, handlers...)...,
)
}
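
The new With helper builds a derived stack without mutating the receiver; a brief sketch, with a hypothetical header check standing in for real auth middleware and the import path assumed:

package main

import (
	"fmt"
	"net/http"

	"github.com/urfave/negroni"
)

func main() {
	adminMux := http.NewServeMux()
	adminMux.HandleFunc("/admin", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "admin area")
	})

	base := negroni.New(negroni.NewRecovery(), negroni.NewLogger())

	// The admin stack gets one extra (hypothetical) check; base is left untouched.
	admin := base.With(negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
		if r.Header.Get("X-Admin-Token") == "" {
			http.Error(w, "forbidden", http.StatusForbidden)
			return
		}
		next(w, r)
	}))
	admin.UseHandler(adminMux)
	admin.Run(":3001")
}
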
// Classic returns a new Negroni instance with the default middleware already
// in the stack.
//

View file

@ -11,7 +11,7 @@ import (
// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
type Recovery struct {
Logger *log.Logger
Logger ALogger
PrintStack bool
ErrorHandlerFunc func(interface{})
StackAll bool

View file

@ -29,9 +29,15 @@ type beforeFunc func(ResponseWriter)
// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
return &responseWriter{
nrw := &responseWriter{
ResponseWriter: rw,
}
if _, ok := rw.(http.CloseNotifier); ok {
return &responseWriterCloseNotifer{nrw}
}
return nrw
}
type responseWriter struct {
@ -81,10 +87,6 @@ func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return hijacker.Hijack()
}
func (rw *responseWriter) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (rw *responseWriter) callBefore() {
for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
rw.beforeFuncs[i](rw)
@ -94,6 +96,18 @@ func (rw *responseWriter) callBefore() {
func (rw *responseWriter) Flush() {
flusher, ok := rw.ResponseWriter.(http.Flusher)
if ok {
if !rw.Written() {
// The status will be StatusOK if WriteHeader has not been called yet
rw.WriteHeader(http.StatusOK)
}
flusher.Flush()
}
}
type responseWriterCloseNotifer struct {
*responseWriter
}
func (rw *responseWriterCloseNotifer) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

63
vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go generated vendored Normal file
View file

@ -0,0 +1,63 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil implements utility functions related to files and paths.
package fileutil
import (
"io/ioutil"
"os"
"path"
"sort"
"github.com/coreos/pkg/capnslog"
)
const (
privateFileMode = 0600
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
)
// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
f := path.Join(dir, ".touch")
if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil {
return err
}
return os.Remove(f)
}
// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(dirpath string) ([]string, error) {
dir, err := os.Open(dirpath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
func Exist(name string) bool {
_, err := os.Stat(name)
return err == nil
}

View file

@ -0,0 +1,90 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"errors"
"os"
"syscall"
"time"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fname string
file *os.File
}
func (l *lock) Name() string {
return l.fname
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
f, err := os.Open(l.fname)
if err != nil {
return ErrLocked
}
l.file = f
return nil
}
// Lock acquires exclusivity on the lock with blocking
func (l *lock) Lock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
for {
f, err := os.Open(l.fname)
if err == nil {
l.file = f
return nil
}
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return l.file.Close()
}
func (l *lock) Destroy() error {
return nil
}
func NewLock(file string) (Lock, error) {
l := &lock{fname: file}
return l, nil
}

View file

@ -0,0 +1,98 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.OpenFile(file, os.O_WRONLY, 0600)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

View file

@ -0,0 +1,76 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9,!solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil && err == syscall.EWOULDBLOCK {
return ErrLocked
}
return err
}
// Lock acquires exclusivity on the lock, blocking until it is available
func (l *lock) Lock() error {
return syscall.Flock(l.fd, syscall.LOCK_EX)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return syscall.Flock(l.fd, syscall.LOCK_UN)
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

View file

@ -0,0 +1,71 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package fileutil
import (
"errors"
"os"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
return nil
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
return nil
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return nil
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

View file

@ -0,0 +1,28 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package fileutil
import "os"
// Preallocate tries to allocate the space for the given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
return nil
}

View file

@ -0,0 +1,42 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Preallocate tries to allocate the space for the given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
// use mode = 1 to keep size
// see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes))
if err != nil {
errno, ok := err.(syscall.Errno)
// treat not support as nil error
if ok && errno == syscall.ENOTSUP {
return nil
}
return err
}
return nil
}

80
vendor/github.com/coreos/etcd/pkg/fileutil/purge.go generated vendored Normal file
View file

@ -0,0 +1,80 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path"
"sort"
"strings"
"time"
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
errC := make(chan error, 1)
go func() {
for {
fnames, err := ReadDir(dirname)
if err != nil {
errC <- err
return
}
newfnames := make([]string, 0)
for _, fname := range fnames {
if strings.HasSuffix(fname, suffix) {
newfnames = append(newfnames, fname)
}
}
sort.Strings(newfnames)
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
l, err := NewLock(f)
if err != nil {
errC <- err
return
}
err = l.TryLock()
if err != nil {
break
}
err = os.Remove(f)
if err != nil {
errC <- err
return
}
err = l.Unlock()
if err != nil {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
err = l.Destroy()
if err != nil {
plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
plog.Infof("purged file %s successfully", f)
newfnames = newfnames[1:]
}
select {
case <-time.After(interval):
case <-stop:
return
}
}
}()
return errC
}
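
A sketch of driving the purge loop; the directory, suffix, and limits are placeholders:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	stop := make(chan struct{})
	// Keep at most 5 *.wal files in the directory, checking every 30s.
	errc := fileutil.PurgeFile("/var/lib/etcd/member/wal", "wal", 5, 30*time.Second, stop)

	select {
	case err := <-errc:
		log.Fatalf("purge loop stopped: %v", err)
	case <-time.After(5 * time.Minute):
		close(stop) // shut the loop down cleanly
	}
}
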

91
vendor/github.com/coreos/etcd/version/version.go generated vendored Normal file
View file

@ -0,0 +1,91 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version implements etcd version parsing and contains latest version
// information.
package version
import (
"fmt"
"os"
"path"
"strings"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/types"
)
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "2.2.0"
Version = "2.3.0-alpha.0+git"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
// DataDirVersion is an enum for versions of etcd data directories.
type DataDirVersion string
const (
DataDirUnknown DataDirVersion = "Unknown WAL"
DataDir2_0 DataDirVersion = "2.0.0"
DataDir2_0Proxy DataDirVersion = "2.0 proxy"
DataDir2_0_1 DataDirVersion = "2.0.1"
)
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
// TODO: raft state machine version
}
func DetectDataDir(dirpath string) (DataDirVersion, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
// Error reading the directory
return DataDirUnknown, err
}
nameSet := types.NewUnsafeSet(names...)
if nameSet.Contains("member") {
ver, err := DetectDataDir(path.Join(dirpath, "member"))
if ver == DataDir2_0 {
return DataDir2_0_1, nil
}
return ver, err
}
if nameSet.ContainsAll([]string{"snap", "wal"}) {
// .../wal cannot be empty to exist.
walnames, err := fileutil.ReadDir(path.Join(dirpath, "wal"))
if err == nil && len(walnames) > 0 {
return DataDir2_0, nil
}
}
if nameSet.ContainsAll([]string{"proxy"}) {
return DataDir2_0Proxy, nil
}
return DataDirUnknown, nil
}
// Cluster only keeps the major.minor.
func Cluster(v string) string {
vs := strings.Split(v, ".")
if len(vs) <= 2 {
return v
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}

View file

@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer

View file

@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes)
// Display pointer information.
if len(pointerChain) > 0 {
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || valueCap != 0 {
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if valueCap != 0 {
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}

View file

@ -1,27 +0,0 @@
Copyright (c) 2015, David Deng
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of go-colortext nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,47 +0,0 @@
/*
ct package provides functions to change the color of console text.
Under windows platform, the Console api is used. Under other systems, ANSI text mode is used.
*/
package ct
// Color is the type of color to be set.
type Color int
const (
// No change of color
None = Color(iota)
Black
Red
Green
Yellow
Blue
Magenta
Cyan
White
)
/*
ResetColor resets the foreground and background to original colors
*/
func ResetColor() {
resetColor()
}
// ChangeColor sets the foreground and background colors. If the value of the color is None,
// the corresponding color keeps unchanged.
// If fgBright or bgBright is set true, corresponding color use bright color. bgBright may be
// ignored in some OS environment.
func ChangeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
changeColor(fg, fgBright, bg, bgBright)
}
// Foreground changes the foreground color.
func Foreground(cl Color, bright bool) {
ChangeColor(cl, bright, None, false)
}
// Background changes the background color.
func Background(cl Color, bright bool) {
ChangeColor(None, false, cl, bright)
}

View file

@ -1,35 +0,0 @@
// +build !windows
package ct
import (
"fmt"
)
func resetColor() {
fmt.Print("\x1b[0m")
}
func changeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
if fg == None && bg == None {
return
} // if
s := ""
if fg != None {
s = fmt.Sprintf("%s%d", s, 30+(int)(fg-Black))
if fgBright {
s += ";1"
} // if
} // if
if bg != None {
if s != "" {
s += ";"
} // if
s = fmt.Sprintf("%s%d", s, 40+(int)(bg-Black))
} // if
s = "\x1b[0;" + s + "m"
fmt.Print(s)
}

View file

@ -1,139 +0,0 @@
// +build windows
package ct
import (
"syscall"
"unsafe"
)
var fg_colors = []uint16{
0,
0,
foreground_red,
foreground_green,
foreground_red | foreground_green,
foreground_blue,
foreground_red | foreground_blue,
foreground_green | foreground_blue,
foreground_red | foreground_green | foreground_blue}
var bg_colors = []uint16{
0,
0,
background_red,
background_green,
background_red | background_green,
background_blue,
background_red | background_blue,
background_green | background_blue,
background_red | background_green | background_blue}
const (
foreground_blue = uint16(0x0001)
foreground_green = uint16(0x0002)
foreground_red = uint16(0x0004)
foreground_intensity = uint16(0x0008)
background_blue = uint16(0x0010)
background_green = uint16(0x0020)
background_red = uint16(0x0040)
background_intensity = uint16(0x0080)
foreground_mask = foreground_blue | foreground_green | foreground_red | foreground_intensity
background_mask = background_blue | background_green | background_red | background_intensity
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
hStdout uintptr
initScreenInfo *console_screen_buffer_info
)
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
ret, _, _ := procSetConsoleTextAttribute.Call(
hConsoleOutput,
uintptr(wAttributes))
return ret != 0
}
type coord struct {
X, Y int16
}
type small_rect struct {
Left, Top, Right, Bottom int16
}
type console_screen_buffer_info struct {
DwSize coord
DwCursorPosition coord
WAttributes uint16
SrWindow small_rect
DwMaximumWindowSize coord
}
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *console_screen_buffer_info {
var csbi console_screen_buffer_info
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
hConsoleOutput,
uintptr(unsafe.Pointer(&csbi)))
if ret == 0 {
return nil
}
return &csbi
}
const (
std_output_handle = uint32(-11 & 0xFFFFFFFF)
)
func init() {
kernel32 := syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
hStdout, _, _ = procGetStdHandle.Call(uintptr(std_output_handle))
initScreenInfo = getConsoleScreenBufferInfo(hStdout)
syscall.LoadDLL("")
}
func resetColor() {
if initScreenInfo == nil { // No console info - Ex: stdout redirection
return
}
setConsoleTextAttribute(hStdout, initScreenInfo.WAttributes)
}
func changeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
attr := uint16(0)
if fg == None || bg == None {
cbufinfo := getConsoleScreenBufferInfo(hStdout)
if cbufinfo == nil { // No console info - Ex: stdout redirection
return
}
attr = getConsoleScreenBufferInfo(hStdout).WAttributes
} // if
if fg != None {
attr = attr & ^foreground_mask | fg_colors[fg]
if fgBright {
attr |= foreground_intensity
} // if
} // if
if bg != None {
attr = attr & ^background_mask | bg_colors[bg]
if bgBright {
attr |= background_intensity
} // if
} // if
setConsoleTextAttribute(hStdout, attr)
}

21
vendor/github.com/decker502/dnspod-go/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 decker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -51,9 +51,13 @@ func (e ValidationError) Error() string {
} else {
return "token is invalid"
}
return e.Inner.Error()
}
// No errors
func (e *ValidationError) valid() bool {
return e.Errors == 0
if e.Errors > 0 {
return false
}
return true
}

View file

@ -8,9 +8,8 @@ import (
)
type Parser struct {
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
SkipClaimsValidation bool // Skip claims validation during token parsing
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
}
// Parse, validate, and return a token.
@ -102,16 +101,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
vErr := &ValidationError{}
// Validate Claims
if !p.SkipClaimsValidation {
if err := token.Claims.Valid(); err != nil {
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
}
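For the Parser options kept above (ValidMethods and UseJSONNumber), a hedged round-trip sketch; the secret and claims are illustrative only:

package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("illustrative-secret")

	// Sign a token with HS256...
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "traefik"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// ...then parse it back, restricting the accepted signing methods and
	// decoding numeric claims as json.Number.
	parser := &jwt.Parser{
		ValidMethods:  []string{"HS256"},
		UseJSONNumber: true,
	}
	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}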

View file

@ -59,6 +59,7 @@ func New(endpoints []string, options *store.Config) (store.Store, error) {
db *bolt.DB
err error
boltOptions *bolt.Options
timeout = transientTimeout
)
if len(endpoints) > 1 {
@ -82,11 +83,15 @@ func New(endpoints []string, options *store.Config) (store.Store, error) {
}
}
if options.ConnectionTimeout != 0 {
timeout = options.ConnectionTimeout
}
b := &BoltDB{
client: db,
path: endpoints[0],
boltBucket: []byte(options.Bucket),
timeout: transientTimeout,
timeout: timeout,
PersistConnection: options.PersistConnection,
}
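With this change, a ConnectionTimeout set in store.Config overrides the hard-coded transient timeout when opening the bolt file. A hedged sketch of calling boltdb.New with that option (path and bucket name are illustrative):

package main

import (
	"log"
	"time"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/boltdb"
)

func main() {
	kv, err := boltdb.New([]string{"/tmp/example.db"}, &store.Config{
		Bucket:            "example",
		ConnectionTimeout: 3 * time.Second, // now used as the bolt open timeout
		PersistConnection: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer kv.Close()

	if err := kv.Put("key", []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
}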

View file

@ -252,7 +252,7 @@ func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
if err != nil {
// If node is not found: List is out of date, retry
if err == zk.ErrNoNode {
if err == store.ErrKeyNotFound {
return s.List(directory)
}
return nil, err

View file

@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"time"
)
@ -145,14 +146,22 @@ func (fs *AssetFS) Open(name string) (http.File, error) {
}
if b, err := fs.Asset(name); err == nil {
timestamp := defaultFileTimestamp
if info, err := fs.AssetInfo(name); err == nil {
timestamp = info.ModTime()
if fs.AssetInfo != nil {
if info, err := fs.AssetInfo(name); err == nil {
timestamp = info.ModTime()
}
}
return NewAssetFile(name, b, timestamp), nil
}
if children, err := fs.AssetDir(name); err == nil {
return NewAssetDirectory(name, children, fs), nil
} else {
// If the underlying error is "not found", return an error that will
// If the underlying error is "not found", return an error that will
// result in a 404 error. Otherwise the server returns
// a 500 error for files not found.
if strings.Contains(err.Error(), "not found") {
return nil, os.ErrNotExist
}
return nil, err
}
}
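The change above tolerates a nil AssetInfo callback and translates "not found" errors into os.ErrNotExist, so http.FileServer answers 404 instead of 500. A hedged sketch of serving an AssetFS; Asset and AssetDir are stubs standing in for go-bindata generated functions, and the import path is assumed:

package main

import (
	"fmt"
	"log"
	"net/http"

	assetfs "github.com/elazarl/go-bindata-assetfs" // assumed upstream path
)

// Stubs standing in for the functions go-bindata would normally generate.
func Asset(name string) ([]byte, error) {
	if name == "static/index.html" {
		return []byte("<h1>hello</h1>"), nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

func AssetDir(name string) ([]string, error) {
	if name == "static" {
		return []string{"index.html"}, nil
	}
	return nil, fmt.Errorf("AssetDir %s not found", name)
}

func main() {
	fs := &assetfs.AssetFS{
		Asset:    Asset,
		AssetDir: AssetDir,
		// AssetInfo deliberately left nil: tolerated by the nil check added above.
		Prefix: "static",
	}
	http.Handle("/", http.FileServer(fs))
	log.Fatal(http.ListenAndServe(":8080", nil))
}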

20
vendor/github.com/fatih/color/LICENSE.md generated vendored Normal file
View file

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

526
vendor/github.com/fatih/color/color.go generated vendored Normal file
View file

@ -0,0 +1,526 @@
package color
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. This is a global option and affects all colors. For more control
// over each color block use the methods DisableColor() individually.
NoColor = os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
// Output defines the standard output of the print functions. By default
// os.Stdout is used.
Output = colorable.NewColorableStdout()
// colorsCache is used to reduce the count of created Color objects and
// allows to reuse already created objects with required Attribute.
colorsCache = make(map[Attribute]*Color)
colorsCacheMu sync.Mutex // protects colorsCache
)
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{params: make([]Attribute, 0)}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
func (c *Color) setWriter(w io.Writer) *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprintf(w, c.format())
return c
}
func (c *Color) unsetWriter(w io.Writer) {
if c.isNoColorSet() {
return
}
if NoColor {
return
}
fmt.Fprintf(w, "%s[%dm", escape, Reset)
}
// Add is used to chain SGR parameters. Use as many parameters as needed to combine
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
func (c *Color) prepend(value Attribute) {
c.params = append(c.params, 0)
copy(c.params[1:], c.params[0:])
c.params[0] = value
}
// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprint(w, a...)
}
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintf(w, format, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.setWriter(w)
defer c.unsetWriter(w)
return fmt.Fprintln(w, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprint(w, a...)
}
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Print(a...)
}
}
// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
return func(w io.Writer, format string, a ...interface{}) {
c.Fprintf(w, format, a...)
}
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
c.Printf(format, a...)
}
}
// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprintln(w, a...)
}
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Println(a...)
}
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// an example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the s string with the colors attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful to not change any existing
// code and still being able to output. Can be used for flags like
// "--no-color". To enable back use EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first if the user has explicitly set it
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
func colorPrint(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
} else {
c.Printf(format, a...)
}
}
func colorString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return colorString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }

128
vendor/github.com/fatih/color/doc.go generated vendored Normal file
View file

@ -0,0 +1,128 @@
/*
Package color is an ANSI color package to output colorized or SGR defined
output to the standard output. The API can be used in several way, pick one
that suits you.
Use simple and default helper functions with predefined foreground colors:
color.Cyan("Prints text in cyan.")
// a newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// More default foreground colors..
color.Red("We have red")
color.Yellow("Yellow color too!")
color.Magenta("And many others ..")
However there are times where custom color mixes are required. Below are some
examples to create custom color objects and use the print functions of each
separate color object.
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with White background.")
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
blue := color.New(color.FgBlue)
blue.Fprint(myWriter, "This will print text in blue.")
You can create PrintXxx functions to simplify even more:
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("warning")
red("error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("don't forget this...")
You can also use FprintXxx functions to pass your own io.Writer:
blue := color.New(FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)
// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "don't forget this...")
Or create SprintXxx functions to mix strings with other non-colorized strings:
yellow := New(FgYellow).SprintFunc()
red := New(FgRed).SprintFunc()
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Printf("this %s rocks!\n", info("package"))
Windows support is enabled by default. All Print functions work as intended. However,
for the color.SprintXXX functions, users should use fmt.FprintXXX and
set the output to color.Output:
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
Using with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way a rewrite of an existing
code is not required.
// Use handy standard colors.
color.Set(color.FgYellow)
fmt.Println("Existing text will be now in Yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // use it in your function
fmt.Println("All text will be now bold magenta.")
There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definition. For example
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
*/
package color

61
vendor/github.com/go-ini/ini/ini.go generated vendored
View file

@ -37,7 +37,7 @@ const (
// Maximum allowed depth when recursively substituing variable names.
_DEPTH_VALUES = 99
_VERSION = "1.23.0"
_VERSION = "1.27.0"
)
// Version returns current package version literal.
@ -173,9 +173,13 @@ type LoadOptions struct {
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
// IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
IgnoreInlineComment bool
// AllowBooleanKeys indicates whether to allow boolean-type keys or to treat them as keys with a missing value.
// Keys of this type are mostly used in my.cnf.
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with same name under same section.
AllowShadows bool
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
@ -219,6 +223,12 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
// ShadowLoad has exactly the same functionality as the Load function,
// except that it allows having shadow keys.
func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
}
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here, we sure our data is good.
@ -311,6 +321,11 @@ func (f *File) Sections() []*Section {
return sections
}
// ChildSections returns a list of child sections of given section name.
func (f *File) ChildSections(name string) []*Section {
return f.Section(name).ChildSections()
}
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
@ -441,6 +456,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
KEY_LIST:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
@ -467,28 +483,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
if key.isBooleanType {
continue
}
for _, val := range key.ValueWithShadows() {
if _, err = buf.WriteString(kname); err != nil {
return 0, err
}
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
if key.isBooleanType {
if kname != sec.keyList[len(sec.keyList)-1] {
buf.WriteString(LineBreak)
}
continue KEY_LIST
}
val := key.value
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
// Write out alignment spaces before "=" sign
if PrettyFormat {
buf.Write(alignSpaces[:alignLength-len(kname)])
}
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
return 0, err
}
}
}
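A hedged sketch of the two load options surfaced in this hunk, AllowShadows and IgnoreInlineComment (the INI content is illustrative):

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte(`
[ssl]
ciphers = HIGH:!aNULL ; kept as part of the value when inline comments are ignored
ca-file = /etc/one.pem
ca-file = /etc/two.pem
`)

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowShadows:        true, // keep both ca-file values instead of overwriting
		IgnoreInlineComment: true, // do not strip "; ..." from values
	}, data)
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.Section("ssl").Key("ca-file").ValueWithShadows())
	// [/etc/one.pem /etc/two.pem]
}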

148
vendor/github.com/go-ini/ini/key.go generated vendored
View file

@ -15,6 +15,7 @@
package ini
import (
"errors"
"fmt"
"strconv"
"strings"
@ -29,9 +30,42 @@ type Key struct {
isAutoIncrement bool
isBooleanType bool
isShadow bool
shadows []*Key
Comment string
}
// newKey simply return a key object with given values.
func newKey(s *Section, name, val string) *Key {
return &Key{
s: s,
name: name,
value: val,
}
}
func (k *Key) addShadow(val string) error {
if k.isShadow {
return errors.New("cannot add shadow to another shadow key")
} else if k.isAutoIncrement || k.isBooleanType {
return errors.New("cannot add shadow to auto-increment or boolean key")
}
shadow := newKey(k.s, k.name, val)
shadow.isShadow = true
k.shadows = append(k.shadows, shadow)
return nil
}
// AddShadow adds a new shadow key to itself.
func (k *Key) AddShadow(val string) error {
if !k.s.f.options.AllowShadows {
return errors.New("shadow key is not allowed")
}
return k.addShadow(val)
}
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
@ -45,16 +79,29 @@ func (k *Key) Value() string {
return k.value
}
// String returns string representation of value.
func (k *Key) String() string {
val := k.value
// ValueWithShadows returns raw values of key and its shadows if any.
func (k *Key) ValueWithShadows() []string {
if len(k.shadows) == 0 {
return []string{k.value}
}
vals := make([]string, len(k.shadows)+1)
vals[0] = k.value
for i := range k.shadows {
vals[i+1] = k.shadows[i].value
}
return vals
}
// transformValue takes a raw value and transforms to its final string.
func (k *Key) transformValue(val string) string {
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
if strings.Index(val, "%") == -1 {
// Fail-fast if no indicate char found for recursive value
if !strings.Contains(val, "%") {
return val
}
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
@ -78,6 +125,11 @@ func (k *Key) String() string {
return val
}
// String returns string representation of value.
func (k *Key) String() string {
return k.transformValue(k.value)
}
// Validate accepts a validate function which can
// return modifed result as key value.
func (k *Key) Validate(fn func(string) string) string {
@ -394,45 +446,65 @@ func (k *Key) Strings(delim string) []string {
vals := strings.Split(str, delim)
for i := range vals {
// vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
// StringsWithShadows returns list of string divided by given delimiter.
// Shadows will also be appended if any.
func (k *Key) StringsWithShadows(delim string) []string {
vals := k.ValueWithShadows()
results := make([]string, 0, len(vals)*2)
for i := range vals {
if len(vals) == 0 {
continue
}
results = append(results, strings.Split(vals[i], delim)...)
}
for i := range results {
results[i] = k.transformValue(strings.TrimSpace(results[i]))
}
return results
}
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, true, false)
vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
vals, _ := k.getInts(delim, true, false)
vals, _ := k.parseInts(k.Strings(delim), true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, true, false)
vals, _ := k.parseInt64s(k.Strings(delim), true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
vals, _ := k.getUints(delim, true, false)
vals, _ := k.parseUints(k.Strings(delim), true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, true, false)
vals, _ := k.parseUint64s(k.Strings(delim), true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, true, false)
vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
return vals
}
@ -445,41 +517,41 @@ func (k *Key) Times(delim string) []time.Time {
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
vals, _ := k.getFloat64s(delim, false, false)
vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
vals, _ := k.getInts(delim, false, false)
vals, _ := k.parseInts(k.Strings(delim), false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
vals, _ := k.getInt64s(delim, false, false)
vals, _ := k.parseInt64s(k.Strings(delim), false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
vals, _ := k.getUints(delim, false, false)
vals, _ := k.parseUints(k.Strings(delim), false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
vals, _ := k.getUint64s(delim, false, false)
vals, _ := k.parseUint64s(k.Strings(delim), false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
vals, _ := k.getTimesFormat(format, delim, false, false)
vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
return vals
}
@ -490,33 +562,33 @@ func (k *Key) ValidTimes(delim string) []time.Time {
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
return k.getFloat64s(delim, false, true)
return k.parseFloat64s(k.Strings(delim), false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
return k.getInts(delim, false, true)
return k.parseInts(k.Strings(delim), false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
return k.getInt64s(delim, false, true)
return k.parseInt64s(k.Strings(delim), false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
return k.getUints(delim, false, true)
return k.parseUints(k.Strings(delim), false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
return k.getUint64s(delim, false, true)
return k.parseUint64s(k.Strings(delim), false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
return k.getTimesFormat(format, delim, false, true)
return k.parseTimesFormat(format, k.Strings(delim), false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
@ -525,9 +597,8 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
// getFloat64s returns list of float64 divided by given delimiter.
func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
strs := k.Strings(delim)
// parseFloat64s transforms strings to float64s.
func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
@ -541,9 +612,8 @@ func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]flo
return vals, nil
}
// getInts returns list of int divided by given delimiter.
func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
strs := k.Strings(delim)
// parseInts transforms strings to ints.
func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
@ -557,9 +627,8 @@ func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, er
return vals, nil
}
// getInt64s returns list of int64 divided by given delimiter.
func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
strs := k.Strings(delim)
// parseInt64s transforms strings to int64s.
func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
@ -573,9 +642,8 @@ func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64
return vals, nil
}
// getUints returns list of uint divided by given delimiter.
func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
strs := k.Strings(delim)
// parseUints transforms strings to uints.
func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
@ -589,9 +657,8 @@ func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint,
return vals, nil
}
// getUint64s returns list of uint64 divided by given delimiter.
func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
strs := k.Strings(delim)
// parseUint64s transforms strings to uint64s.
func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
@ -605,9 +672,8 @@ func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint
return vals, nil
}
// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
strs := k.Strings(delim)
// parseTimesFormat transforms strings to times in given format.
func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)

View file

@ -193,7 +193,7 @@ func hasSurroundedQuote(in string, quote byte) bool {
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
@ -217,18 +217,21 @@ func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
// Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
// Check continuation lines when desired.
// Check continuation lines when desired
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
// Check if ignore inline comment
if !ignoreInlineComment {
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
}
// Trim single quotes
@ -318,11 +321,14 @@ func (f *File) parse(reader io.Reader) (err error) {
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
key, err := section.NewKey(string(line), "true")
kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
key, err := section.NewBooleanKey(kname)
if err != nil {
return err
}
key.isBooleanType = true
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
continue
@ -338,17 +344,16 @@ func (f *File) parse(reader io.Reader) (err error) {
p.count++
}
key, err := section.NewKey(kname, "")
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
key, err := section.NewKey(kname, value)
if err != nil {
return err
}
key.isAutoIncrement = isAutoIncr
value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}

View file

@ -68,20 +68,33 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
}
if inSlice(name, s.keyList) {
s.keys[name].value = val
if s.f.options.AllowShadows {
if err := s.keys[name].addShadow(val); err != nil {
return nil, err
}
} else {
s.keys[name].value = val
}
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
s.keys[name] = &Key{
s: s,
name: name,
value: val,
}
s.keys[name] = newKey(s, name, val)
s.keysHash[name] = val
return s.keys[name], nil
}
// NewBooleanKey creates a new boolean type key to given section.
func (s *Section) NewBooleanKey(name string) (*Key, error) {
key, err := s.NewKey(name, "true")
if err != nil {
return nil, err
}
key.isBooleanType = true
return key, nil
}
// GetKey returns key in section by given name.
func (s *Section) GetKey(name string) (*Key, error) {
// FIXME: change to section level lock?
@ -219,3 +232,17 @@ func (s *Section) DeleteKey(name string) {
}
}
}
// ChildSections returns a list of child sections of current section.
// For example, "[parent.child1]" and "[parent.child12]" are child sections
// of section "[parent]".
func (s *Section) ChildSections() []*Section {
prefix := s.name + "."
children := make([]*Section, 0, 3)
for _, name := range s.f.sectionList {
if strings.HasPrefix(name, prefix) {
children = append(children, s.f.sections[name])
}
}
return children
}
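A hedged sketch of the two Section helpers added above, NewBooleanKey and ChildSections (section and key names are illustrative):

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg := ini.Empty()
	cfg.Section("parent").NewKey("k", "v")
	cfg.Section("parent.child1").NewKey("k", "v")
	cfg.Section("parent.child2").NewKey("k", "v")

	// NewBooleanKey stores "true" and marks the key as boolean, so it is
	// written back as a bare key (my.cnf style) without "= true".
	cfg.Section("mysqld").NewBooleanKey("skip-name-resolve")

	// ChildSections matches on the "parent." name prefix.
	for _, child := range cfg.Section("parent").ChildSections() {
		fmt.Println(child.Name()) // parent.child1, parent.child2
	}
}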

View file

@ -78,8 +78,14 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
strs := key.Strings(delim)
func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)
} else {
strs = key.Strings(delim)
}
numVals := len(strs)
if numVals == 0 {
return nil
@ -92,17 +98,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
case reflect.String:
vals = strs
case reflect.Int:
vals = key.Ints(delim)
vals, _ = key.parseInts(strs, true, false)
case reflect.Int64:
vals = key.Int64s(delim)
vals, _ = key.parseInt64s(strs, true, false)
case reflect.Uint:
vals = key.Uints(delim)
vals, _ = key.parseUints(strs, true, false)
case reflect.Uint64:
vals = key.Uint64s(delim)
vals, _ = key.parseUint64s(strs, true, false)
case reflect.Float64:
vals = key.Float64s(delim)
vals, _ = key.parseFloat64s(strs, true, false)
case reflectTime:
vals = key.Times(delim)
vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
@ -133,7 +139,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
// setWithProperType sets proper value to field based on its type,
// but it does not return error for failing parsing,
// because we want to use the default value that is already assigned to the struct.
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
@ -174,7 +180,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.SetUint(uintVal)
case reflect.Float64:
case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
return nil
@ -187,13 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
return setSliceWithProperType(key, field, delim)
return setSliceWithProperType(key, field, delim, allowShadow)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
opts := strings.SplitN(tag, ",", 3)
rawName = opts[0]
if len(opts) > 1 {
omitEmpty = opts[1] == "omitempty"
}
if len(opts) > 2 {
allowShadow = opts[2] == "allowshadow"
}
return rawName, omitEmpty, allowShadow
}
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
@ -209,8 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error {
continue
}
opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
fieldName := s.parseFieldName(tpField.Name, opts[0])
rawName, _, allowShadow := parseTagOptions(tag)
fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
@ -231,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error {
}
if key, err := s.GetKey(fieldName); err == nil {
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
delim := parseDelim(tpField.Tag.Get("delim"))
if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
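The mapping changes above add a third tag option, allowshadow, so repeated keys loaded with AllowShadows can be mapped into a slice field. A hedged sketch (struct, tag, and data are illustrative):

package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

// Includes collects every "include" line, not just the last one, because the
// tag carries the "allowshadow" option parsed by parseTagOptions above.
type Config struct {
	Includes []string `ini:"include,omitempty,allowshadow" delim:","`
}

func main() {
	data := []byte(`
include = a.conf
include = b.conf
`)
	cfg, err := ini.LoadSources(ini.LoadOptions{AllowShadows: true}, data)
	if err != nil {
		panic(err)
	}

	var c Config
	if err := cfg.Section("").MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Println(c.Includes) // [a.conf b.conf]
}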

View file

@ -30,7 +30,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: MessageSet and RawMessage.
// TODO: RawMessage.
package proto
@ -75,17 +75,24 @@ func Merge(dst, src Message) {
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i))
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
if emIn, ok := extendable(in.Addr().Interface()); ok {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
uf := in.FieldByName("XXX_unrecognized")
@ -98,7 +105,10 @@ func mergeStruct(out, in reflect.Value) {
}
}
func mergeAny(out, in reflect.Value) {
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
@ -112,7 +122,21 @@ func mergeAny(out, in reflect.Value) {
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
@ -127,7 +151,7 @@ func mergeAny(out, in reflect.Value) {
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key))
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
@ -143,13 +167,21 @@ func mergeAny(out, in reflect.Value) {
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem())
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
@ -167,7 +199,7 @@ func mergeAny(out, in reflect.Value) {
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
@ -184,7 +216,7 @@ func mergeExtension(out, in map[int32]Extension) {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value))
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {

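The Merge changes above stop proto3 zero-valued scalars in src from clobbering values already set in dst, and add oneof and extension handling. A hedged sketch of the scalar behaviour, using the Duration well-known type purely as an example proto3 message (the import path is assumed; any generated proto3 message works):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration" // assumed import path
)

func main() {
	dst := &durpb.Duration{Seconds: 5}
	src := &durpb.Duration{Nanos: 500}

	// With the isProto3Zero check added above, src.Seconds == 0 is treated as
	// "unset" and no longer overwrites dst.Seconds.
	proto.Merge(dst, src)
	fmt.Println(dst.Seconds, dst.Nanos) // 5 500
}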
View file

@ -46,6 +46,10 @@ import (
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
@ -57,7 +61,6 @@ var errOverflow = errors.New("proto: integer overflow")
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@ -74,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
@ -103,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
@ -314,10 +412,30 @@ func UnmarshalMerge(buf []byte, pb Message) error {
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
func (p *Buffer) DecodeGroup(pb Message) error {
typ, base, err := getbase(pb)
if err != nil {
return err
}
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
@ -356,6 +474,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@ -368,15 +491,30 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
extmap[int32(tag)] = ext
}
continue
}
}
// Maybe it's a oneof?
if prop.oneofUnmarshaler != nil {
m := structPointer_Interface(base, st).(Message)
// First return value indicates whether tag is a oneof field.
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
if err == ErrInternalBadWireType {
// Map the error to something more descriptive.
// Do the formatting here to save generated code space.
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
}
if ok {
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
@ -561,9 +699,13 @@ func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error
return err
}
nb := int(nn) // number of bytes of encoded bools
fin := o.index + nb
if fin < o.index {
return errOverflow
}
y := *v
for i := 0; i < nb; i++ {
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
@ -675,7 +817,7 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
@ -727,8 +869,15 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
}
v.SetMapIndex(keyptr.Elem(), valptr.Elem())
v.SetMapIndex(keyelem, valelem)
return nil
}
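The decode changes above add an unrolled fast path to Buffer.DecodeVarint and new DecodeMessage/DecodeGroup helpers. A hedged varint round-trip sketch using the package-level and Buffer APIs:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Round-trip through a Buffer. (The new fast path applies when at least
	// 10 bytes remain; short buffers like this one use decodeVarintSlow.)
	buf := proto.NewBuffer(nil)
	if err := buf.EncodeVarint(300); err != nil {
		panic(err)
	}
	x, err := buf.DecodeVarint()
	fmt.Println(x, err) // 300 <nil>

	// Package-level helper on a raw byte slice: 300 encodes as 0xAC 0x02.
	v, n := proto.DecodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2
}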

Some files were not shown because too many files have changed in this diff.