Update dependencies

Ed Robinson 2017-04-07 10:09:57 +01:00
parent 51e4dcbb1f
commit 65284441fa
No known key found for this signature in database
GPG key ID: EC501FCA6421CCF0
98 changed files with 25265 additions and 1992 deletions

View file

@ -15,8 +15,8 @@ import (
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/secure/precis"
"golang.org/x/text/unicode/norm"
"golang.org/x/text/width"
)
// A set of normalization flags determines how a URL will
@ -150,26 +150,22 @@ func MustNormalizeURLString(u string, f NormalizationFlags) string {
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
parsed, err := url.Parse(u)
if err != nil {
return "", err
if parsed, e := url.Parse(u); e != nil {
return "", e
} else {
options := make([]precis.Option, 1, 3)
options[0] = precis.IgnoreCase
if f&FlagLowercaseHost == FlagLowercaseHost {
options = append(options, precis.FoldCase())
}
options = append(options, precis.Norm(norm.NFC))
profile := precis.NewFreeform(options...)
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
return "", e
}
return NormalizeURL(parsed, f), nil
}
if f&FlagLowercaseHost == FlagLowercaseHost {
parsed.Host = strings.ToLower(parsed.Host)
}
// The idna package doesn't fully conform to RFC 5895
// (https://tools.ietf.org/html/rfc5895), so we do it here.
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
// TODO: Remove when (if?) idna package conforms to RFC 5895.
parsed.Host = width.Fold.String(parsed.Host)
parsed.Host = norm.NFC.String(parsed.Host)
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
return "", err
}
return NormalizeURL(parsed, f), nil
panic("Unreachable code.")
}
// NormalizeURL returns the normalized string.
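Both sides of the hunk above funnel through NormalizeURLString, so a minimal usage sketch may help readers of the diff. It assumes the upstream github.com/PuerkitoBio/purell import path and its documented FlagsSafe and FlagRemoveDotSegments constants, neither of which appears in this diff.

package main

import (
    "fmt"
    "log"

    "github.com/PuerkitoBio/purell"
)

func main() {
    // FlagLowercaseHost is the flag checked in the hunk above; FlagsSafe is
    // purell's pre-defined conservative flag set that includes it.
    normalized, err := purell.NormalizeURLString(
        "HTTP://Example.COM/a/../b",
        purell.FlagsSafe|purell.FlagRemoveDotSegments,
    )
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(normalized) // expected: http://example.com/b
}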

View file

@ -1,233 +0,0 @@
package semver
import (
"fmt"
"strings"
"unicode"
)
type comparator func(Version, Version) bool
var (
compEQ comparator = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == 0
}
compNE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) != 0
}
compGT = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == 1
}
compGE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) >= 0
}
compLT = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) == -1
}
compLE = func(v1 Version, v2 Version) bool {
return v1.Compare(v2) <= 0
}
)
type versionRange struct {
v Version
c comparator
}
// rangeFunc creates a Range from the given versionRange.
func (vr *versionRange) rangeFunc() Range {
return Range(func(v Version) bool {
return vr.c(v, vr.v)
})
}
// Range represents a range of versions.
// A Range can be used to check if a Version satisfies it:
//
// validRange, err := semver.ParseRange(">1.0.0 <2.0.0")
// validRange(semver.MustParse("1.1.1")) // returns true
type Range func(Version) bool
// OR combines the existing Range with another Range using logical OR.
func (rf Range) OR(f Range) Range {
return Range(func(v Version) bool {
return rf(v) || f(v)
})
}
// AND combines the existing Range with another Range using logical AND.
func (rf Range) AND(f Range) Range {
return Range(func(v Version) bool {
return rf(v) && f(v)
})
}
// ParseRange parses a range and returns a Range.
// If the range could not be parsed an error is returned.
//
// Valid ranges are:
// - "<1.0.0"
// - "<=1.0.0"
// - ">1.0.0"
// - ">=1.0.0"
// - "1.0.0", "=1.0.0", "==1.0.0"
// - "!1.0.0", "!=1.0.0"
//
// A Range can consist of multiple ranges separated by space:
// Ranges can be linked by logical AND:
// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0"
// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2
//
// Ranges can also be linked by logical OR:
// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x"
//
// AND has a higher precedence than OR. It's not possible to use brackets.
//
// Ranges can be combined by both AND and OR
//
// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
func ParseRange(s string) (Range, error) {
parts := splitAndTrim(s)
orParts, err := splitORParts(parts)
if err != nil {
return nil, err
}
var orFn Range
for _, p := range orParts {
var andFn Range
for _, ap := range p {
opStr, vStr, err := splitComparatorVersion(ap)
if err != nil {
return nil, err
}
vr, err := buildVersionRange(opStr, vStr)
if err != nil {
return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err)
}
rf := vr.rangeFunc()
// Set function
if andFn == nil {
andFn = rf
} else { // Combine with existing function
andFn = andFn.AND(rf)
}
}
if orFn == nil {
orFn = andFn
} else {
orFn = orFn.OR(andFn)
}
}
return orFn, nil
}
// splitORParts splits the already cleaned parts by '||'.
// Checks for invalid positions of the operator and returns an
// error if found.
func splitORParts(parts []string) ([][]string, error) {
var ORparts [][]string
last := 0
for i, p := range parts {
if p == "||" {
if i == 0 {
return nil, fmt.Errorf("First element in range is '||'")
}
ORparts = append(ORparts, parts[last:i])
last = i + 1
}
}
if last == len(parts) {
return nil, fmt.Errorf("Last element in range is '||'")
}
ORparts = append(ORparts, parts[last:])
return ORparts, nil
}
// buildVersionRange takes an operator string and a version string
// and builds a versionRange, or returns an error.
func buildVersionRange(opStr, vStr string) (*versionRange, error) {
c := parseComparator(opStr)
if c == nil {
return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
}
v, err := Parse(vStr)
if err != nil {
return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
}
return &versionRange{
v: v,
c: c,
}, nil
}
// splitAndTrim splits a range string by spaces and cleans leading and trailing spaces
func splitAndTrim(s string) (result []string) {
last := 0
for i := 0; i < len(s); i++ {
if s[i] == ' ' {
if last < i-1 {
result = append(result, s[last:i])
}
last = i + 1
}
}
if last < len(s)-1 {
result = append(result, s[last:])
}
// parts := strings.Split(s, " ")
// for _, x := range parts {
// if s := strings.TrimSpace(x); len(s) != 0 {
// result = append(result, s)
// }
// }
return
}
// splitComparatorVersion splits the comparator from the version.
// Spaces between the comparator and the version are not allowed.
// Input must be free of leading or trailing spaces.
func splitComparatorVersion(s string) (string, string, error) {
i := strings.IndexFunc(s, unicode.IsDigit)
if i == -1 {
return "", "", fmt.Errorf("Could not get version from string: %q", s)
}
return strings.TrimSpace(s[0:i]), s[i:], nil
}
func parseComparator(s string) comparator {
switch s {
case "==":
fallthrough
case "":
fallthrough
case "=":
return compEQ
case ">":
return compGT
case ">=":
return compGE
case "<":
return compLT
case "<=":
return compLE
case "!":
fallthrough
case "!=":
return compNE
}
return nil
}
// MustParseRange is like ParseRange but panics if the range cannot be parsed.
func MustParseRange(s string) Range {
r, err := ParseRange(s)
if err != nil {
panic(`semver: ParseRange(` + s + `): ` + err.Error())
}
return r
}
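As a quick illustration of the range grammar documented above, here is a usage sketch; the only assumption beyond the code shown is the github.com/blang/semver import path for this vendored package.

package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    // ">1.0.0 <3.0.0 !2.0.3-beta.2": three comparators linked by logical AND,
    // exactly as described in the ParseRange documentation above.
    validRange := semver.MustParseRange(">1.0.0 <3.0.0 !2.0.3-beta.2")

    fmt.Println(validRange(semver.MustParse("1.2.3")))        // true
    fmt.Println(validRange(semver.MustParse("2.0.3-beta.2"))) // false: excluded by !
    fmt.Println(validRange(semver.MustParse("3.0.0")))        // false: not < 3.0.0
}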

View file

@ -200,29 +200,6 @@ func Make(s string) (Version, error) {
return Parse(s)
}
// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
// specs to be parsed by this library. It does so by normalizing versions before passing them to
// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions
// with only major and minor components specified
func ParseTolerant(s string) (Version, error) {
s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v")
// Split into major.minor.(patch+pr+meta)
parts := strings.SplitN(s, ".", 3)
if len(parts) < 3 {
if strings.ContainsAny(parts[len(parts)-1], "+-") {
return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
}
for len(parts) < 3 {
parts = append(parts, "0")
}
s = strings.Join(parts, ".")
}
return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
if len(s) == 0 {
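The ParseTolerant documented above normalizes loose version strings before handing them to Parse. A sketch of that behaviour, assuming the github.com/blang/semver import path and a release that ships the function:

package main

import (
    "fmt"
    "log"

    "github.com/blang/semver"
)

func main() {
    // " v1.2 " is trimmed, the "v" prefix is stripped, and a ".0" patch
    // component is appended before the string reaches Parse.
    v, err := semver.ParseTolerant(" v1.2 ")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v.String()) // 1.2.0

    // Short versions carrying pre-release or build metadata are rejected.
    _, err = semver.ParseTolerant("v1.2-beta")
    fmt.Println(err != nil) // true
}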

View file

@ -1,150 +0,0 @@
// +build ignore
// This file is used to generate keys for tests.
package main
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"text/template"
jose "gopkg.in/square/go-jose.v2"
)
type key struct {
name string
new func() (crypto.Signer, error)
}
var keys = []key{
{
"ECDSA_256", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
},
},
{
"ECDSA_384", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
},
},
{
"ECDSA_521", func() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
},
},
{
"RSA_1024", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 1024)
},
},
{
"RSA_2048", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 2048)
},
},
{
"RSA_4096", func() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 4096)
},
},
}
func newJWK(k key, prefix, ident string) (privBytes, pubBytes []byte, err error) {
priv, err := k.new()
if err != nil {
return nil, nil, fmt.Errorf("generate %s: %v", k.name, err)
}
pub := priv.Public()
privKey := &jose.JSONWebKey{Key: priv}
thumbprint, err := privKey.Thumbprint(crypto.SHA256)
if err != nil {
return nil, nil, fmt.Errorf("computing thumbprint: %v", err)
}
keyID := hex.EncodeToString(thumbprint)
privKey.KeyID = keyID
pubKey := &jose.JSONWebKey{Key: pub, KeyID: keyID}
privBytes, err = json.MarshalIndent(privKey, prefix, ident)
if err != nil {
return
}
pubBytes, err = json.MarshalIndent(pubKey, prefix, ident)
return
}
type keyData struct {
Name string
Priv string
Pub string
}
var tmpl = template.Must(template.New("").Parse(`// +build !golint
// This file contains statically created JWKs for tests created by gen.go
package oidc
import (
"encoding/json"
jose "gopkg.in/square/go-jose.v2"
)
func mustLoadJWK(s string) jose.JSONWebKey {
var jwk jose.JSONWebKey
if err := json.Unmarshal([]byte(s), &jwk); err != nil {
panic(err)
}
return jwk
}
var (
{{- range $i, $key := .Keys }}
testKey{{ $key.Name }} = mustLoadJWK(` + "`" + `{{ $key.Pub }}` + "`" + `)
testKey{{ $key.Name }}_Priv = mustLoadJWK(` + "`" + `{{ $key.Priv }}` + "`" + `)
{{ end -}}
)
`))
func main() {
var tmplData struct {
Keys []keyData
}
for _, k := range keys {
for i := 0; i < 4; i++ {
log.Printf("generating %s", k.name)
priv, pub, err := newJWK(k, "\t", "\t")
if err != nil {
log.Fatal(err)
}
name := fmt.Sprintf("%s_%d", k.name, i)
tmplData.Keys = append(tmplData.Keys, keyData{
Name: name,
Priv: string(priv),
Pub: string(pub),
})
}
}
buff := new(bytes.Buffer)
if err := tmpl.Execute(buff, tmplData); err != nil {
log.Fatalf("excuting template: %v", err)
}
if err := ioutil.WriteFile("jose_test.go", buff.Bytes(), 0644); err != nil {
log.Fatal(err)
}
}

View file

@ -1,2 +0,0 @@
// Package http is DEPRECATED. Use net/http instead.
package http

View file

@ -1,20 +0,0 @@
// +build !golint
// Don't lint this file. We don't want to have to add a comment to each constant.
package oidc
const (
// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
ES256 = "ES256" // ECDSA using P-256 and SHA-256
ES384 = "ES384" // ECDSA using P-384 and SHA-384
ES512 = "ES512" // ECDSA using P-521 and SHA-512
PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)

View file

@ -1,2 +0,0 @@
// Package jose is DEPRECATED. Use gopkg.in/square/go-jose.v2 instead.
package jose

View file

@ -104,7 +104,7 @@ func encodeExponent(e int) string {
break
}
}
return base64.RawURLEncoding.EncodeToString(b[idx:])
return base64.URLEncoding.EncodeToString(b[idx:])
}
// Turns a URL encoded modulus of a key into a big int.
@ -119,7 +119,7 @@ func decodeModulus(n string) (*big.Int, error) {
}
func encodeModulus(n *big.Int) string {
return base64.RawURLEncoding.EncodeToString(n.Bytes())
return base64.URLEncoding.EncodeToString(n.Bytes())
}
// decodeBase64URLPaddingOptional decodes Base64 whether there is padding or not.
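The two hunks above swap base64.RawURLEncoding for base64.URLEncoding when encoding the JWK exponent and modulus. The only difference between the two encoders is the "=" padding, as this standard-library-only sketch shows; the JOSE RFCs specify the unpadded form for JWK values.

package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    modulus := []byte{0xfa, 0xce} // any length not divisible by 3 makes the padding visible

    fmt.Println(base64.RawURLEncoding.EncodeToString(modulus)) // -s4   (no padding)
    fmt.Println(base64.URLEncoding.EncodeToString(modulus))    // -s4=  (padded)
}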

67 vendor/github.com/coreos/go-oidc/jose/sig_hmac.go generated vendored Executable file
View file

@ -0,0 +1,67 @@
package jose
import (
"bytes"
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"errors"
"fmt"
)
type VerifierHMAC struct {
KeyID string
Hash crypto.Hash
Secret []byte
}
type SignerHMAC struct {
VerifierHMAC
}
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
if jwk.Alg != "" && jwk.Alg != "HS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}
v := VerifierHMAC{
KeyID: jwk.ID,
Secret: jwk.Secret,
Hash: crypto.SHA256,
}
return &v, nil
}
func (v *VerifierHMAC) ID() string {
return v.KeyID
}
func (v *VerifierHMAC) Alg() string {
return "HS256"
}
func (v *VerifierHMAC) Verify(sig []byte, data []byte) error {
h := hmac.New(v.Hash.New, v.Secret)
h.Write(data)
if !bytes.Equal(sig, h.Sum(nil)) {
return errors.New("invalid hmac signature")
}
return nil
}
func NewSignerHMAC(kid string, secret []byte) *SignerHMAC {
return &SignerHMAC{
VerifierHMAC: VerifierHMAC{
KeyID: kid,
Secret: secret,
Hash: crypto.SHA256,
},
}
}
func (s *SignerHMAC) Sign(data []byte) ([]byte, error) {
h := hmac.New(s.Hash.New, s.Secret)
h.Write(data)
return h.Sum(nil), nil
}
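The new file above is self-contained, so a minimal signing round trip is easy to sketch; the only assumption is the github.com/coreos/go-oidc/jose import path shown in the file header.

package main

import (
    "fmt"
    "log"

    "github.com/coreos/go-oidc/jose"
)

func main() {
    secret := []byte("shared-secret")

    // NewSignerHMAC embeds a VerifierHMAC, so the same value both signs and verifies.
    signer := jose.NewSignerHMAC("key-1", secret)

    sig, err := signer.Sign([]byte("payload"))
    if err != nil {
        log.Fatal(err)
    }

    if err := signer.Verify(sig, []byte("payload")); err != nil {
        log.Fatal(err)
    }
    fmt.Println("HMAC-SHA256 signature verified")
}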

View file

@ -1,200 +0,0 @@
package oidc
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"sync"
"time"
"github.com/pquerna/cachecontrol"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
jose "gopkg.in/square/go-jose.v2"
)
// keysExpiryDelta is the allowed clock skew between a client and the OpenID Connect
// server.
//
// When keys expire, they are valid for this amount of time after.
//
// If the keys have not expired, but an ID Token claims it was signed by a key not in
// the cache, the keys will be updated if and only if they expire within this amount
// of time.
const keysExpiryDelta = 30 * time.Second
func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *remoteKeySet {
if now == nil {
now = time.Now
}
return &remoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
}
type remoteKeySet struct {
jwksURL string
ctx context.Context
now func() time.Time
// guard all other fields
mu sync.Mutex
// inflightCtx suppresses parallel execution of updateKeys and allows
// multiple goroutines to wait for its result.
// Its Err() method returns any errors encountered during updateKeys.
//
// If nil, there is no inflight updateKeys request.
inflightCtx *inflight
// A set of cached keys and their expiry.
cachedKeys []jose.JSONWebKey
expiry time.Time
}
// inflight is used to wait on some in-flight request from multiple goroutines
type inflight struct {
done chan struct{}
err error
}
// Done returns a channel that is closed when the inflight request finishes.
func (i *inflight) Done() <-chan struct{} {
return i.done
}
// Err returns any error encountered during request execution. May be nil.
func (i *inflight) Err() error {
return i.err
}
// Cancel signals completion of the inflight request with error err.
// Must be called only once for a particular inflight instance.
func (i *inflight) Cancel(err error) {
i.err = err
close(i.done)
}
func (r *remoteKeySet) keysWithIDFromCache(keyIDs []string) ([]jose.JSONWebKey, bool) {
r.mu.Lock()
keys, expiry := r.cachedKeys, r.expiry
r.mu.Unlock()
// Have the keys expired?
if expiry.Add(keysExpiryDelta).Before(r.now()) {
return nil, false
}
var signingKeys []jose.JSONWebKey
for _, key := range keys {
if contains(keyIDs, key.KeyID) {
signingKeys = append(signingKeys, key)
}
}
if len(signingKeys) == 0 {
// Are the keys about to expire?
if r.now().Add(keysExpiryDelta).After(expiry) {
return nil, false
}
}
return signingKeys, true
}
func (r *remoteKeySet) keysWithID(ctx context.Context, keyIDs []string) ([]jose.JSONWebKey, error) {
keys, ok := r.keysWithIDFromCache(keyIDs)
if ok {
return keys, nil
}
var inflightCtx *inflight
func() {
r.mu.Lock()
defer r.mu.Unlock()
// If there's not a current inflight request, create one.
if r.inflightCtx == nil {
inflightCtx := &inflight{make(chan struct{}), nil}
r.inflightCtx = inflightCtx
go func() {
// TODO(ericchiang): Upstream Kubernetes requests that we recover every time
// we spawn a goroutine, because panics in a goroutine will bring down the
// entire program. There's no way to recover from another goroutine's panic.
//
// Most users actually want to let the panic propagate and bring down the
// program because it implies some unrecoverable state.
//
// Add a context key to allow the recover behavior.
//
// See: https://github.com/coreos/go-oidc/issues/89
// Sync keys and close inflightCtx when that's done.
// Use the remoteKeySet's context instead of the requests context
// because a re-sync is unique to the keys set and will span multiple
// requests.
inflightCtx.Cancel(r.updateKeys(r.ctx))
r.mu.Lock()
defer r.mu.Unlock()
r.inflightCtx = nil
}()
}
inflightCtx = r.inflightCtx
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-inflightCtx.Done():
if err := inflightCtx.Err(); err != nil {
return nil, err
}
}
// Since we've just updated keys, we don't care about the cache miss.
keys, _ = r.keysWithIDFromCache(keyIDs)
return keys, nil
}
func (r *remoteKeySet) updateKeys(ctx context.Context) error {
req, err := http.NewRequest("GET", r.jwksURL, nil)
if err != nil {
return fmt.Errorf("oidc: can't create request: %v", err)
}
resp, err := ctxhttp.Do(ctx, clientFromContext(ctx), req)
if err != nil {
return fmt.Errorf("oidc: get keys failed %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("oidc: read response body: %v", err)
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
}
var keySet jose.JSONWebKeySet
if err := json.Unmarshal(body, &keySet); err != nil {
return fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
}
// If the server doesn't provide cache control headers, assume the
// keys expire immediately.
expiry := r.now()
_, e, err := cachecontrol.CachableResponse(req, resp, cachecontrol.Options{})
if err == nil && e.After(expiry) {
expiry = e
}
r.mu.Lock()
defer r.mu.Unlock()
r.cachedKeys = keySet.Keys
r.expiry = expiry
return nil
}

View file

@ -1,2 +0,0 @@
// Package key is DEPRECATED. Use github.com/coreos/go-oidc instead.
package key

View file

@ -1,2 +0,0 @@
// Package oauth2 is DEPRECATED. Use golang.org/x/oauth instead.
package oauth2

View file

@ -1,299 +0,0 @@
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
package oidc
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"golang.org/x/oauth2"
jose "gopkg.in/square/go-jose.v2"
)
const (
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
ScopeOpenID = "openid"
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
// OAuth2 refresh tokens.
//
// Support for this scope differs between OpenID Connect providers. For instance
// Google rejects it, favoring appending "access_type=offline" as part of the
// authorization request instead.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
ScopeOfflineAccess = "offline_access"
)
// ClientContext returns a new Context that carries the provided HTTP client.
//
// This method sets the same context key used by the golang.org/x/oauth2 package,
// so the returned context works for that package too.
//
// myClient := &http.Client{}
// ctx := oidc.ClientContext(parentContext, myClient)
//
// // This will use the custom client
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
//
func ClientContext(ctx context.Context, client *http.Client) context.Context {
return context.WithValue(ctx, oauth2.HTTPClient, client)
}
func clientFromContext(ctx context.Context) *http.Client {
if client, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
return client
}
return http.DefaultClient
}
// Provider represents an OpenID Connect server's configuration.
type Provider struct {
issuer string
authURL string
tokenURL string
userInfoURL string
// Raw claims returned by the server.
rawClaims []byte
remoteKeySet *remoteKeySet
}
type cachedKeys struct {
keys []jose.JSONWebKey
expiry time.Time
}
type providerJSON struct {
Issuer string `json:"issuer"`
AuthURL string `json:"authorization_endpoint"`
TokenURL string `json:"token_endpoint"`
JWKSURL string `json:"jwks_uri"`
UserInfoURL string `json:"userinfo_endpoint"`
}
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
resp, err := ctxhttp.Get(ctx, clientFromContext(ctx), wellKnown)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
defer resp.Body.Close()
var p providerJSON
if err := json.Unmarshal(body, &p); err != nil {
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
}
if p.Issuer != issuer {
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
}
return &Provider{
issuer: p.Issuer,
authURL: p.AuthURL,
tokenURL: p.TokenURL,
userInfoURL: p.UserInfoURL,
rawClaims: body,
remoteKeySet: newRemoteKeySet(ctx, p.JWKSURL, time.Now),
}, nil
}
// Claims unmarshals raw fields returned by the server during discovery.
//
// var claims struct {
// ScopesSupported []string `json:"scopes_supported"`
// ClaimsSupported []string `json:"claims_supported"`
// }
//
// if err := provider.Claims(&claims); err != nil {
// // handle unmarshaling error
// }
//
// For a list of fields defined by the OpenID Connect spec see:
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
func (p *Provider) Claims(v interface{}) error {
if p.rawClaims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(p.rawClaims, v)
}
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
func (p *Provider) Endpoint() oauth2.Endpoint {
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
}
// UserInfo represents the OpenID Connect userinfo claims.
type UserInfo struct {
Subject string `json:"sub"`
Profile string `json:"profile"`
Email string `json:"email"`
EmailVerified bool `json:"email_verified"`
claims []byte
}
// Claims unmarshals the raw JSON object claims into the provided object.
func (u *UserInfo) Claims(v interface{}) error {
if u.claims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(u.claims, v)
}
// UserInfo uses the token source to query the provider's user info endpoint.
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
if p.userInfoURL == "" {
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
}
req, err := http.NewRequest("GET", p.userInfoURL, nil)
if err != nil {
return nil, fmt.Errorf("oidc: create GET request: %v", err)
}
token, err := tokenSource.Token()
if err != nil {
return nil, fmt.Errorf("oidc: get access token: %v", err)
}
token.SetAuthHeader(req)
resp, err := ctxhttp.Do(ctx, clientFromContext(ctx), req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("%s: %s", resp.Status, body)
}
var userInfo UserInfo
if err := json.Unmarshal(body, &userInfo); err != nil {
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
}
userInfo.claims = body
return &userInfo, nil
}
// IDToken is an OpenID Connect extension that provides a predictable representation
// of an authorization event.
//
// The ID Token only holds fields OpenID Connect requires. To access additional
// claims returned by the server, use the Claims method.
type IDToken struct {
// The URL of the server which issued this token. This will always be the same
// as the URL used for initial discovery.
Issuer string
// The client, or set of clients, that this token is issued for.
Audience []string
// A unique string which identifies the end user.
Subject string
IssuedAt time.Time
Expiry time.Time
Nonce string
// Raw payload of the id_token.
claims []byte
}
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
//
// idToken, err := idTokenVerifier.Verify(rawIDToken)
// if err != nil {
// // handle error
// }
// var claims struct {
// Email string `json:"email"`
// EmailVerified bool `json:"email_verified"`
// }
// if err := idToken.Claims(&claims); err != nil {
// // handle error
// }
//
func (i *IDToken) Claims(v interface{}) error {
if i.claims == nil {
return errors.New("oidc: claims not set")
}
return json.Unmarshal(i.claims, v)
}
type idToken struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience audience `json:"aud"`
Expiry jsonTime `json:"exp"`
IssuedAt jsonTime `json:"iat"`
Nonce string `json:"nonce"`
}
type audience []string
func (a *audience) UnmarshalJSON(b []byte) error {
var s string
if json.Unmarshal(b, &s) == nil {
*a = audience{s}
return nil
}
var auds []string
if err := json.Unmarshal(b, &auds); err != nil {
return err
}
*a = audience(auds)
return nil
}
func (a audience) MarshalJSON() ([]byte, error) {
if len(a) == 1 {
return json.Marshal(a[0])
}
return json.Marshal([]string(a))
}
type jsonTime time.Time
func (j *jsonTime) UnmarshalJSON(b []byte) error {
var n json.Number
if err := json.Unmarshal(b, &n); err != nil {
return err
}
var unix int64
if t, err := n.Int64(); err == nil {
unix = t
} else {
f, err := n.Float64()
if err != nil {
return err
}
unix = int64(f)
}
*j = jsonTime(time.Unix(unix, 0))
return nil
}
func (j jsonTime) MarshalJSON() ([]byte, error) {
return json.Marshal(time.Time(j).Unix())
}
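For context on the provider API defined in the file above, here is a discovery-plus-userinfo sketch. The issuer URL, client ID, redirect URL and access token are placeholders, and the github.com/coreos/go-oidc import path is assumed from this vendor tree.

package main

import (
    "fmt"
    "log"

    oidc "github.com/coreos/go-oidc"
    "golang.org/x/net/context"
    "golang.org/x/oauth2"
)

func main() {
    ctx := context.Background()

    // NewProvider performs the /.well-known/openid-configuration discovery
    // described above; the issuer URL here is a placeholder.
    provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
    if err != nil {
        log.Fatal(err)
    }

    // The discovered endpoints plug straight into an oauth2.Config.
    conf := oauth2.Config{
        ClientID:    "my-client",                         // placeholder
        RedirectURL: "https://app.example.com/callback",  // placeholder
        Endpoint:    provider.Endpoint(),
        Scopes:      []string{oidc.ScopeOpenID, "email"},
    }
    fmt.Println(conf.AuthCodeURL("some-state"))

    // With a previously obtained token, the userinfo endpoint can be queried.
    src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "opaque-token"}) // placeholder
    userInfo, err := provider.UserInfo(ctx, src)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(userInfo.Subject, userInfo.Email)
}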

View file

@ -1,2 +0,0 @@
// Package oidc is DEPRECATED. Use github.com/coreos/go-oidc instead.
package oidc

View file

@ -567,7 +567,7 @@ func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: time.Second}
log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err)
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}
@ -586,7 +586,7 @@ func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) {
next = &pcsStepNext{aft: ttl}
} else {
next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)}
log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err)
log.Printf("go-oidc: provider config sync falied, retyring in %v: %v", next.after(), err)
}
return
}

View file

@ -1,263 +0,0 @@
package oidc
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
jose "gopkg.in/square/go-jose.v2"
)
// IDTokenVerifier provides verification for ID Tokens.
type IDTokenVerifier struct {
keySet *remoteKeySet
config *verificationConfig
}
// verificationConfig is the unexported configuration for an IDTokenVerifier.
//
// Users interact with this struct using a VerificationOption.
type verificationConfig struct {
issuer string
// If provided, this value must be in the ID Token audiences.
audience string
// If not nil, check the expiry of the id token.
checkExpiry func() time.Time
// If specified, only these sets of algorithms may be used to sign the JWT.
requiredAlgs []string
// If not nil, don't verify nonce.
nonceSource NonceSource
}
// VerificationOption provides additional checks on ID Tokens.
type VerificationOption interface {
// Unexport this method so other packages can't implement this interface.
updateConfig(c *verificationConfig)
}
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
//
// The returned IDTokenVerifier is tied to the Provider's context and its behavior is
// undefined once the Provider's context is canceled.
func (p *Provider) Verifier(options ...VerificationOption) *IDTokenVerifier {
config := &verificationConfig{issuer: p.issuer}
for _, option := range options {
option.updateConfig(config)
}
return newVerifier(p.remoteKeySet, config)
}
func newVerifier(keySet *remoteKeySet, config *verificationConfig) *IDTokenVerifier {
// As discussed in the godocs for VerifySigningAlg, because almost all providers
// only support RS256, default to only allowing it.
if len(config.requiredAlgs) == 0 {
config.requiredAlgs = []string{RS256}
}
return &IDTokenVerifier{
keySet: keySet,
config: config,
}
}
func parseJWT(p string) ([]byte, error) {
parts := strings.Split(p, ".")
if len(parts) < 2 {
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
}
return payload, nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
return true
}
}
return false
}
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
// any additional checks passed as VerificationOptions, and returns the payload.
//
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
//
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
// if err != nil {
// // handle error
// }
//
// // Extract the ID Token from oauth2 token.
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
// if !ok {
// // handle error
// }
//
// token, err := verifier.Verify(ctx, rawIDToken)
//
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
jws, err := jose.ParseSigned(rawIDToken)
if err != nil {
return nil, fmt.Errorf("oidc: mallformed jwt: %v", err)
}
// Throw out tokens with invalid claims before trying to verify the token. This lets
// us do cheap checks before possibly re-syncing keys.
payload, err := parseJWT(rawIDToken)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
var token idToken
if err := json.Unmarshal(payload, &token); err != nil {
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
}
t := &IDToken{
Issuer: token.Issuer,
Subject: token.Subject,
Audience: []string(token.Audience),
Expiry: time.Time(token.Expiry),
IssuedAt: time.Time(token.IssuedAt),
Nonce: token.Nonce,
claims: payload,
}
// Check issuer.
if t.Issuer != v.config.issuer {
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.config.issuer, t.Issuer)
}
// If a client ID has been provided, make sure it's part of the audience.
if v.config.audience != "" {
if !contains(t.Audience, v.config.audience) {
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.audience, t.Audience)
}
}
// If a set of required algorithms has been provided, ensure that the signatures use those.
var keyIDs, gotAlgs []string
for _, sig := range jws.Signatures {
if len(v.config.requiredAlgs) == 0 || contains(v.config.requiredAlgs, sig.Header.Algorithm) {
keyIDs = append(keyIDs, sig.Header.KeyID)
} else {
gotAlgs = append(gotAlgs, sig.Header.Algorithm)
}
}
if len(keyIDs) == 0 {
return nil, fmt.Errorf("oidc: no signatures use a require algorithm, expected %q got %q", v.config.requiredAlgs, gotAlgs)
}
// Get keys from the remote key set. This may trigger a re-sync.
keys, err := v.keySet.keysWithID(ctx, keyIDs)
if err != nil {
return nil, fmt.Errorf("oidc: get keys for id token: %v", err)
}
if len(keys) == 0 {
return nil, fmt.Errorf("oidc: no keys match signature ID(s) %q", keyIDs)
}
// Try to use a key to validate the signature.
var gotPayload []byte
for _, key := range keys {
if p, err := jws.Verify(&key); err == nil {
gotPayload = p
}
}
if len(gotPayload) == 0 {
return nil, fmt.Errorf("oidc: failed to verify id token")
}
// Ensure that the payload returned by go-jose actually matches the payload parsed earlier.
if !bytes.Equal(gotPayload, payload) {
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
}
// Check the nonce after we've verified the token. We don't want to allow unverified
// payloads to trigger a nonce lookup.
if v.config.nonceSource != nil {
if err := v.config.nonceSource.ClaimNonce(t.Nonce); err != nil {
return nil, err
}
}
return t, nil
}
// VerifyAudience ensures that an ID Token was issued for the specific client.
//
// Note that a verified token may be valid for other clients, as OpenID Connect allows a token to have
// multiple audiences.
func VerifyAudience(clientID string) VerificationOption {
return clientVerifier{clientID}
}
type clientVerifier struct {
clientID string
}
func (v clientVerifier) updateConfig(c *verificationConfig) {
c.audience = v.clientID
}
// VerifyExpiry ensures that an ID Token has not expired.
func VerifyExpiry() VerificationOption {
return expiryVerifier{}
}
type expiryVerifier struct{}
func (v expiryVerifier) updateConfig(c *verificationConfig) {
c.checkExpiry = time.Now
}
// VerifySigningAlg enforces that an ID Token is signed by a specific signing algorithm.
//
// Because so many providers only support RS256, if this verification option isn't used,
// the IDTokenVerifier defaults to only allowing RS256.
func VerifySigningAlg(allowedAlgs ...string) VerificationOption {
return algVerifier{allowedAlgs}
}
type algVerifier struct {
algs []string
}
func (v algVerifier) updateConfig(c *verificationConfig) {
c.requiredAlgs = v.algs
}
// Nonce returns an auth code option which requires the ID Token created by the
// OpenID Connect provider to contain the specified nonce.
func Nonce(nonce string) oauth2.AuthCodeOption {
return oauth2.SetAuthURLParam("nonce", nonce)
}
// NonceSource represents a source which can verify a nonce is valid and has not
// been claimed before.
type NonceSource interface {
ClaimNonce(nonce string) error
}
// VerifyNonce ensures that the ID Token contains a nonce which can be claimed by the nonce source.
func VerifyNonce(source NonceSource) VerificationOption {
return nonceVerifier{source}
}
type nonceVerifier struct {
nonceSource NonceSource
}
func (n nonceVerifier) updateConfig(c *verificationConfig) {
c.nonceSource = n.nonceSource
}
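Tying the options in this file together, a verification sketch follows; the issuer, client ID and raw token are placeholders, and the github.com/coreos/go-oidc import path is assumed as above.

package main

import (
    "fmt"
    "log"

    oidc "github.com/coreos/go-oidc"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()

    provider, err := oidc.NewProvider(ctx, "https://accounts.example.com") // placeholder issuer
    if err != nil {
        log.Fatal(err)
    }

    // VerifyAudience and VerifyExpiry are VerificationOptions defined above;
    // with no VerifySigningAlg option the verifier defaults to RS256 only.
    verifier := provider.Verifier(
        oidc.VerifyAudience("my-client"),
        oidc.VerifyExpiry(),
    )

    rawIDToken := "eyJ..." // placeholder: normally taken from the oauth2 token's "id_token" field
    idToken, err := verifier.Verify(ctx, rawIDToken)
    if err != nil {
        log.Fatal(err)
    }

    var claims struct {
        Email string `json:"email"`
    }
    if err := idToken.Claims(&claims); err != nil {
        log.Fatal(err)
    }
    fmt.Println("issued to", idToken.Subject, "email:", claims.Email)
}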

View file

@ -45,11 +45,7 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
break
}
if v.IsNil() {
if v.CanSet() {
v.Set(reflect.New(v.Type().Elem()))
} else {
v = reflect.New(v.Type().Elem())
}
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(json.Unmarshaler); ok {

View file

@ -15,12 +15,12 @@ import (
func Marshal(o interface{}) ([]byte, error) {
j, err := json.Marshal(o)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
return nil, fmt.Errorf("error marshaling into JSON: ", err)
}
y, err := JSONToYAML(j)
if err != nil {
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
return nil, fmt.Errorf("error converting JSON to YAML: ", err)
}
return y, nil
@ -48,7 +48,7 @@ func JSONToYAML(j []byte) ([]byte, error) {
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}, it just picks float64
// etc.) when unmarshling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
err := yaml.Unmarshal(j, &jsonObj)
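A round-trip sketch for the two helpers touched above, assuming the github.com/ghodss/yaml import path and its convention of reusing json struct tags:

package main

import (
    "fmt"
    "log"

    "github.com/ghodss/yaml"
)

type config struct {
    Name     string `json:"name"`
    Replicas int    `json:"replicas"`
}

func main() {
    // Marshal goes struct -> JSON -> YAML, which is why the error messages in
    // the hunk above mention both conversion steps.
    out, err := yaml.Marshal(config{Name: "web", Replicas: 3})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(out)) // name: web / replicas: 3

    // JSONToYAML converts raw JSON directly, preserving number types as the
    // comment about number handling above explains.
    y, err := yaml.JSONToYAML([]byte(`{"enabled": true}`))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Print(string(y)) // enabled: true
}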

File diff suppressed because one or more lines are too long

View file

@ -17,11 +17,7 @@ package spec
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"path"
"path/filepath"
"reflect"
"strings"
"sync"
@ -30,17 +26,6 @@ import (
"github.com/go-openapi/swag"
)
var (
// Debug enables logging when SWAGGER_DEBUG env var is not empty
Debug = os.Getenv("SWAGGER_DEBUG") != ""
)
// ExpandOptions provides options for expand.
type ExpandOptions struct {
RelativeBase string
SkipSchemas bool
}
// ResolutionCache a cache for resolving urls
type ResolutionCache interface {
Get(string) (interface{}, bool)
@ -52,11 +37,7 @@ type simpleCache struct {
store map[string]interface{}
}
var resCache ResolutionCache
func init() {
resCache = initResolutionCache()
}
var resCache = initResolutionCache()
func initResolutionCache() ResolutionCache {
return &simpleCache{store: map[string]interface{}{
@ -66,11 +47,8 @@ func initResolutionCache() ResolutionCache {
}
func (s *simpleCache) Get(uri string) (interface{}, bool) {
debugLog("getting %q from resolution cache", uri)
s.lock.Lock()
v, ok := s.store[uri]
debugLog("got %q from resolution cache: %t", uri, ok)
s.lock.Unlock()
return v, ok
}
@ -81,9 +59,9 @@ func (s *simpleCache) Set(uri string, data interface{}) {
s.lock.Unlock()
}
// ResolveRefWithBase resolves a reference against a context root with preservation of base path
func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
// ResolveRef resolves a reference against a context root
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
resolver, err := defaultSchemaLoader(root, nil, nil)
if err != nil {
return nil, err
}
@ -95,19 +73,9 @@ func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schem
return result, nil
}
// ResolveRef resolves a reference against a context root
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
return ResolveRefWithBase(root, ref, nil)
}
// ResolveParameter resolves a parameter reference against a context root
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
return ResolveParameterWithBase(root, ref, nil)
}
// ResolveParameterWithBase resolves a parameter reference against a context root and base path
func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
resolver, err := defaultSchemaLoader(root, nil, nil)
if err != nil {
return nil, err
}
@ -121,12 +89,7 @@ func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*
// ResolveResponse resolves a response reference against a context root
func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
return ResolveResponseWithBase(root, ref, nil)
}
// ResolveResponseWithBase resolves a response reference against a context root and base path
func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
resolver, err := defaultSchemaLoader(root, nil, nil)
if err != nil {
return nil, err
}
@ -138,70 +101,23 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R
return result, nil
}
// ResolveItems resolves a header or parameter items reference against a context root and base path
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(Items)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
// ResolvePathItem resolves a path item against a context root and base path
func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(PathItem)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
type schemaLoader struct {
loadingRef *Ref
startingRef *Ref
currentRef *Ref
root interface{}
options *ExpandOptions
cache ResolutionCache
loadDoc func(string) (json.RawMessage, error)
}
var idPtr, _ = jsonpointer.New("/id")
var schemaPtr, _ = jsonpointer.New("/$schema")
var refPtr, _ = jsonpointer.New("/$ref")
// PathLoader function to use when loading remote refs
var PathLoader func(string) (json.RawMessage, error)
func init() {
PathLoader = func(path string) (json.RawMessage, error) {
data, err := swag.LoadFromFileOrHTTP(path)
if err != nil {
return nil, err
}
return json.RawMessage(data), nil
}
}
func defaultSchemaLoader(
root interface{}, ref *Ref,
expandOptions *ExpandOptions, cache ResolutionCache) (*schemaLoader, error) {
func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) {
if cache == nil {
cache = resCache
}
if expandOptions == nil {
expandOptions = &ExpandOptions{}
}
var ptr *jsonpointer.Pointer
if ref != nil {
@ -211,16 +127,18 @@ func defaultSchemaLoader(
currentRef := nextRef(root, ref, ptr)
return &schemaLoader{
root: root,
loadingRef: ref,
startingRef: ref,
currentRef: currentRef,
root: root,
options: expandOptions,
cache: cache,
loadDoc: func(path string) (json.RawMessage, error) {
debugLog("fetching document at %q", path)
return PathLoader(path)
data, err := swag.LoadFromFileOrHTTP(path)
if err != nil {
return nil, err
}
return json.RawMessage(data), nil
},
currentRef: currentRef,
}, nil
}
@ -241,7 +159,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
if startingRef == nil {
return nil
}
if ptr == nil {
return startingRef
}
@ -267,106 +184,32 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
refRef, _, _ := refPtr.Get(node)
if refRef != nil {
var rf Ref
switch value := refRef.(type) {
case string:
rf, _ = NewRef(value)
}
rf, _ := NewRef(refRef.(string))
nw, err := ret.Inherits(rf)
if err != nil {
break
}
nwURL := nw.GetURL()
if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
nwpt := filepath.ToSlash(nwURL.Path)
if filepath.IsAbs(nwpt) {
_, err := os.Stat(nwpt)
if err != nil {
nwURL.Path = filepath.Join(".", nwpt)
}
}
}
ret = nw
}
}
return ret
}
func debugLog(msg string, args ...interface{}) {
if Debug {
log.Printf(msg, args...)
}
}
func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
refURL := ref.GetURL()
debugLog("normalizing %s against %s", ref.String(), relativeBase)
if strings.HasPrefix(refURL.String(), "#") {
return ref
}
if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") {
filePath := refURL.Path
debugLog("normalizing file path: %s", filePath)
if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 {
debugLog("joining %s with %s", relativeBase, filePath)
if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil {
if !fi.IsDir() {
relativeBase = path.Dir(relativeBase)
}
}
filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath))
}
if !filepath.IsAbs(filepath.FromSlash(filePath)) {
pwd, err := os.Getwd()
if err == nil {
debugLog("joining cwd %s with %s", pwd, filePath)
filePath = filepath.Join(pwd, filePath)
}
}
debugLog("cleaning %s", filePath)
filePath = filepath.Clean(filePath)
_, err := os.Stat(filepath.FromSlash(filePath))
if err == nil {
debugLog("rewriting url to scheme \"\" path %s", filePath)
refURL.Scheme = ""
refURL.Path = filepath.ToSlash(filePath)
debugLog("new url with joined filepath: %s", refURL.String())
*ref = MustCreateRef(refURL.String())
}
}
return ref
}
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
tgt := reflect.ValueOf(target)
if tgt.Kind() != reflect.Ptr {
return fmt.Errorf("resolve ref: target needs to be a pointer")
}
oldRef := currentRef
if currentRef != nil {
debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
nextRef := nextRef(node, ref, currentRef.GetPointer())
if nextRef == nil || nextRef.GetURL() == nil {
return nil
}
var err error
currentRef, err = currentRef.Inherits(*nextRef)
debugLog("resolved ref current %s", currentRef.String())
currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
if err != nil {
return err
}
}
if currentRef == nil {
currentRef = ref
}
@ -402,69 +245,42 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
return nil
}
relativeBase := ""
if r.options != nil && r.options.RelativeBase != "" {
relativeBase = r.options.RelativeBase
}
normalizeFileRef(currentRef, relativeBase)
normalizeFileRef(ref, relativeBase)
data, _, _, err := r.load(currentRef.GetURL())
if err != nil {
return err
}
if ((oldRef == nil && currentRef != nil) ||
(oldRef != nil && currentRef == nil) ||
oldRef.String() != currentRef.String()) &&
((oldRef == nil && ref != nil) ||
(oldRef != nil && ref == nil) ||
(oldRef.String() != ref.String())) {
return r.resolveRef(currentRef, ref, data, target)
}
var res interface{}
if currentRef.String() != "" {
res, _, err = currentRef.GetPointer().Get(data)
if refURL.Scheme != "" && refURL.Host != "" {
// most definitely take the red pill
data, _, _, err := r.load(refURL)
if err != nil {
if strings.HasPrefix(ref.String(), "#") {
if r.loadingRef != nil {
rr, er := r.loadingRef.Inherits(*ref)
if er != nil {
return er
}
refURL = rr.GetURL()
return err
}
data, _, _, err = r.load(refURL)
if err != nil {
return err
}
} else {
data = r.root
}
}
if ((oldRef == nil && currentRef != nil) ||
(oldRef != nil && currentRef == nil) ||
oldRef.String() != currentRef.String()) &&
((oldRef == nil && ref != nil) ||
(oldRef != nil && ref == nil) ||
(oldRef.String() != ref.String())) {
res, _, err = ref.GetPointer().Get(data)
return r.resolveRef(currentRef, ref, data, target)
}
var res interface{}
if currentRef.String() != "" {
res, _, err = currentRef.GetPointer().Get(data)
if err != nil {
return err
}
} else {
res = data
}
} else {
res = data
if err := swag.DynamicJSONToStruct(res, target); err != nil {
return err
}
}
if err := swag.DynamicJSONToStruct(res, target); err != nil {
return err
}
r.currentRef = currentRef
return nil
}
func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
debugLog("loading schema from url: %s", refURL)
toFetch := *refURL
toFetch.Fragment = ""
@ -483,27 +299,33 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
return data, toFetch, fromCache, nil
}
func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
return r.resolveRef(r.currentRef, ref, r.root, target)
if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil {
return err
}
return nil
}
type specExpander struct {
spec *Swagger
resolver *schemaLoader
}
// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
resolver, err := defaultSchemaLoader(spec, nil, options, nil)
func ExpandSpec(spec *Swagger) error {
resolver, err := defaultSchemaLoader(spec, nil, nil)
if err != nil {
return err
}
if options == nil || !options.SkipSchemas {
for key, definition := range spec.Definitions {
var def *Schema
var err error
if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); err != nil {
return err
}
spec.Definitions[key] = *def
for key, defintition := range spec.Definitions {
var def *Schema
var err error
if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil {
return err
}
spec.Definitions[key] = *def
}
for key, parameter := range spec.Parameters {
@ -534,11 +356,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
// ExpandSchema expands the refs in the schema object
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
return ExpandSchemaWithBasePath(schema, root, cache, nil)
}
// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error {
if schema == nil {
return nil
}
@ -549,17 +367,18 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution
nrr, _ := NewRef(schema.ID)
var rrr *Ref
if nrr.String() != "" {
switch rt := root.(type) {
switch root.(type) {
case *Schema:
rid, _ := NewRef(rt.ID)
rid, _ := NewRef(root.(*Schema).ID)
rrr, _ = rid.Inherits(nrr)
case *Swagger:
rid, _ := NewRef(rt.ID)
rid, _ := NewRef(root.(*Swagger).ID)
rrr, _ = rid.Inherits(nrr)
}
}
resolver, err := defaultSchemaLoader(root, rrr, opts, cache)
resolver, err := defaultSchemaLoader(root, rrr, cache)
if err != nil {
return err
}
@ -570,7 +389,7 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution
}
var s *Schema
if s, err = expandSchema(*schema, refs, resolver); err != nil {
return err
return nil
}
*schema = *s
return nil
@ -581,15 +400,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
if target.Items.Schema != nil {
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
if target.Items.Schema.ID == "" {
target.Items.Schema.ID = target.ID
if err != nil {
t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
return nil, err
}
}
}
return nil, err
}
*target.Items.Schema = *t
}
@ -604,108 +415,101 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
return &target, nil
}
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) {
defer func() {
schema = &target
}()
if target.Ref.String() == "" && target.Ref.IsRoot() {
debugLog("skipping expand schema for no ref and root: %v", resolver.root)
return resolver.root.(*Schema), nil
target = *resolver.root.(*Schema)
return
}
// t is the new expanded schema
var t *Schema
for target.Ref.String() != "" {
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
return &target, nil
// var newTarget Schema
pRefs := strings.Join(parentRefs, ",")
pRefs += ","
if strings.Contains(pRefs, target.Ref.String()+",") {
err = nil
return
}
if err := resolver.Resolve(&target.Ref, &t); err != nil {
return &target, err
if err = resolver.Resolve(&target.Ref, &t); err != nil {
return
}
parentRefs = append(parentRefs, target.Ref.String())
target = *t
}
t, err := expandItems(target, parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandItems(target, parentRefs, resolver); err != nil {
return
}
target = *t
for i := range target.AllOf {
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil {
return
}
target.AllOf[i] = *t
}
for i := range target.AnyOf {
t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil {
return
}
target.AnyOf[i] = *t
}
for i := range target.OneOf {
t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil {
return
}
target.OneOf[i] = *t
}
if target.Not != nil {
t, err := expandSchema(*target.Not, parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil {
return
}
*target.Not = *t
}
for k := range target.Properties {
t, err := expandSchema(target.Properties[k], parentRefs, resolver)
if err != nil {
return &target, err
for k, _ := range target.Properties {
if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil {
return
}
target.Properties[k] = *t
}
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil {
return
}
*target.AdditionalProperties.Schema = *t
}
for k := range target.PatternProperties {
t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
if err != nil {
return &target, err
for k, _ := range target.PatternProperties {
if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil {
return
}
target.PatternProperties[k] = *t
}
for k := range target.Dependencies {
for k, _ := range target.Dependencies {
if target.Dependencies[k].Schema != nil {
t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil {
return
}
*target.Dependencies[k].Schema = *t
}
}
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
if err != nil {
return &target, err
if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil {
return
}
*target.AdditionalItems.Schema = *t
}
for k := range target.Definitions {
t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
if err != nil {
return &target, err
for k, _ := range target.Definitions {
if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil {
return
}
target.Definitions[k] = *t
}
return &target, nil
return
}
func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
@ -778,25 +582,22 @@ func expandResponse(response *Response, resolver *schemaLoader) error {
return nil
}
var parentRefs []string
if response.Ref.String() != "" {
parentRefs = append(parentRefs, response.Ref.String())
if err := resolver.Resolve(&response.Ref, response); err != nil {
return err
}
}
if !resolver.options.SkipSchemas && response.Schema != nil {
parentRefs = append(parentRefs, response.Schema.Ref.String())
debugLog("response ref: %s", response.Schema.Ref)
if response.Schema != nil {
parentRefs := []string{response.Schema.Ref.String()}
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
return err
}
s, err := expandSchema(*response.Schema, parentRefs, resolver)
if err != nil {
if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil {
return err
} else {
*response.Schema = *s
}
*response.Schema = *s
}
return nil
}
@ -805,24 +606,21 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
if parameter == nil {
return nil
}
var parentRefs []string
if parameter.Ref.String() != "" {
parentRefs = append(parentRefs, parameter.Ref.String())
if err := resolver.Resolve(&parameter.Ref, parameter); err != nil {
return err
}
}
if !resolver.options.SkipSchemas && parameter.Schema != nil {
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
if parameter.Schema != nil {
parentRefs := []string{parameter.Schema.Ref.String()}
if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); err != nil {
return err
}
s, err := expandSchema(*parameter.Schema, parentRefs, resolver)
if err != nil {
if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil {
return err
} else {
*parameter.Schema = *s
}
*parameter.Schema = *s
}
return nil
}
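As a sketch of what the expansion code above does, using the one-argument ExpandSpec signature that appears in this hunk; the github.com/go-openapi/spec import path and the tiny inline spec are assumptions for illustration only.

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/go-openapi/spec"
)

func main() {
    raw := []byte(`{
        "swagger": "2.0",
        "info": {"title": "petstore", "version": "1.0"},
        "paths": {},
        "definitions": {
            "Pet":    {"type": "object", "properties": {"owner": {"$ref": "#/definitions/Person"}}},
            "Person": {"type": "object", "properties": {"name": {"type": "string"}}}
        }
    }`)

    var swspec spec.Swagger
    if err := json.Unmarshal(raw, &swspec); err != nil {
        log.Fatal(err)
    }

    // ExpandSpec walks Definitions, Parameters and Responses and inlines
    // local $ref targets, mirroring the loops shown above.
    if err := spec.ExpandSpec(&swspec); err != nil {
        log.Fatal(err)
    }

    owner := swspec.Definitions["Pet"].Properties["owner"]
    fmt.Println(owner.Ref.String() == "")  // true: the $ref has been inlined
    fmt.Println(len(owner.Properties) > 0) // true: Person's properties are now embedded
}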

View file

@ -16,9 +16,7 @@ package spec
import (
"encoding/json"
"strings"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/swag"
)
@ -32,7 +30,6 @@ type HeaderProps struct {
type Header struct {
CommonValidations
SimpleSchema
VendorExtensible
HeaderProps
}
@ -161,35 +158,8 @@ func (h *Header) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
return err
}
if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
return err
}
if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
return err
}
return nil
}
// JSONLookup look up a value by the json property name
func (p Header) JSONLookup(token string) (interface{}, error) {
if ex, ok := p.Extensions[token]; ok {
return &ex, nil
}
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
return nil, err
}
if r != nil {
return r, nil
}
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
return nil, err
}
if r != nil {
return r, nil
}
r, _, err = jsonpointer.GetForToken(p.HeaderProps, token)
return r, err
}
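The JSONLookup body removed above tries Extensions first and then each embedded group in turn, treating an "object has no field" error as a miss rather than a failure. A minimal sketch of that cascade, using the same jsonpointer.GetForToken call as the code above (lookupCascade is our name, not part of the package):
package spec

import (
	"strings"

	"github.com/go-openapi/jsonpointer"
)

// lookupCascade tries each part in turn; "object has no field" only means
// "keep looking in the next embedded struct".
func lookupCascade(token string, parts ...interface{}) (interface{}, error) {
	for _, part := range parts {
		r, _, err := jsonpointer.GetForToken(part, token)
		if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
			return nil, err
		}
		if r != nil {
			return r, nil
		}
	}
	return nil, nil
}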

View file

@ -16,9 +16,7 @@ package spec
import (
"encoding/json"
"strings"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/swag"
)
@ -199,20 +197,3 @@ func (i Items) MarshalJSON() ([]byte, error) {
}
return swag.ConcatJSON(b3, b1, b2), nil
}
// JSONLookup look up a value by the json property name
func (p Items) JSONLookup(token string) (interface{}, error) {
if token == "$ref" {
return &p.Ref, nil
}
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
return nil, err
}
if r != nil {
return r, nil
}
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
return r, err
}

View file

@ -16,7 +16,6 @@ package spec
import (
"encoding/json"
"strings"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/swag"
@ -101,16 +100,15 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) {
if token == "$ref" {
return &p.Ref, nil
}
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
if err != nil {
return nil, err
}
if r != nil {
return r, nil
}
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
if err != nil {
return nil, err
}
if r != nil {

View file

@ -55,7 +55,7 @@ func (r *Ref) RemoteURI() string {
}
// IsValidURI returns true when the url the ref points to can be found
func (r *Ref) IsValidURI(basepaths ...string) bool {
func (r *Ref) IsValidURI() bool {
if r.String() == "" {
return true
}
@ -81,18 +81,14 @@ func (r *Ref) IsValidURI(basepaths ...string) bool {
// check for local file
pth := v
if r.HasURLPathOnly {
base := "."
if len(basepaths) > 0 {
base = filepath.Dir(filepath.Join(basepaths...))
}
p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
p, e := filepath.Abs(pth)
if e != nil {
return false
}
pth = p
}
fi, err := os.Stat(filepath.ToSlash(pth))
fi, err := os.Stat(pth)
if err != nil {
return false
}
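With the basepaths parameter gone, a path-only ref is made absolute against the process working directory alone instead of an explicit base. A small illustration of the difference, using hypothetical file names:
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Old behaviour: resolve the relative ref next to the spec it came from.
	withBase, _ := filepath.Abs(filepath.Join(filepath.Dir("/specs/swagger.json"), "definitions.json"))
	// New behaviour: resolve against the working directory only.
	cwdOnly, _ := filepath.Abs("definitions.json")
	fmt.Println(withBase) // /specs/definitions.json
	fmt.Println(cwdOnly)  // <current working directory>/definitions.json
}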

View file

@ -17,7 +17,6 @@ package spec
import (
"encoding/json"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/swag"
)
@ -37,15 +36,6 @@ type Response struct {
ResponseProps
}
// JSONLookup look up a value by the json property name
func (p Response) JSONLookup(token string) (interface{}, error) {
if token == "$ref" {
return &p.Ref, nil
}
r, _, err := jsonpointer.GetForToken(p.ResponseProps, token)
return r, err
}
// UnmarshalJSON hydrates this items instance with the data from JSON
func (r *Response) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &r.ResponseProps); err != nil {

View file

@ -51,7 +51,7 @@ func (r Responses) JSONLookup(token string) (interface{}, error) {
}
if i, err := strconv.Atoi(token); err == nil {
if scr, ok := r.StatusCodeResponses[i]; ok {
return scr, nil
return &scr, nil
}
}
return nil, fmt.Errorf("object has no field %q", token)
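The difference between returning scr and &scr is only whether the caller gets a copy or a pointer to a copy of the map entry; either way the Responses map itself is untouched. A standalone sketch of that Go semantics point, with a stand-in Response type:
package main

import "fmt"

type Response struct{ Description string }

func main() {
	m := map[int]Response{200: {Description: "OK"}}
	scr := m[200] // indexing a map always yields a copy
	p := &scr     // pointer to that copy, not into the map
	p.Description = "changed"
	fmt.Println(m[200].Description) // still "OK"
}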

View file

@ -269,7 +269,7 @@ func (s Schema) JSONLookup(token string) (interface{}, error) {
}
r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) {
if r != nil || err != nil {
return r, err
}
r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)

View file

@ -16,8 +16,6 @@ package spec
import "encoding/json"
//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json
//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema
//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
//go:generate perl -pi -e s,Json,JSON,g bindata.go
@ -29,14 +27,9 @@ const (
)
var (
jsonSchema *Schema
swaggerSchema *Schema
)
func init() {
jsonSchema = MustLoadJSONSchemaDraft04()
jsonSchema = MustLoadJSONSchemaDraft04()
swaggerSchema = MustLoadSwagger20Schema()
}
)
// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error
func MustLoadJSONSchemaDraft04() *Schema {

View file

@ -159,7 +159,7 @@ func FormatInt16(value int16) string {
// FormatInt32 turns an int32 into a string
func FormatInt32(value int32) string {
return strconv.Itoa(int(value))
return strconv.FormatInt(int64(value), 10)
}
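Both formulations of FormatInt32 are equivalent, since Go's int is at least 32 bits and the conversion never truncates; a quick check:
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := int32(-2147483648) // math.MinInt32
	fmt.Println(strconv.Itoa(int(v)))            // -2147483648
	fmt.Println(strconv.FormatInt(int64(v), 10)) // -2147483648
}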
// FormatInt64 turns an int64 into a string

View file

@ -17,7 +17,6 @@ package swag
import (
"bytes"
"encoding/json"
"log"
"reflect"
"strings"
"sync"
@ -111,40 +110,28 @@ func ConcatJSON(blobs ...[]byte) []byte {
if len(b) < 3 { // yep empty but also the last one, so closing this thing
if i == last && a > 0 {
if err := buf.WriteByte(closing); err != nil {
log.Println(err)
}
buf.WriteByte(closing)
}
continue
}
idx = 0
if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
if err := buf.WriteByte(comma); err != nil {
log.Println(err)
}
buf.WriteByte(comma)
idx = 1 // this is not the first or the last so we want to drop the leading bracket
}
if i != last { // not the last one, strip brackets
if _, err := buf.Write(b[idx : len(b)-1]); err != nil {
log.Println(err)
}
buf.Write(b[idx : len(b)-1])
} else { // last one, strip only the leading bracket
if _, err := buf.Write(b[idx:]); err != nil {
log.Println(err)
}
buf.Write(b[idx:])
}
a++
}
// somehow it ended up being empty, so provide a default value
if buf.Len() == 0 {
if err := buf.WriteByte(opening); err != nil {
log.Println(err)
}
if err := buf.WriteByte(closing); err != nil {
log.Println(err)
}
buf.WriteByte(opening)
buf.WriteByte(closing)
}
return buf.Bytes()
}
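With or without the error logging removed above, ConcatJSON splices JSON objects or arrays by dropping the inner brackets and inserting commas; a usage sketch:
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	out := swag.ConcatJSON([]byte(`{"a":1}`), []byte(`{"b":2}`))
	fmt.Println(string(out)) // {"a":1,"b":2}
}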
@ -152,23 +139,15 @@ func ConcatJSON(blobs ...[]byte) []byte {
// ToDynamicJSON turns an object into a properly JSON typed structure
func ToDynamicJSON(data interface{}) interface{} {
// TODO: convert straight to a json typed map (mergo + iterate?)
b, err := json.Marshal(data)
if err != nil {
log.Println(err)
}
b, _ := json.Marshal(data)
var res interface{}
if err := json.Unmarshal(b, &res); err != nil {
log.Println(err)
}
json.Unmarshal(b, &res)
return res
}
// FromDynamicJSON turns an object into a properly JSON typed structure
func FromDynamicJSON(data, target interface{}) error {
b, err := json.Marshal(data)
if err != nil {
log.Println(err)
}
b, _ := json.Marshal(data)
return json.Unmarshal(b, target)
}
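ToDynamicJSON and FromDynamicJSON both rely on a marshal/unmarshal round trip; dropping the logging only changes what happens when that round trip fails. A self-contained sketch of the round trip itself (the roundTrip helper and pet type are ours):
package main

import (
	"encoding/json"
	"fmt"
)

// roundTrip mirrors the idea behind ToDynamicJSON: any value becomes the
// generic JSON shape (map[string]interface{}, []interface{}, float64, ...).
func roundTrip(data interface{}) interface{} {
	b, _ := json.Marshal(data)
	var res interface{}
	_ = json.Unmarshal(b, &res)
	return res
}

func main() {
	type pet struct {
		Name string `json:"name"`
		Age  int    `json:"age"`
	}
	fmt.Printf("%#v\n", roundTrip(pet{Name: "rex", Age: 3}))
	// map[string]interface {}{"age":3, "name":"rex"}
}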

View file

@ -17,25 +17,13 @@ package swag
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"path/filepath"
"strings"
"time"
)
// LoadHTTPTimeout the default timeout for load requests
var LoadHTTPTimeout = 30 * time.Second
// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
func LoadFromFileOrHTTP(path string) ([]byte, error) {
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
}
// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in
// timeout arg allows for per request overriding of the request timeout
func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path)
return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes)(path)
}
// LoadStrategy returns a loader function for a given path or uri
@ -43,32 +31,19 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(
if strings.HasPrefix(path, "http") {
return remote
}
return func(pth string) ([]byte, error) { return local(filepath.FromSlash(pth)) }
return local
}
func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
return func(path string) ([]byte, error) {
client := &http.Client{Timeout: timeout}
req, err := http.NewRequest("GET", path, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
defer func() {
if resp != nil {
if e := resp.Body.Close(); e != nil {
log.Println(e)
}
}
}()
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
}
return ioutil.ReadAll(resp.Body)
func loadHTTPBytes(path string) ([]byte, error) {
resp, err := http.Get(path)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
}
return ioutil.ReadAll(resp.Body)
}
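Callers of LoadFromFileOrHTTP are unaffected by the loader rewrite: the path still selects the local or the HTTP strategy by its "http" prefix. A usage sketch with a hypothetical file name:
package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
)

func main() {
	// A plain path goes through ioutil.ReadFile; an http(s) URL would use loadHTTPBytes instead.
	b, err := swag.LoadFromFileOrHTTP("./swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("loaded %d bytes\n", len(b))
}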

View file

@ -22,9 +22,8 @@ import (
"strings"
)
// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
// Taken from https://github.com/golang/lint/blob/1fab560e16097e5b69afb66eb93aab843ef77845/lint.go#L663-L698
var commonInitialisms = map[string]bool{
"ACL": true,
"API": true,
"ASCII": true,
"CPU": true,
@ -45,21 +44,19 @@ var commonInitialisms = map[string]bool{
"RPC": true,
"SLA": true,
"SMTP": true,
"SQL": true,
"SSH": true,
"TCP": true,
"TLS": true,
"TTL": true,
"UDP": true,
"UI": true,
"UID": true,
"UUID": true,
"UID": true,
"UI": true,
"URI": true,
"URL": true,
"UTF8": true,
"VM": true,
"XML": true,
"XMPP": true,
"XSRF": true,
"XSS": true,
}
@ -249,9 +246,6 @@ func ToJSONName(name string) string {
// ToVarName camelcases a name which can be underscored or pascal cased
func ToVarName(name string) string {
res := ToGoName(name)
if _, ok := commonInitialisms[res]; ok {
return lower(res)
}
if len(res) <= 1 {
return lower(res)
}

View file

@ -129,7 +129,7 @@ func (f *Fuzzer) genElementCount() int {
if f.minElements == f.maxElements {
return f.minElements
}
return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
return f.minElements + f.r.Intn(f.maxElements-f.minElements)
}
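The +1 decides whether maxElements itself can ever be generated: rand.Intn(n) returns values in [0, n), so Intn(max-min+1) covers min..max inclusive while Intn(max-min) never reaches max. A quick demonstration:
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	min, max := 1, 3
	r := rand.New(rand.NewSource(1))
	seen := map[int]bool{}
	for i := 0; i < 1000; i++ {
		seen[min+r.Intn(max-min+1)] = true
	}
	fmt.Println(seen) // map[1:true 2:true 3:true]; without the +1, 3 never appears
}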
func (f *Fuzzer) genShouldFill() bool {
@ -229,19 +229,12 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if f.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
f.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
f.doFuzz(v.Field(i), 0)
}
case reflect.Array:
fallthrough
case reflect.Chan:
fallthrough
case reflect.Func:

View file

@ -5,7 +5,6 @@
package jlexer
import (
"encoding/base64"
"fmt"
"io"
"reflect"
@ -506,7 +505,7 @@ func (r *Lexer) SkipRecursive() {
return
}
case c == '\\' && inQuotes:
wasEscape = !wasEscape
wasEscape = true
continue
case c == '"' && inQuotes:
inQuotes = wasEscape
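The two variants differ on runs of backslashes: toggling the flag treats a pair \\ as a completed escape, so a following quote still closes the string, while unconditionally setting it would leave that quote marked as escaped. A standalone sketch of the toggle behaviour (stringEnd is our helper, not the lexer's API):
package main

import "fmt"

// stringEnd returns the index of the closing quote of a string body, honouring
// backslash escapes by flipping the escape flag on every backslash.
func stringEnd(s []byte) int {
	wasEscape := false
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c == '\\':
			wasEscape = !wasEscape
			continue
		case c == '"':
			if !wasEscape {
				return i // closing quote found
			}
		}
		wasEscape = false
	}
	return -1
}

func main() {
	fmt.Println(stringEnd([]byte(`ab\\"tail`))) // 4: the quote after an escaped backslash closes the string
	fmt.Println(stringEnd([]byte(`ab\"tail"`))) // 8: the first quote is itself escaped
}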
@ -516,11 +515,7 @@ func (r *Lexer) SkipRecursive() {
wasEscape = false
}
r.pos = len(r.Data)
r.err = &LexerError{
Reason: "EOF reached while skipping array/object or token",
Offset: r.pos,
Data: string(r.Data[r.pos:]),
}
r.err = io.EOF
}
// Raw fetches the next item recursively as a data slice
@ -532,34 +527,6 @@ func (r *Lexer) Raw() []byte {
return r.Data[r.start:r.pos]
}
// IsStart returns whether the lexer is positioned at the start
// of an input string.
func (r *Lexer) IsStart() bool {
return r.pos == 0
}
// Consumed reads all remaining bytes from the input, publishing an error if
// there is anything but whitespace remaining.
func (r *Lexer) Consumed() {
if r.pos > len(r.Data) {
return
}
for _, c := range r.Data[r.pos:] {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
r.err = &LexerError{
Reason: "invalid character '" + string(c) + "' after top-level value",
Offset: r.pos,
Data: string(r.Data[r.pos:]),
}
return
}
r.pos++
r.start++
}
}
// UnsafeString returns the string value if the token is a string literal.
//
// Warning: returned string may point to the input buffer, so the string should not outlive
@ -593,28 +560,6 @@ func (r *Lexer) String() string {
return ret
}
// Bytes reads a string literal and base64 decodes it into a byte slice.
func (r *Lexer) Bytes() []byte {
if r.token.kind == tokenUndef && r.Ok() {
r.fetchToken()
}
if !r.Ok() || r.token.kind != tokenString {
r.errInvalidToken("string")
return nil
}
ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
len, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
if err != nil {
r.err = &LexerError{
Reason: err.Error(),
}
return nil
}
r.consume()
return ret[:len]
}
// Bool reads a true or false boolean keyword.
func (r *Lexer) Bool() bool {
if r.token.kind == tokenUndef && r.Ok() {

View file

@ -2,7 +2,6 @@
package jwriter
import (
"encoding/base64"
"io"
"strconv"
"unicode/utf8"
@ -10,19 +9,8 @@ import (
"github.com/mailru/easyjson/buffer"
)
// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but
// Flags field in Writer is used to set and pass them around.
type Flags int
const (
NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
)
// Writer is a JSON writer.
type Writer struct {
Flags Flags
Error error
Buffer buffer.Buffer
}
@ -71,19 +59,6 @@ func (w *Writer) Raw(data []byte, err error) {
}
}
// Base64Bytes appends data to the buffer after base64 encoding it
func (w *Writer) Base64Bytes(data []byte) {
if data == nil {
w.Buffer.AppendString("null")
return
}
w.Buffer.AppendByte('"')
dst := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
base64.StdEncoding.Encode(dst, data)
w.Buffer.AppendBytes(dst)
w.Buffer.AppendByte('"')
}
func (w *Writer) Uint8(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
@ -225,12 +200,6 @@ func (w *Writer) Bool(v bool) {
const chars = "0123456789abcdef"
func isNotEscapedSingleChar(c byte) bool {
// Note: might make sense to use a table if there are more chars to escape. With 4 chars
// it benchmarks the same.
return c != '<' && c != '\\' && c != '"' && c != '>' && c >= 0x20 && c < utf8.RuneSelf
}
func (w *Writer) String(s string) {
w.Buffer.AppendByte('"')
@ -240,32 +209,39 @@ func (w *Writer) String(s string) {
p := 0 // last non-escape symbol
for i := 0; i < len(s); {
c := s[i]
if isNotEscapedSingleChar(c) {
// single-width character, no escaping is required
i++
continue
} else if c < utf8.RuneSelf {
// single-width character, need to escape
w.Buffer.AppendString(s[p:i])
// single-width character
if c := s[i]; c < utf8.RuneSelf {
var escape byte
switch c {
case '\t':
w.Buffer.AppendString(`\t`)
escape = 't'
case '\r':
w.Buffer.AppendString(`\r`)
escape = 'r'
case '\n':
w.Buffer.AppendString(`\n`)
escape = 'n'
case '\\':
w.Buffer.AppendString(`\\`)
escape = '\\'
case '"':
w.Buffer.AppendString(`\"`)
escape = '"'
case '<', '>':
// do nothing
default:
if c >= 0x20 {
// no escaping is required
i++
continue
}
}
if escape != 0 {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendByte('\\')
w.Buffer.AppendByte(escape)
} else {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\u00`)
w.Buffer.AppendByte(chars[c>>4])
w.Buffer.AppendByte(chars[c&0xf])
}
i++
p = i
continue

vendor/github.com/pborman/uuid/dce.go generated vendored Normal file → Executable file
View file

vendor/github.com/pborman/uuid/doc.go generated vendored Normal file → Executable file
View file

View file

@ -19,7 +19,7 @@ var (
NIL = Parse("00000000-0000-0000-0000-000000000000")
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// NewHash returns a new UUID dervied from the hash of space concatenated with
// data generated by h. The hash should be at least 16 byte in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement

View file

@ -7,21 +7,17 @@ package uuid
import "errors"
func (u UUID) MarshalJSON() ([]byte, error) {
if len(u) != 16 {
if len(u) == 0 {
return []byte(`""`), nil
}
var js [38]byte
js[0] = '"'
encodeHex(js[1:], u)
js[37] = '"'
return js[:], nil
return []byte(`"` + u.String() + `"`), nil
}
func (u *UUID) UnmarshalJSON(data []byte) error {
if string(data) == `""` {
if len(data) == 0 || string(data) == `""` {
return nil
}
if data[0] != '"' {
if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' {
return errors.New("invalid UUID format")
}
data = data[1 : len(data)-1]
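For a valid 16-byte UUID, either MarshalJSON implementation emits the same quoted canonical string, and UnmarshalJSON accepts it back; a round-trip example:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	u := uuid.Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
	b, _ := json.Marshal(u)
	fmt.Println(string(b)) // "f47ac10b-58cc-0372-8567-0e02b2c3d479"

	var back uuid.UUID
	_ = json.Unmarshal(b, &back)
	fmt.Println(uuid.Equal(u, back)) // true
}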

vendor/github.com/pborman/uuid/node.go generated vendored Normal file → Executable file
View file

@ -4,13 +4,9 @@
package uuid
import (
"net"
"sync"
)
import "net"
var (
nodeMu sync.Mutex
interfaces []net.Interface // cached list of interfaces
ifname string // name of interface being used
nodeID []byte // hardware for version 1 UUIDs
@ -20,8 +16,6 @@ var (
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
defer nodeMu.Unlock()
nodeMu.Lock()
return ifname
}
@ -32,12 +26,6 @@ func NodeInterface() string {
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
return setNodeInterface(name)
}
func setNodeInterface(name string) bool {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
@ -71,10 +59,8 @@ func setNodeInterface(name string) bool {
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
defer nodeMu.Unlock()
nodeMu.Lock()
if nodeID == nil {
setNodeInterface("")
SetNodeInterface("")
}
nid := make([]byte, 6)
copy(nid, nodeID)
@ -85,8 +71,6 @@ func NodeID() []byte {
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
defer nodeMu.Unlock()
nodeMu.Lock()
if setNodeID(id) {
ifname = "user"
return true

View file

@ -1,66 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"database/sql/driver"
"errors"
"fmt"
)
// Scan implements sql.Scanner so UUIDs can be read from databases transparently
// Currently, database types that map to string and []byte are supported. Please
// consult database-specific driver documentation for matching types.
func (uuid *UUID) Scan(src interface{}) error {
switch src.(type) {
case string:
// if an empty UUID comes from a table, we return a null UUID
if src.(string) == "" {
return nil
}
// see uuid.Parse for required string format
parsed := Parse(src.(string))
if parsed == nil {
return errors.New("Scan: invalid UUID format")
}
*uuid = parsed
case []byte:
b := src.([]byte)
// if an empty UUID comes from a table, we return a null UUID
if len(b) == 0 {
return nil
}
// assumes a simple slice of bytes if 16 bytes
// otherwise attempts to parse
if len(b) == 16 {
*uuid = UUID(b)
} else {
u := Parse(string(b))
if u == nil {
return errors.New("Scan: invalid UUID format")
}
*uuid = u
}
default:
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
}
return nil
}
// Value implements sql.Valuer so that UUIDs can be written to databases
// transparently. Currently, UUIDs map to strings. Please consult
// database-specific driver documentation for matching types.
func (uuid UUID) Value() (driver.Value, error) {
return uuid.String(), nil
}

vendor/github.com/pborman/uuid/time.go generated vendored Normal file → Executable file
View file

@ -23,7 +23,7 @@ const (
)
var (
timeMu sync.Mutex
mu sync.Mutex
lasttime uint64 // last time we returned
clock_seq uint16 // clock sequence for this run
@ -43,8 +43,8 @@ func (t Time) UnixTime() (sec, nsec int64) {
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
defer mu.Unlock()
mu.Lock()
return getTime()
}
@ -75,8 +75,8 @@ func getTime() (Time, uint16, error) {
// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated
// for
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
defer mu.Unlock()
mu.Lock()
return clockSequence()
}
@ -90,8 +90,8 @@ func clockSequence() int {
// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
defer mu.Unlock()
mu.Lock()
setClockSequence(seq)
}

View file

@ -16,7 +16,7 @@ func randomBits(b []byte) {
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = [256]byte{
var xvalues = []byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,

vendor/github.com/pborman/uuid/uuid.go generated vendored Normal file → Executable file
View file

@ -7,26 +7,11 @@ package uuid
import (
"bytes"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"strings"
)
// Array is a pass-by-value UUID that can be used as an efficient key in a map.
type Array [16]byte
// UUID converts uuid into a slice.
func (uuid Array) UUID() UUID {
return uuid[:]
}
// String returns the string representation of uuid,
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
func (uuid Array) String() string {
return uuid.UUID().String()
}
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID []byte
@ -69,8 +54,8 @@ func Parse(s string) UUID {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return nil
}
var uuid [16]byte
for i, x := range [16]int{
uuid := make([]byte, 16)
for i, x := range []int{
0, 2, 4, 6,
9, 11,
14, 16,
@ -82,7 +67,7 @@ func Parse(s string) UUID {
uuid[i] = v
}
}
return uuid[:]
return uuid
}
// Equal returns true if uuid1 and uuid2 are equal.
@ -90,50 +75,26 @@ func Equal(uuid1, uuid2 UUID) bool {
return bytes.Equal(uuid1, uuid2)
}
// Array returns an array representation of uuid that can be used as a map key.
// Array panics if uuid is not valid.
func (uuid UUID) Array() Array {
if len(uuid) != 16 {
panic("invalid uuid")
}
var a Array
copy(a[:], uuid)
return a
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
if len(uuid) != 16 {
if uuid == nil || len(uuid) != 16 {
return ""
}
var buf [36]byte
encodeHex(buf[:], uuid)
return string(buf[:])
b := []byte(uuid)
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
if len(uuid) != 16 {
if uuid == nil || len(uuid) != 16 {
return ""
}
var buf [36 + 9]byte
copy(buf[:], "urn:uuid:")
encodeHex(buf[9:], uuid)
return string(buf[:])
}
func encodeHex(dst []byte, uuid UUID) {
hex.Encode(dst[:], uuid[:4])
dst[8] = '-'
hex.Encode(dst[9:13], uuid[4:6])
dst[13] = '-'
hex.Encode(dst[14:18], uuid[6:8])
dst[18] = '-'
hex.Encode(dst[19:23], uuid[8:10])
dst[23] = '-'
hex.Encode(dst[24:], uuid[10:])
b := []byte(uuid)
return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}
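Both the encodeHex version and the fmt.Sprintf version lay the 16 bytes out in the canonical 8-4-4-4-12 groups; the Sprintf form can be checked directly:
package main

import "fmt"

func main() {
	b := []byte{0xf4, 0x7a, 0xc1, 0x0b, 0x58, 0xcc, 0x03, 0x72,
		0x85, 0x67, 0x0e, 0x02, 0xb2, 0xc3, 0xd4, 0x79}
	// %x on each sub-slice hex-encodes its bytes, producing the usual grouping.
	fmt.Printf("%08x-%04x-%04x-%04x-%012x\n",
		b[:4], b[4:6], b[6:8], b[8:10], b[10:])
	// f47ac10b-58cc-0372-8567-0e02b2c3d479
}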
// Variant returns the variant encoded in uuid. It returns Invalid if
@ -152,9 +113,10 @@ func (uuid UUID) Variant() Variant {
default:
return Reserved
}
panic("unreachable")
}
// Version returns the version of uuid. It returns false if uuid is not
// Version returns the verison of uuid. It returns false if uuid is not
// valid.
func (uuid UUID) Version() (Version, bool) {
if len(uuid) != 16 {
@ -186,7 +148,7 @@ func (v Variant) String() string {
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// SetRand sets the random number generator to r, which implents io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//