Migrate Sirupsen to sirupsen.
Parent: c134dcd6fe
Commit: fb4ba7af2b
684 changed files with 92394 additions and 33943 deletions
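The change itself is mechanical: every import of the mixed-case logrus path is rewritten to the canonical lower-case one, with no API changes. A minimal before/after sketch (the alias is illustrative, not taken from this commit):

// Before: mixed-case import path, which breaks case-sensitive vendoring and tooling.
// import log "github.com/Sirupsen/logrus"

// After: canonical lower-case path; package name and API are unchanged.
package main

import log "github.com/sirupsen/logrus"

func main() {
	log.Info("same logrus API, new import path")
}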
14  vendor/github.com/docker/cli/cli/cobra.go (generated, vendored)
@@ -17,6 +17,7 @@ func SetupRootCommand(rootCmd *cobra.Command) {
cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
cobra.AddTemplateFunc("useLine", UseLine)

rootCmd.SetUsageTemplate(usageTemplate)
rootCmd.SetHelpTemplate(helpTemplate)
@@ -25,6 +26,7 @@ func SetupRootCommand(rootCmd *cobra.Command) {

rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
rootCmd.PersistentFlags().Lookup("help").Hidden = true
}

// FlagErrorFunc prints an error message which matches the format of the
@@ -97,9 +99,19 @@ func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
return cmds
}

// UseLine returns the usage line for a command. This implementation is different
// from the default Command.UseLine in that it does not add a `[flags]` to the
// end of the line.
func UseLine(cmd *cobra.Command) string {
if cmd.HasParent() {
return cmd.Parent().CommandPath() + " " + cmd.Use
}
return cmd.Use
}

var usageTemplate = `Usage:

{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
{{- if not .HasSubCommands}} {{ useLine . }}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}

{{ .Short | trim }}
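The new useLine template function exists so the usage template can print a command's usage line without cobra's default "[flags]" suffix. A self-contained sketch of the same wiring, with made-up command names:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// useLine mirrors the vendored helper: parent command path plus the command's
// own Use string, with no "[flags]" appended.
func useLine(cmd *cobra.Command) string {
	if cmd.HasParent() {
		return cmd.Parent().CommandPath() + " " + cmd.Use
	}
	return cmd.Use
}

func main() {
	root := &cobra.Command{Use: "docker"}
	build := &cobra.Command{Use: "build [OPTIONS] PATH | URL | -"}
	root.AddCommand(build)
	fmt.Println(useLine(build)) // prints: docker build [OPTIONS] PATH | URL | -
}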
157  vendor/github.com/docker/cli/cli/command/cli.go (generated, vendored)
@@ -1,27 +1,28 @@
package command

import (
"fmt"
"io"
"net"
"net/http"
"os"
"runtime"
"time"

"github.com/docker/cli/cli"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
cliflags "github.com/docker/cli/cli/flags"
"github.com/docker/cli/cli/trust"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/notary/passphrase"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/theupdateframework/notary"
notaryclient "github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/passphrase"
"golang.org/x/net/context"
)

@@ -40,7 +41,8 @@ type Cli interface {
In() *InStream
SetIn(in *InStream)
ConfigFile() *configfile.ConfigFile
CredentialsStore(serverAddress string) credentials.Store
ServerInfo() ServerInfo
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
}

// DockerCli is an instance the docker command line client.
@@ -105,106 +107,66 @@ func (cli *DockerCli) ServerInfo() ServerInfo {
return cli.server
}

// GetAllCredentials returns all of the credentials stored in all of the
// configured credential stores.
func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) {
auths := make(map[string]types.AuthConfig)
for registry := range cli.configFile.CredentialHelpers {
helper := cli.CredentialsStore(registry)
newAuths, err := helper.GetAll()
if err != nil {
return nil, err
}
addAll(auths, newAuths)
}
defaultStore := cli.CredentialsStore("")
newAuths, err := defaultStore.GetAll()
if err != nil {
return nil, err
}
addAll(auths, newAuths)
return auths, nil
}

func addAll(to, from map[string]types.AuthConfig) {
for reg, ac := range from {
to[reg] = ac
}
}

// CredentialsStore returns a new credentials store based
// on the settings provided in the configuration file. Empty string returns
// the default credential store.
func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store {
if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" {
return credentials.NewNativeStore(cli.configFile, helper)
}
return credentials.NewFileStore(cli.configFile)
}

// getConfiguredCredentialStore returns the credential helper configured for the
// given registry, the default credsStore, or the empty string if neither are
// configured.
func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string {
if c.CredentialHelpers != nil && serverAddress != "" {
if helper, exists := c.CredentialHelpers[serverAddress]; exists {
return helper
}
}
return c.CredentialsStore
}

// Initialize the dockerCli runs initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
cli.configFile = LoadDefaultConfigFile(cli.err)
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)

var err error
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
if tlsconfig.IsErrEncryptedKey(err) {
var (
passwd string
giveup bool
)
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)

for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ {
// some code and comments borrowed from notary/trustmanager/keystore.go
passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return errors.Wrap(err, "private key is encrypted, but could not get passphrase")
}

opts.Common.TLSOptions.Passphrase = passwd
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
newClient := func(password string) (client.APIClient, error) {
opts.Common.TLSOptions.Passphrase = password
return NewAPIClientFromFlags(opts.Common, cli.configFile)
}
cli.client, err = getClientWithPassword(passRetriever, newClient)
}

if err != nil {
return err
}
cli.initializeFromClient()
return nil
}

func (cli *DockerCli) initializeFromClient() {
cli.defaultVersion = cli.client.ClientVersion()

if ping, err := cli.client.Ping(context.Background()); err == nil {
cli.server = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
}
ping, err := cli.client.Ping(context.Background())
if err != nil {
// Default to true if we fail to connect to daemon
cli.server = ServerInfo{HasExperimental: true}

// since the new header was added in 1.25, assume server is 1.24 if header is not present.
if ping.APIVersion == "" {
ping.APIVersion = "1.24"
}

// if server version is lower than the current cli, downgrade
if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) {
cli.client.UpdateClientVersion(ping.APIVersion)
if ping.APIVersion != "" {
cli.client.NegotiateAPIVersionPing(ping)
}
return
}

return nil
cli.server = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
}
cli.client.NegotiateAPIVersionPing(ping)
}

func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
for attempts := 0; ; attempts++ {
passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
if giveup || err != nil {
return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
}

apiclient, err := newClient(passwd)
if !tlsconfig.IsErrEncryptedKey(err) {
return apiclient, err
}
}
}

// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}

// ServerInfo stores details about the supported features and platform of the
@@ -219,19 +181,6 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
}

// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct if none is found.
func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile {
configFile, e := cliconfig.Load(cliconfig.Dir())
if e != nil {
fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e)
}
if !configFile.ContainsAuth() {
credentials.DetectDefaultStore(configFile)
}
return configFile
}

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
@@ -258,7 +207,8 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
return client.NewClient(host, verStr, httpClient, customHeaders)
}

func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
var host string
switch len(hosts) {
case 0:
host = os.Getenv("DOCKER_HOST")
@@ -268,8 +218,7 @@ func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string,
return "", errors.New("Please specify only one -H")
}

host, err = dopts.ParseHost(tlsOptions != nil, host)
return
return dopts.ParseHost(tlsOptions != nil, host)
}

func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
@@ -285,6 +234,10 @@ func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, er
}
tr := &http.Transport{
TLSClientConfig: config,
DialContext: (&net.Dialer{
KeepAlive: 30 * time.Second,
Timeout: 30 * time.Second,
}).DialContext,
}
proto, addr, _, err := client.ParseHost(host)
if err != nil {
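The Initialize refactor above pulls the encrypted-TLS-key handling into getClientWithPassword: a passphrase retriever is called with an increasing attempt counter until the client constructor stops returning an encrypted-key error or the retriever gives up. A simplified sketch of that retry shape, with stand-in types instead of the real tlsconfig/notary APIs:

package main

import (
	"errors"
	"fmt"
)

var errEncryptedKey = errors.New("private key is encrypted")

// retryWithPassphrase mirrors getClientWithPassword: ask for a passphrase,
// rebuild the client, and retry only while the key is still reported encrypted.
func retryWithPassphrase(
	retrieve func(attempt int) (passwd string, giveup bool, err error),
	newClient func(passwd string) (string, error),
) (string, error) {
	for attempts := 0; ; attempts++ {
		passwd, giveup, err := retrieve(attempts)
		if giveup || err != nil {
			return "", fmt.Errorf("could not get passphrase: %v", err)
		}
		c, err := newClient(passwd)
		if !errors.Is(err, errEncryptedKey) {
			return c, err // success, or an unrelated fatal error
		}
	}
}

func main() {
	c, err := retryWithPassphrase(
		func(int) (string, bool, error) { return "s3cret", false, nil },
		func(p string) (string, error) { return "api-client", nil },
	)
	fmt.Println(c, err)
}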
2  vendor/github.com/docker/cli/cli/command/events_utils.go (generated, vendored)
@@ -3,8 +3,8 @@ package command
import (
"sync"

"github.com/Sirupsen/logrus"
eventtypes "github.com/docker/docker/api/types/events"
"github.com/sirupsen/logrus"
)

// EventHandler is abstract interface for user to customize
133  vendor/github.com/docker/cli/cli/command/image/build.go (generated, vendored)
@@ -21,13 +21,14 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/urlutil"
runconfigopts "github.com/docker/docker/runconfig/opts"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
@@ -62,6 +63,8 @@ type buildOptions struct {
squash bool
target string
imageIDFile string
stream bool
platform string
}

// dockerfileFromStdin returns true when the user specified that the Dockerfile
@@ -76,16 +79,20 @@ func (o buildOptions) contextFromStdin() bool {
return o.context == "-"
}

// NewBuildCommand creates a new `docker build` command
func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
options := buildOptions{
return buildOptions{
tags: opts.NewListOpts(validateTag),
buildArgs: opts.NewListOpts(opts.ValidateEnv),
ulimits: opts.NewUlimitOpt(&ulimits),
labels: opts.NewListOpts(opts.ValidateEnv),
extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
}
}

// NewBuildCommand creates a new `docker build` command
func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
options := newBuildOptions()

cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -",
@@ -129,11 +136,16 @@ func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")

command.AddTrustVerificationFlags(flags)
command.AddPlatformFlag(flags, &options.platform)

flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
flags.SetAnnotation("squash", "experimental", nil)
flags.SetAnnotation("squash", "version", []string{"1.25"})

flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
flags.SetAnnotation("stream", "experimental", nil)
flags.SetAnnotation("stream", "version", []string{"1.31"})

return cmd
}

@@ -154,7 +166,7 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
}

// nolint: gocyclo
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
func runBuild(dockerCli command.Cli, options buildOptions) error {
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
@@ -164,6 +176,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
relDockerfile string
progBuff io.Writer
buildBuff io.Writer
remote string
)

if options.dockerfileFromStdin() {
@@ -189,6 +202,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {

switch {
case options.contextFromStdin():
// buildCtx is tar archive. if stdin was dockerfile then it is wrapped
buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
case isLocalDir(specifiedContext):
contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
@@ -212,7 +226,8 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
contextDir = tempDir
}

if buildCtx == nil {
// read from a directory into tar archive
if buildCtx == nil && !options.stream {
excludes, err := build.ReadDockerignore(contextDir)
if err != nil {
return err
@@ -229,38 +244,61 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
}

excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())

compression := archive.Uncompressed
if options.compress {
compression = archive.Gzip
}
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
Compression: compression,
ExcludePatterns: excludes,
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
})
if err != nil {
return err
}
}

// replace Dockerfile if added dynamically
if dockerfileCtx != nil {
// replace Dockerfile if it was added from stdin and there is archive context
if dockerfileCtx != nil && buildCtx != nil {
buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
if err != nil {
return err
}
}

ctx := context.Background()
// if streaming and dockerfile was not from stdin then read from file
// to the same reader that is usually stdin
if options.stream && dockerfileCtx == nil {
dockerfileCtx, err = os.Open(relDockerfile)
if err != nil {
return errors.Wrapf(err, "failed to open %s", relDockerfile)
}
defer dockerfileCtx.Close()
}

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

var resolvedTags []*resolvedTag
if command.IsTrusted() {
translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
return TrustedReference(ctx, dockerCli, ref, nil)
}
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
// Dockerfile which uses trusted pulls.
buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
// if there is a tar wrapper, the dockerfile needs to be replaced inside it
if buildCtx != nil {
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
// Dockerfile which uses trusted pulls.
buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
} else if dockerfileCtx != nil {
// if there was not archive context still do the possible replacements in Dockerfile
newDockerfile, _, err := rewriteDockerfileFrom(ctx, dockerfileCtx, translator)
if err != nil {
return err
}
dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile))
}
}

if options.compress {
buildCtx, err = build.Compress(buildCtx)
if err != nil {
return err
}
}

// Setup an upload progress bar
@@ -269,9 +307,46 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
progressOutput = &lastProgressOutput{output: progressOutput}
}

var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
// if up to this point nothing has set the context then we must have another
// way for sending it(streaming) and set the context to the Dockerfile
if dockerfileCtx != nil && buildCtx == nil {
buildCtx = dockerfileCtx
}

authConfigs, _ := dockerCli.GetAllCredentials()
s, err := trySession(dockerCli, contextDir)
if err != nil {
return err
}

var body io.Reader
if buildCtx != nil && !options.stream {
body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
}

// add context stream to the session
if options.stream && s != nil {
syncDone := make(chan error) // used to signal first progress reporting completed.
// progress would also send errors but don't need it here as errors
// are handled by session.Run() and ImageBuild()
if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil {
return err
}

buf := newBufferedWriter(syncDone, buildBuff)
defer func() {
select {
case <-buf.flushed:
case <-ctx.Done():
}
}()
buildBuff = buf

remote = clientSessionRemote
body = buildCtx
}

configFile := dockerCli.ConfigFile()
authConfigs, _ := configFile.GetAllCredentials()
buildOptions := types.ImageBuildOptions{
Memory: options.memory.Value(),
MemorySwap: options.memorySwap.Value(),
@@ -291,15 +366,28 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
Dockerfile: relDockerfile,
ShmSize: options.shmSize.Value(),
Ulimits: options.ulimits.GetList(),
BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()),
BuildArgs: configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
AuthConfigs: authConfigs,
Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
Labels: opts.ConvertKVStringsToMap(options.labels.GetAll()),
CacheFrom: options.cacheFrom,
SecurityOpt: options.securityOpt,
NetworkMode: options.networkMode,
Squash: options.squash,
ExtraHosts: options.extraHosts.GetAll(),
Target: options.target,
RemoteContext: remote,
Platform: options.platform,
}

if s != nil {
go func() {
logrus.Debugf("running session: %v", s.ID())
if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil {
logrus.Error(err)
cancel() // cancel progress context
}
}()
buildOptions.SessionID = s.ID()
}

response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
@@ -307,6 +395,7 @@ func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
if options.quiet {
fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
}
cancel()
return err
}
defer response.Body.Close()
74  vendor/github.com/docker/cli/cli/command/image/build/context.go (generated, vendored)
@@ -1,25 +1,25 @@
package build

import (
"archive/tar"
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"

"archive/tar"
"bytes"
"time"

"github.com/docker/docker/builder/remotecontext/git"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/gitutils"
"github.com/docker/docker/pkg/httputils"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/stringid"
@@ -29,6 +29,8 @@ import (
const (
// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
DefaultDockerfileName string = "Dockerfile"
// archiveHeaderSize is the number of bytes in an archive header
archiveHeaderSize = 512
)

// ValidateContextDirectory checks if all the contents of the directory
@@ -85,12 +87,12 @@ func ValidateContextDirectory(srcPath string, excludes []string) error {
func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) {
buf := bufio.NewReader(r)

magic, err := buf.Peek(archive.HeaderSize)
magic, err := buf.Peek(archiveHeaderSize)
if err != nil && err != io.EOF {
return nil, "", errors.Errorf("failed to peek context header from STDIN: %v", err)
}

if archive.IsArchive(magic) {
if IsArchive(magic) {
return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil
}

@@ -134,6 +136,18 @@ func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCl

}

// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
compression := archive.DetectCompression(header)
if compression != archive.Uncompressed {
return true
}
r := tar.NewReader(bytes.NewBuffer(header))
_, err := r.Next()
return err == nil
}

// GetContextFromGitURL uses a Git URL as context for a `docker build`. The
// git repo is cloned into a temporary directory used as the context directory.
// Returns the absolute path to the temporary context directory, the relative
@@ -143,7 +157,7 @@ func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error)
if _, err := exec.LookPath("git"); err != nil {
return "", "", errors.Wrapf(err, "unable to find 'git'")
}
absContextDir, err := gitutils.Clone(gitURL)
absContextDir, err := git.Clone(gitURL)
if err != nil {
return "", "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory")
}
@@ -161,7 +175,7 @@ func GetContextFromGitURL(gitURL, dockerfileName string) (string, string, error)
// Returns the tar archive used for the context and a path of the
// dockerfile inside the tar.
func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) {
response, err := httputils.Download(remoteURL)
response, err := getWithStatusError(remoteURL)
if err != nil {
return nil, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err)
}
@@ -173,6 +187,24 @@ func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.Read
return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName)
}

// getWithStatusError does an http.Get() and returns an error if the
// status code is 4xx or 5xx.
func getWithStatusError(url string) (resp *http.Response, err error) {
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode < 400 {
return resp, nil
}
msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status)
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, errors.Wrapf(err, msg+": error reading body")
}
return nil, errors.Errorf(msg+": %s", bytes.TrimSpace(body))
}

// GetContextFromLocalDir uses the given local directory as context for a
// `docker build`. Returns the absolute path to the local context directory,
// the relative path of the dockerfile in that context directory, and a non-nil
@@ -344,3 +376,27 @@ func AddDockerfileToBuildContext(dockerfileCtx io.ReadCloser, buildCtx io.ReadCl
})
return buildCtx, randomName, nil
}

// Compress the build context for sending to the API
func Compress(buildCtx io.ReadCloser) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()

go func() {
compressWriter, err := archive.CompressStream(pipeWriter, archive.Gzip)
if err != nil {
pipeWriter.CloseWithError(err)
}
defer buildCtx.Close()

if _, err := pools.Copy(compressWriter, buildCtx); err != nil {
pipeWriter.CloseWithError(
errors.Wrap(err, "failed to compress context"))
compressWriter.Close()
return
}
compressWriter.Close()
pipeWriter.Close()
}()

return pipeReader, nil
}
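getWithStatusError replaces the old httputils.Download call: a plain GET whose 4xx/5xx responses are turned into errors that carry the response body. A hedged usage sketch built only on the standard library (it mirrors, rather than imports, the vendored helper):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// getWithStatusError performs a GET and converts any 4xx/5xx status into an
// error that includes the response body, like the vendored helper above.
func getWithStatusError(url string) (*http.Response, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode < 400 {
		return resp, nil
	}
	body, readErr := io.ReadAll(resp.Body)
	resp.Body.Close()
	if readErr != nil {
		return nil, fmt.Errorf("failed to GET %s with status %s: error reading body: %v", url, resp.Status, readErr)
	}
	return nil, fmt.Errorf("failed to GET %s with status %s: %s", url, resp.Status, body)
}

func main() {
	// Hypothetical URL, used only to exercise the error path.
	if _, err := getWithStatusError("https://example.com/missing-context.tar.gz"); err != nil {
		fmt.Println(err)
	}
}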
153  vendor/github.com/docker/cli/cli/command/image/build_session.go (generated, vendored, new file)
@@ -0,0 +1,153 @@
package image

import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"time"

"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image/build"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
"github.com/pkg/errors"
"golang.org/x/time/rate"
)

const clientSessionRemote = "client-session"

func isSessionSupported(dockerCli command.Cli) bool {
return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
}

func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) {
var s *session.Session
if isSessionSupported(dockerCli) {
sharedKey, err := getBuildSharedKey(contextDir)
if err != nil {
return nil, errors.Wrap(err, "failed to get build shared key")
}
s, err = session.NewSession(filepath.Base(contextDir), sharedKey)
if err != nil {
return nil, errors.Wrap(err, "failed to create session")
}
}
return s, nil
}

func addDirToSession(session *session.Session, contextDir string, progressOutput progress.Output, done chan error) error {
excludes, err := build.ReadDockerignore(contextDir)
if err != nil {
return err
}

p := &sizeProgress{out: progressOutput, action: "Streaming build context to Docker daemon"}

workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
{Dir: contextDir, Excludes: excludes},
})
session.Allow(workdirProvider)

// this will be replaced on parallel build jobs. keep the current
// progressbar for now
if snpc, ok := workdirProvider.(interface {
SetNextProgressCallback(func(int, bool), chan error)
}); ok {
snpc.SetNextProgressCallback(p.update, done)
}

return nil
}

type sizeProgress struct {
out progress.Output
action string
limiter *rate.Limiter
}

func (sp *sizeProgress) update(size int, last bool) {
if sp.limiter == nil {
sp.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
}
if last || sp.limiter.Allow() {
sp.out.WriteProgress(progress.Progress{Action: sp.action, Current: int64(size), LastUpdate: last})
}
}

type bufferedWriter struct {
done chan error
io.Writer
buf *bytes.Buffer
flushed chan struct{}
mu sync.Mutex
}

func newBufferedWriter(done chan error, w io.Writer) *bufferedWriter {
bw := &bufferedWriter{done: done, Writer: w, buf: new(bytes.Buffer), flushed: make(chan struct{})}
go func() {
<-done
bw.flushBuffer()
}()
return bw
}

func (bw *bufferedWriter) Write(dt []byte) (int, error) {
select {
case <-bw.done:
bw.flushBuffer()
return bw.Writer.Write(dt)
default:
return bw.buf.Write(dt)
}
}

func (bw *bufferedWriter) flushBuffer() {
bw.mu.Lock()
select {
case <-bw.flushed:
default:
bw.Writer.Write(bw.buf.Bytes())
close(bw.flushed)
}
bw.mu.Unlock()
}

func getBuildSharedKey(dir string) (string, error) {
// build session is hash of build dir with node based randomness
s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir)))
return hex.EncodeToString(s[:]), nil
}

func tryNodeIdentifier() string {
out := cliconfig.Dir() // return config dir as default on permission error
if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil {
sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID")
if _, err := os.Lstat(sessionFile); err != nil {
if os.IsNotExist(err) { // create a new file with stored randomness
b := make([]byte, 32)
if _, err := rand.Read(b); err != nil {
return out
}
if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
return out
}
}
}

dt, err := ioutil.ReadFile(sessionFile)
if err == nil {
return string(dt)
}
}
return out
}
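The bufferedWriter added here gates build output: writes are buffered until the first context sync signals completion on the done channel, then flushed once and passed straight through. A small stand-alone sketch of that gate, without the buildkit wiring (channel type and names are illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

// gatedWriter buffers writes until done is closed, then flushes the buffer
// once and forwards subsequent writes to the wrapped writer.
type gatedWriter struct {
	done    chan struct{}
	w       io.Writer
	buf     bytes.Buffer
	flushed bool
	mu      sync.Mutex
}

func (g *gatedWriter) Write(p []byte) (int, error) {
	g.mu.Lock()
	defer g.mu.Unlock()
	select {
	case <-g.done:
		if !g.flushed {
			g.w.Write(g.buf.Bytes())
			g.flushed = true
		}
		return g.w.Write(p)
	default:
		return g.buf.Write(p)
	}
}

func main() {
	done := make(chan struct{})
	gw := &gatedWriter{done: done, w: os.Stdout}
	fmt.Fprintln(gw, "buffered until the sync completes")
	close(done)
	fmt.Fprintln(gw, "written through after the gate opens")
}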
3  vendor/github.com/docker/cli/cli/command/image/cmd.go (generated, vendored)
@@ -8,8 +8,7 @@ import (
)

// NewImageCommand returns a cobra command for `image` subcommands
// nolint: interfacer
func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command {
func NewImageCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "image",
Short: "Manage images",
8  vendor/github.com/docker/cli/cli/command/image/prune.go (generated, vendored)
@@ -37,7 +37,7 @@ func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
return nil
},
Tags: map[string]string{"version": "1.25"},
Annotations: map[string]string{"version": "1.25"},
}

flags := cmd.Flags()
@@ -65,12 +65,12 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
warning = allImageWarning
}
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
return
return 0, "", nil
}

report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters)
if err != nil {
return
return 0, "", err
}

if len(report.ImagesDeleted) > 0 {
@@ -85,7 +85,7 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
spaceReclaimed = report.SpaceReclaimed
}

return
return spaceReclaimed, output, nil
}

// RunPrune calls the Image Prune API
37  vendor/github.com/docker/cli/cli/command/image/pull.go (generated, vendored)
@@ -6,16 +6,17 @@ import (

"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/trust"
"github.com/docker/distribution/reference"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)

type pullOptions struct {
remote string
all bool
remote string
all bool
platform string
}

// NewPullCommand creates a new `docker pull` command
@@ -35,44 +36,39 @@ func NewPullCommand(dockerCli command.Cli) *cobra.Command {
flags := cmd.Flags()

flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository")

command.AddPlatformFlag(flags, &opts.platform)
command.AddTrustVerificationFlags(flags)

return cmd
}

func runPull(dockerCli command.Cli, opts pullOptions) error {
func runPull(cli command.Cli, opts pullOptions) error {
distributionRef, err := reference.ParseNormalizedNamed(opts.remote)
if err != nil {
switch {
case err != nil:
return err
}
if opts.all && !reference.IsNameOnly(distributionRef) {
case opts.all && !reference.IsNameOnly(distributionRef):
return errors.New("tag can't be used with --all-tags/-a")
}

if !opts.all && reference.IsNameOnly(distributionRef) {
case !opts.all && reference.IsNameOnly(distributionRef):
distributionRef = reference.TagNameOnly(distributionRef)
if tagged, ok := distributionRef.(reference.Tagged); ok {
fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", tagged.Tag())
fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag())
}
}

// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
ctx := context.Background()
imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), distributionRef.String())
if err != nil {
return err
}

ctx := context.Background()

authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull")

// Check if reference has a digest
_, isCanonical := distributionRef.(reference.Canonical)
if command.IsTrusted() && !isCanonical {
err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege)
err = trustedPull(ctx, cli, imgRefAndAuth, opts.platform)
} else {
err = imagePullPrivileged(ctx, dockerCli, authConfig, reference.FamiliarString(distributionRef), requestPrivilege, opts.all)
err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts.all, opts.platform)
}
if err != nil {
if strings.Contains(err.Error(), "when fetching 'plugin'") {
@@ -80,6 +76,5 @@ func runPull(dockerCli command.Cli, opts pullOptions) error {
}
return err
}

return nil
}
2  vendor/github.com/docker/cli/cli/command/image/push.go (generated, vendored)
@@ -48,7 +48,7 @@ func runPush(dockerCli command.Cli, remote string) error {
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push")

if command.IsTrusted() {
return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege)
return TrustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege)
}

responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege)
15  vendor/github.com/docker/cli/cli/command/image/remove.go (generated, vendored)
@@ -9,6 +9,7 @@ import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
apiclient "github.com/docker/docker/client"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -56,9 +57,13 @@ func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error
}

var errs []string
for _, image := range images {
dels, err := client.ImageRemove(ctx, image, options)
var fatalErr = false
for _, img := range images {
dels, err := client.ImageRemove(ctx, img, options)
if err != nil {
if !apiclient.IsErrNotFound(err) {
fatalErr = true
}
errs = append(errs, err.Error())
} else {
for _, del := range dels {
@@ -72,7 +77,11 @@ func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error
}

if len(errs) > 0 {
return errors.Errorf("%s", strings.Join(errs, "\n"))
msg := strings.Join(errs, "\n")
if !opts.force || fatalErr {
return errors.New(msg)
}
fmt.Fprintf(dockerCli.Err(), msg)
}
return nil
}
16  vendor/github.com/docker/cli/cli/command/image/save.go (generated, vendored)
@@ -2,6 +2,8 @@ package image

import (
"io"
"os"
"path/filepath"

"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
@@ -41,6 +43,10 @@ func runSave(dockerCli command.Cli, opts saveOptions) error {
return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect")
}

if err := validateOutputPath(opts.output); err != nil {
return errors.Wrap(err, "failed to save image")
}

responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images)
if err != nil {
return err
@@ -54,3 +60,13 @@ func runSave(dockerCli command.Cli, opts saveOptions) error {

return command.CopyToFile(opts.output, responseBody)
}

func validateOutputPath(path string) error {
dir := filepath.Dir(path)
if dir != "" && dir != "." {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return errors.Errorf("unable to validate output path: directory %q does not exist", dir)
}
}
return nil
}
209  vendor/github.com/docker/cli/cli/command/image/trust.go (generated, vendored)
|
@ -5,20 +5,20 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/docker/notary/client"
|
||||
"github.com/docker/notary/tuf/data"
|
||||
"github.com/opencontainers/go-digest"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/theupdateframework/notary/client"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
|
@ -28,8 +28,8 @@ type target struct {
|
|||
size int64
|
||||
}
|
||||
|
||||
// trustedPush handles content trust pushing of an image
|
||||
func trustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
|
||||
// TrustedPush handles content trust pushing of an image
|
||||
func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
|
||||
responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -84,7 +84,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(streams.Out(), "No tag specified, skipping trust metadata push")
|
||||
fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -97,31 +97,29 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
}
|
||||
|
||||
if target == nil {
|
||||
fmt.Fprintln(streams.Out(), "No targets found, please provide a specific tag in order to sign it")
|
||||
return nil
|
||||
return errors.Errorf("no targets found, please provide a specific tag in order to sign it")
|
||||
}
|
||||
|
||||
fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata")
|
||||
|
||||
repo, err := trust.GetNotaryRepository(streams, repoInfo, authConfig, "push", "pull")
|
||||
repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
|
||||
if err != nil {
|
||||
fmt.Fprintf(streams.Out(), "Error establishing connection to notary repository: %s\n", err)
|
||||
return err
|
||||
return errors.Wrap(err, "error establishing connection to trust repository")
|
||||
}
|
||||
|
||||
// get the latest repository metadata so we can figure out which roles to sign
|
||||
err = repo.Update(false)
|
||||
_, err = repo.ListTargets()
|
||||
|
||||
switch err.(type) {
|
||||
case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
|
||||
keys := repo.CryptoService.ListKeys(data.CanonicalRootRole)
|
||||
keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole)
|
||||
var rootKeyID string
|
||||
// always select the first root key
|
||||
if len(keys) > 0 {
|
||||
sort.Strings(keys)
|
||||
rootKeyID = keys[0]
|
||||
} else {
|
||||
rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
|
||||
rootPublicKey, err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -136,7 +134,7 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
err = repo.AddTarget(target, data.CanonicalTargetsRole)
|
||||
case nil:
|
||||
// already initialized and we have successfully downloaded the latest metadata
|
||||
err = addTargetToAllSignableRoles(repo, target)
|
||||
err = AddTargetToAllSignableRoles(repo, target)
|
||||
default:
|
||||
return trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
|
@ -146,59 +144,24 @@ func PushTrustedReference(streams command.Streams, repoInfo *registry.Repository
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(streams.Out(), "Failed to sign %q:%s - %s\n", repoInfo.Name.Name(), tag, err.Error())
|
||||
err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag)
|
||||
return trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(streams.Out(), "Successfully signed %q:%s\n", repoInfo.Name.Name(), tag)
|
||||
fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Attempt to add the image target to all the top level delegation roles we can
|
||||
// AddTargetToAllSignableRoles attempts to add the image target to all the top level delegation roles we can
|
||||
// (based on whether we have the signing key and whether the role's path allows
|
||||
// us to).
|
||||
// If there are no delegation roles, we add to the targets role.
|
||||
func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error {
|
||||
var signableRoles []string
|
||||
|
||||
// translate the full key names, which includes the GUN, into just the key IDs
|
||||
allCanonicalKeyIDs := make(map[string]struct{})
|
||||
for fullKeyID := range repo.CryptoService.ListAllKeys() {
|
||||
allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
|
||||
}
|
||||
|
||||
allDelegationRoles, err := repo.GetDelegationRoles()
|
||||
func AddTargetToAllSignableRoles(repo client.Repository, target *client.Target) error {
|
||||
signableRoles, err := trust.GetSignableRoles(repo, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if there are no delegation roles, then just try to sign it into the targets role
|
||||
if len(allDelegationRoles) == 0 {
|
||||
return repo.AddTarget(target, data.CanonicalTargetsRole)
|
||||
}
|
||||
|
||||
// there are delegation roles, find every delegation role we have a key for, and
|
||||
// attempt to sign into into all those roles.
|
||||
for _, delegationRole := range allDelegationRoles {
|
||||
// We do not support signing any delegation role that isn't a direct child of the targets role.
|
||||
// Also don't bother checking the keys if we can't add the target
|
||||
// to this role due to path restrictions
|
||||
if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, canonicalKeyID := range delegationRole.KeyIDs {
|
||||
if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
|
||||
signableRoles = append(signableRoles, delegationRole.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(signableRoles) == 0 {
|
||||
return errors.Errorf("no valid signing keys for delegation roles")
|
||||
}
|
||||
|
||||
return repo.AddTarget(target, signableRoles...)
|
||||
}
|
||||
|
||||
|
@ -217,57 +180,13 @@ func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.
|
|||
}
|
||||
|
||||
// trustedPull handles content trust pulling of an image
|
||||
func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
|
||||
var refs []target
|
||||
|
||||
notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
|
||||
func trustedPull(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, platform string) error {
|
||||
refs, err := getTrustedPullTargets(cli, imgRefAndAuth)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if tagged, isTagged := ref.(reference.NamedTagged); !isTagged {
|
||||
// List all targets
|
||||
targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole)
|
||||
if err != nil {
|
||||
return trust.NotaryError(ref.Name(), err)
|
||||
}
|
||||
for _, tgt := range targets {
|
||||
t, err := convertTarget(tgt.Target)
|
||||
if err != nil {
|
||||
fmt.Fprintf(cli.Out(), "Skipping target for %q\n", reference.FamiliarName(ref))
|
||||
continue
|
||||
}
|
||||
// Only list tags in the top level targets role or the releases delegation role - ignore
|
||||
// all other delegation roles
|
||||
if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole {
|
||||
continue
|
||||
}
|
||||
refs = append(refs, t)
|
||||
}
|
||||
if len(refs) == 0 {
|
||||
return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name()))
|
||||
}
|
||||
} else {
|
||||
t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
|
||||
if err != nil {
|
||||
return trust.NotaryError(ref.Name(), err)
|
||||
}
|
||||
// Only get the tag if it's in the top level targets role or the releases delegation role
|
||||
// ignore it if it's in any other delegation roles
|
||||
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
|
||||
return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag()))
|
||||
}
|
||||
|
||||
logrus.Debugf("retrieving target for %s role\n", t.Role)
|
||||
r, err := convertTarget(t.Target)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
refs = append(refs, r)
|
||||
}
|
||||
|
||||
ref := imgRefAndAuth.Reference()
|
||||
for i, r := range refs {
|
||||
displayTag := r.name
|
||||
if displayTag != "" {
|
||||
|
@ -279,7 +198,11 @@ func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := imagePullPrivileged(ctx, cli, authConfig, reference.FamiliarString(trustedRef), requestPrivilege, false); err != nil {
|
||||
updatedImgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), trustedRef.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := imagePullPrivileged(ctx, cli, updatedImgRefAndAuth, false, platform); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -295,19 +218,70 @@ func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.Reposi
|
|||
return nil
|
||||
}
|
||||
|
||||
// imagePullPrivileged pulls the image and displays it to the output
|
func imagePullPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error {
func getTrustedPullTargets(cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth) ([]target, error) {
	notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly)
	if err != nil {
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	encodedAuth, err := command.EncodeAuthToBase64(authConfig)
	ref := imgRefAndAuth.Reference()
	tagged, isTagged := ref.(reference.NamedTagged)
	if !isTagged {
		// List all targets
		targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole)
		if err != nil {
			return nil, trust.NotaryError(ref.Name(), err)
		}
		var refs []target
		for _, tgt := range targets {
			t, err := convertTarget(tgt.Target)
			if err != nil {
				fmt.Fprintf(cli.Err(), "Skipping target for %q\n", reference.FamiliarName(ref))
				continue
			}
			// Only list tags in the top level targets role or the releases delegation role - ignore
			// all other delegation roles
			if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole {
				continue
			}
			refs = append(refs, t)
		}
		if len(refs) == 0 {
			return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name()))
		}
		return refs, nil
	}

	t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return nil, trust.NotaryError(ref.Name(), err)
	}
	// Only get the tag if it's in the top level targets role or the releases delegation role
	// ignore it if it's in any other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag()))
	}

	logrus.Debugf("retrieving target for %s role", t.Role)
	r, err := convertTarget(t.Target)
	return []target{r}, err
}

// imagePullPrivileged pulls the image and displays it to the output
func imagePullPrivileged(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, all bool, platform string) error {
	ref := reference.FamiliarString(imgRefAndAuth.Reference())

	encodedAuth, err := command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
	if err != nil {
		return err
	}
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "pull")
	options := types.ImagePullOptions{
		RegistryAuth:  encodedAuth,
		PrivilegeFunc: requestPrivilege,
		All:           all,
		Platform:      platform,
	}

	responseBody, err := cli.Client().ImagePull(ctx, ref, options)
	if err != nil {
		return err

@@ -335,10 +309,9 @@ func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedT
	// Resolve the Auth config relevant for this server
	authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)

	notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
	notaryRepo, err := trust.GetNotaryRepository(cli.In(), cli.Out(), command.UserAgent(), repoInfo, &authConfig, "pull")
	if err != nil {
		fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err)
		return nil, err
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)

@@ -348,14 +321,13 @@ func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedT
	// Only list tags in the top level targets role or the releases delegation role - ignore
	// all other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag()))
		return nil, trust.NotaryError(repoInfo.Name.Name(), client.ErrNoSuchTarget(ref.Tag()))
	}
	r, err := convertTarget(t.Target)
	if err != nil {
		return nil, err

	}

	return reference.WithDigest(reference.TrimNamed(ref), r.digest)
}

@@ -378,7 +350,14 @@ func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canon
	familiarRef := reference.FamiliarString(ref)
	trustedFamiliarRef := reference.FamiliarString(trustedRef)

	fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef)
	fmt.Fprintf(cli.Err(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef)

	return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef)
}

// AuthResolver returns an auth resolver function from a command.Cli
func AuthResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
	return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
		return command.ResolveAuthConfig(ctx, cli, index)
	}
}
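As a side note on the ImagePullOptions usage in the hunk above: a minimal, self-contained sketch of driving the same Engine API call outside the CLI could look like the following. The image reference and the empty RegistryAuth value are illustrative assumptions, not values taken from this diff.

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Client configured from DOCKER_HOST / DOCKER_API_VERSION etc.
	apiClient, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// RegistryAuth would normally be produced by command.EncodeAuthToBase64;
	// an empty string works for anonymous pulls.
	options := types.ImagePullOptions{RegistryAuth: ""}
	rc, err := apiClient.ImagePull(context.Background(), "alpine:latest", options)
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream the JSON progress messages
}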

2 vendor/github.com/docker/cli/cli/command/out.go generated vendored

@@ -4,8 +4,8 @@ import (
	"io"
	"os"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/term"
	"github.com/sirupsen/logrus"
)

// OutStream is an output stream used by the DockerCli to write normal program

4 vendor/github.com/docker/cli/cli/command/registry.go generated vendored

@@ -70,7 +70,7 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI
		configKey = ElectAuthServer(ctx, cli)
	}

	a, _ := cli.CredentialsStore(configKey).Get(configKey)
	a, _ := cli.ConfigFile().GetAuthConfig(configKey)
	return a
}

@@ -85,7 +85,7 @@ func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultR
		serverAddress = registry.ConvertToHostname(serverAddress)
	}

	authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress)
	authconfig, err := cli.ConfigFile().GetAuthConfig(serverAddress)
	if err != nil {
		return authconfig, err
	}
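For context on the GetAuthConfig change above, a small sketch of reading credentials through the config file rather than a Cli instance might look like this; the registry key used here is an illustrative assumption.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Load ~/.docker/config.json (or $DOCKER_CONFIG/config.json).
	cfg, err := config.Load(config.Dir())
	if err != nil {
		panic(err)
	}
	// GetAuthConfig consults a credential helper when one is configured,
	// otherwise it falls back to the plain-text auths in the file.
	auth, err := cfg.GetAuthConfig("https://index.docker.io/v1/")
	if err != nil {
		panic(err)
	}
	fmt.Println("username:", auth.Username)
}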

4 vendor/github.com/docker/cli/cli/command/trust.go generated vendored

@@ -12,6 +12,10 @@ var (
	untrusted bool
)

func init() {
	untrusted = !getDefaultTrustState()
}

// AddTrustVerificationFlags adds content trust flags to the provided flagset
func AddTrustVerificationFlags(fs *pflag.FlagSet) {
	trusted := getDefaultTrustState()

8 vendor/github.com/docker/cli/cli/command/utils.go generated vendored

@@ -11,6 +11,7 @@ import (

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/pkg/system"
	"github.com/spf13/pflag"
)

// CopyToFile writes the content of the reader to the specified file

@@ -117,3 +118,10 @@ func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {

	return pruneFilters
}

// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
	flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
	flags.SetAnnotation("platform", "version", []string{"1.32"})
	flags.SetAnnotation("platform", "experimental", nil)
}

54 vendor/github.com/docker/cli/cli/config/config.go generated vendored
|
@ -1,11 +1,13 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -38,15 +40,6 @@ func SetDir(dir string) {
|
|||
configDir = dir
|
||||
}
|
||||
|
||||
// NewConfigFile initializes an empty configuration file for the given filename 'fn'
|
||||
func NewConfigFile(fn string) *configfile.ConfigFile {
|
||||
return &configfile.ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
HTTPHeaders: make(map[string]string),
|
||||
Filename: fn,
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
||||
// a non-nested reader
|
||||
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||
|
@ -75,46 +68,53 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
|
|||
configDir = Dir()
|
||||
}
|
||||
|
||||
configFile := configfile.ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
Filename: filepath.Join(configDir, ConfigFileName),
|
||||
}
|
||||
filename := filepath.Join(configDir, ConfigFileName)
|
||||
configFile := configfile.New(filename)
|
||||
|
||||
// Try happy path first - latest config file
|
||||
if _, err := os.Stat(configFile.Filename); err == nil {
|
||||
file, err := os.Open(configFile.Filename)
|
||||
if _, err := os.Stat(filename); err == nil {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return &configFile, errors.Errorf("%s - %v", configFile.Filename, err)
|
||||
return configFile, errors.Errorf("%s - %v", filename, err)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LoadFromReader(file)
|
||||
if err != nil {
|
||||
err = errors.Errorf("%s - %v", configFile.Filename, err)
|
||||
err = errors.Errorf("%s - %v", filename, err)
|
||||
}
|
||||
return &configFile, err
|
||||
return configFile, err
|
||||
} else if !os.IsNotExist(err) {
|
||||
// if file is there but we can't stat it for any reason other
|
||||
// than it doesn't exist then stop
|
||||
return &configFile, errors.Errorf("%s - %v", configFile.Filename, err)
|
||||
return configFile, errors.Errorf("%s - %v", filename, err)
|
||||
}
|
||||
|
||||
// Can't find latest config file so check for the old one
|
||||
confFile := filepath.Join(homedir.Get(), oldConfigfile)
|
||||
if _, err := os.Stat(confFile); err != nil {
|
||||
return &configFile, nil //missing file is not an error
|
||||
return configFile, nil //missing file is not an error
|
||||
}
|
||||
file, err := os.Open(confFile)
|
||||
if err != nil {
|
||||
return &configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
return configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
}
|
||||
defer file.Close()
|
||||
err = configFile.LegacyLoadFromReader(file)
|
||||
if err != nil {
|
||||
return &configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
return configFile, errors.Errorf("%s - %v", confFile, err)
|
||||
}
|
||||
|
||||
if configFile.HTTPHeaders == nil {
|
||||
configFile.HTTPHeaders = map[string]string{}
|
||||
}
|
||||
return &configFile, nil
|
||||
return configFile, nil
|
||||
}
|
||||
|
||||
// LoadDefaultConfigFile attempts to load the default config file and returns
|
||||
// an initialized ConfigFile struct if none is found.
|
||||
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
|
||||
configFile, err := Load(Dir())
|
||||
if err != nil {
|
||||
fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
|
||||
}
|
||||
if !configFile.ContainsAuth() {
|
||||
configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
|
||||
}
|
||||
return configFile
|
||||
}
|
||||
|
|

111 vendor/github.com/docker/cli/cli/config/configfile/file.go generated vendored
|
@ -9,6 +9,8 @@ import (
|
|||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/config/credentials"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
@ -41,6 +43,24 @@ type ConfigFile struct {
|
|||
ConfigFormat string `json:"configFormat,omitempty"`
|
||||
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyConfig contains proxy configuration settings
|
||||
type ProxyConfig struct {
|
||||
HTTPProxy string `json:"httpProxy,omitempty"`
|
||||
HTTPSProxy string `json:"httpsProxy,omitempty"`
|
||||
NoProxy string `json:"noProxy,omitempty"`
|
||||
FTPProxy string `json:"ftpProxy,omitempty"`
|
||||
}
|
||||
|
||||
// New initializes an empty configuration file for the given filename 'fn'
|
||||
func New(fn string) *ConfigFile {
|
||||
return &ConfigFile{
|
||||
AuthConfigs: make(map[string]types.AuthConfig),
|
||||
HTTPHeaders: make(map[string]string),
|
||||
Filename: fn,
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
|
||||
|
@ -108,6 +128,11 @@ func (configFile *ConfigFile) ContainsAuth() bool {
|
|||
len(configFile.AuthConfigs) > 0
|
||||
}
|
||||
|
||||
// GetAuthConfigs returns the mapping of repo to auth configuration
|
||||
func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
|
||||
return configFile.AuthConfigs
|
||||
}
|
||||
|
||||
// SaveToWriter encodes and writes out all the authorization information to
|
||||
// the given writer
|
||||
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
|
||||
|
@ -152,6 +177,39 @@ func (configFile *ConfigFile) Save() error {
|
|||
return configFile.SaveToWriter(f)
|
||||
}
|
||||
|
||||
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
||||
// then checking this against any environment variables provided to the container
|
||||
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) map[string]*string {
|
||||
var cfgKey string
|
||||
|
||||
if _, ok := configFile.Proxies[host]; !ok {
|
||||
cfgKey = "default"
|
||||
} else {
|
||||
cfgKey = host
|
||||
}
|
||||
|
||||
config := configFile.Proxies[cfgKey]
|
||||
permitted := map[string]*string{
|
||||
"HTTP_PROXY": &config.HTTPProxy,
|
||||
"HTTPS_PROXY": &config.HTTPSProxy,
|
||||
"NO_PROXY": &config.NoProxy,
|
||||
"FTP_PROXY": &config.FTPProxy,
|
||||
}
|
||||
m := opts.ConvertKVStringsToMapWithNil(runOpts)
|
||||
for k := range permitted {
|
||||
if *permitted[k] == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := m[k]; !ok {
|
||||
m[k] = permitted[k]
|
||||
}
|
||||
if _, ok := m[strings.ToLower(k)]; !ok {
|
||||
m[strings.ToLower(k)] = permitted[k]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// encodeAuth creates a base64 encoded string to containing authorization information
|
||||
func encodeAuth(authConfig *types.AuthConfig) string {
|
||||
if authConfig.Username == "" && authConfig.Password == "" {
|
||||
|
@ -188,3 +246,56 @@ func decodeAuth(authStr string) (string, string, error) {
|
|||
password := strings.Trim(arr[1], "\x00")
|
||||
return arr[0], password, nil
|
||||
}
|
||||
|
||||
// GetCredentialsStore returns a new credentials store from the settings in the
|
||||
// configuration file
|
||||
func (configFile *ConfigFile) GetCredentialsStore(serverAddress string) credentials.Store {
|
||||
if helper := getConfiguredCredentialStore(configFile, serverAddress); helper != "" {
|
||||
return credentials.NewNativeStore(configFile, helper)
|
||||
}
|
||||
return credentials.NewFileStore(configFile)
|
||||
}
|
||||
|
||||
// GetAuthConfig for a repository from the credential store
|
||||
func (configFile *ConfigFile) GetAuthConfig(serverAddress string) (types.AuthConfig, error) {
|
||||
return configFile.GetCredentialsStore(serverAddress).Get(serverAddress)
|
||||
}
|
||||
|
||||
// getConfiguredCredentialStore returns the credential helper configured for the
|
||||
// given registry, the default credsStore, or the empty string if neither are
|
||||
// configured.
|
||||
func getConfiguredCredentialStore(c *ConfigFile, serverAddress string) string {
|
||||
if c.CredentialHelpers != nil && serverAddress != "" {
|
||||
if helper, exists := c.CredentialHelpers[serverAddress]; exists {
|
||||
return helper
|
||||
}
|
||||
}
|
||||
return c.CredentialsStore
|
||||
}
|
||||
|
||||
// GetAllCredentials returns all of the credentials stored in all of the
|
||||
// configured credential stores.
|
||||
func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) {
|
||||
auths := make(map[string]types.AuthConfig)
|
||||
addAll := func(from map[string]types.AuthConfig) {
|
||||
for reg, ac := range from {
|
||||
auths[reg] = ac
|
||||
}
|
||||
}
|
||||
|
||||
for registry := range configFile.CredentialHelpers {
|
||||
helper := configFile.GetCredentialsStore(registry)
|
||||
newAuths, err := helper.GetAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addAll(newAuths)
|
||||
}
|
||||
defaultStore := configFile.GetCredentialsStore("")
|
||||
newAuths, err := defaultStore.GetAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addAll(newAuths)
|
||||
return auths, nil
|
||||
}
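A hedged sketch of how the ParseProxyConfig helper added in this file could be exercised; the proxy URL and daemon host below are made-up values for illustration only.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/configfile"
)

func main() {
	cfg := configfile.New("config.json") // filename is arbitrary for this sketch
	cfg.Proxies = map[string]configfile.ProxyConfig{
		"default": {HTTPProxy: "http://proxy.example.com:3128", NoProxy: "localhost"},
	}
	// Hosts not present in Proxies fall back to the "default" entry; explicit
	// --env style overrides passed as runOpts would win over the config values.
	env := cfg.ParseProxyConfig("unix:///var/run/docker.sock", nil)
	for name, value := range env {
		if value != nil {
			fmt.Printf("%s=%s\n", name, *value)
		}
	}
}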
|
||||
|
|

17 vendor/github.com/docker/cli/cli/config/credentials/credentials.go generated vendored Normal file

@@ -0,0 +1,17 @@
package credentials

import (
	"github.com/docker/docker/api/types"
)

// Store is the interface that any credentials store must implement.
type Store interface {
	// Erase removes credentials from the store for a given server.
	Erase(serverAddress string) error
	// Get retrieves credentials from the store for a given server.
	Get(serverAddress string) (types.AuthConfig, error)
	// GetAll retrieves all the credentials from the store.
	GetAll() (map[string]types.AuthConfig, error)
	// Store saves credentials in the store.
	Store(authConfig types.AuthConfig) error
}
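The Store interface above is small; a throwaway in-memory implementation, shown here purely as an illustrative sketch and not part of this vendor tree, can be handy in tests.

package memcreds

import (
	"github.com/docker/cli/cli/config/credentials"
	"github.com/docker/docker/api/types"
)

// memoryStore is a toy credentials.Store kept entirely in memory.
type memoryStore struct {
	auths map[string]types.AuthConfig
}

var _ credentials.Store = (*memoryStore)(nil)

// New returns an empty in-memory credentials store.
func New() credentials.Store {
	return &memoryStore{auths: map[string]types.AuthConfig{}}
}

func (m *memoryStore) Erase(serverAddress string) error {
	delete(m.auths, serverAddress)
	return nil
}

func (m *memoryStore) Get(serverAddress string) (types.AuthConfig, error) {
	return m.auths[serverAddress], nil
}

func (m *memoryStore) GetAll() (map[string]types.AuthConfig, error) {
	return m.auths, nil
}

func (m *memoryStore) Store(authConfig types.AuthConfig) error {
	m.auths[authConfig.ServerAddress] = authConfig
	return nil
}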

21 vendor/github.com/docker/cli/cli/config/credentials/default_store.go generated vendored Normal file

@@ -0,0 +1,21 @@
package credentials

import (
	"os/exec"
)

// DetectDefaultStore return the default credentials store for the platform if
// the store executable is available.
func DetectDefaultStore(store string) string {
	platformDefault := defaultCredentialsStore()

	// user defined or no default for platform
	if store != "" || platformDefault == "" {
		return store
	}

	if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil {
		return platformDefault
	}
	return ""
}
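A minimal sketch of calling DetectDefaultStore directly, assuming only that the docker-credential-* helper binaries may or may not be on PATH.

package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/credentials"
)

func main() {
	// With no store configured, this probes PATH for the platform default
	// ("osxkeychain", "wincred", "pass"/"secretservice") and returns "" if
	// the matching docker-credential-* helper binary is not installed.
	fmt.Println(credentials.DetectDefaultStore(""))
}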

5 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go generated vendored Normal file

@@ -0,0 +1,5 @@
package credentials

func defaultCredentialsStore() string {
	return "osxkeychain"
}

13 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go generated vendored Normal file

@@ -0,0 +1,13 @@
package credentials

import (
	"github.com/docker/docker-credential-helpers/pass"
)

func defaultCredentialsStore() string {
	if pass.PassInitialized {
		return "pass"
	}

	return "secretservice"
}

5 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go generated vendored Normal file

@@ -0,0 +1,5 @@
// +build !windows,!darwin,!linux

package credentials

const defaultCredentialsStore = ""

5 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go generated vendored Normal file

@@ -0,0 +1,5 @@
package credentials

func defaultCredentialsStore() string {
	return "wincred"
}

55 vendor/github.com/docker/cli/cli/config/credentials/file_store.go generated vendored Normal file
|
@ -0,0 +1,55 @@
|
|||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/registry"
|
||||
)
|
||||
|
||||
type store interface {
|
||||
Save() error
|
||||
GetAuthConfigs() map[string]types.AuthConfig
|
||||
}
|
||||
|
||||
// fileStore implements a credentials store using
|
||||
// the docker configuration file to keep the credentials in plain text.
|
||||
type fileStore struct {
|
||||
file store
|
||||
}
|
||||
|
||||
// NewFileStore creates a new file credentials store.
|
||||
func NewFileStore(file store) Store {
|
||||
return &fileStore{file: file}
|
||||
}
|
||||
|
||||
// Erase removes the given credentials from the file store.
|
||||
func (c *fileStore) Erase(serverAddress string) error {
|
||||
delete(c.file.GetAuthConfigs(), serverAddress)
|
||||
return c.file.Save()
|
||||
}
|
||||
|
||||
// Get retrieves credentials for a specific server from the file store.
|
||||
func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
|
||||
if !ok {
|
||||
// Maybe they have a legacy config file, we will iterate the keys converting
|
||||
// them to the new format and testing
|
||||
for r, ac := range c.file.GetAuthConfigs() {
|
||||
if serverAddress == registry.ConvertToHostname(r) {
|
||||
return ac, nil
|
||||
}
|
||||
}
|
||||
|
||||
authConfig = types.AuthConfig{}
|
||||
}
|
||||
return authConfig, nil
|
||||
}
|
||||
|
||||
func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
return c.file.GetAuthConfigs(), nil
|
||||
}
|
||||
|
||||
// Store saves the given credentials in the file store.
|
||||
func (c *fileStore) Store(authConfig types.AuthConfig) error {
|
||||
c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
|
||||
return c.file.Save()
|
||||
}
|

143 vendor/github.com/docker/cli/cli/config/credentials/native_store.go generated vendored Normal file
|
@ -0,0 +1,143 @@
|
|||
package credentials
|
||||
|
||||
import (
|
||||
"github.com/docker/docker-credential-helpers/client"
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
const (
|
||||
remoteCredentialsPrefix = "docker-credential-"
|
||||
tokenUsername = "<token>"
|
||||
)
|
||||
|
||||
// nativeStore implements a credentials store
|
||||
// using native keychain to keep credentials secure.
|
||||
// It piggybacks into a file store to keep users' emails.
|
||||
type nativeStore struct {
|
||||
programFunc client.ProgramFunc
|
||||
fileStore Store
|
||||
}
|
||||
|
||||
// NewNativeStore creates a new native store that
|
||||
// uses a remote helper program to manage credentials.
|
||||
func NewNativeStore(file store, helperSuffix string) Store {
|
||||
name := remoteCredentialsPrefix + helperSuffix
|
||||
return &nativeStore{
|
||||
programFunc: client.NewShellProgramFunc(name),
|
||||
fileStore: NewFileStore(file),
|
||||
}
|
||||
}
|
||||
|
||||
// Erase removes the given credentials from the native store.
|
||||
func (c *nativeStore) Erase(serverAddress string) error {
|
||||
if err := client.Erase(c.programFunc, serverAddress); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Fallback to plain text store to remove email
|
||||
return c.fileStore.Erase(serverAddress)
|
||||
}
|
||||
|
||||
// Get retrieves credentials for a specific server from the native store.
|
||||
func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||
// load user email if it exist or an empty auth config.
|
||||
auth, _ := c.fileStore.Get(serverAddress)
|
||||
|
||||
creds, err := c.getCredentialsFromStore(serverAddress)
|
||||
if err != nil {
|
||||
return auth, err
|
||||
}
|
||||
auth.Username = creds.Username
|
||||
auth.IdentityToken = creds.IdentityToken
|
||||
auth.Password = creds.Password
|
||||
|
||||
return auth, nil
|
||||
}
|
||||
|
||||
// GetAll retrieves all the credentials from the native store.
|
||||
func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||
auths, err := c.listCredentialsInStore()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Emails are only stored in the file store.
|
||||
// This call can be safely eliminated when emails are removed.
|
||||
fileConfigs, _ := c.fileStore.GetAll()
|
||||
|
||||
authConfigs := make(map[string]types.AuthConfig)
|
||||
for registry := range auths {
|
||||
creds, err := c.getCredentialsFromStore(registry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ac := fileConfigs[registry] // might contain Email
|
||||
ac.Username = creds.Username
|
||||
ac.Password = creds.Password
|
||||
ac.IdentityToken = creds.IdentityToken
|
||||
authConfigs[registry] = ac
|
||||
}
|
||||
|
||||
return authConfigs, nil
|
||||
}
|
||||
|
||||
// Store saves the given credentials in the file store.
|
||||
func (c *nativeStore) Store(authConfig types.AuthConfig) error {
|
||||
if err := c.storeCredentialsInStore(authConfig); err != nil {
|
||||
return err
|
||||
}
|
||||
authConfig.Username = ""
|
||||
authConfig.Password = ""
|
||||
authConfig.IdentityToken = ""
|
||||
|
||||
// Fallback to old credential in plain text to save only the email
|
||||
return c.fileStore.Store(authConfig)
|
||||
}
|
||||
|
||||
// storeCredentialsInStore executes the command to store the credentials in the native store.
|
||||
func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
|
||||
creds := &credentials.Credentials{
|
||||
ServerURL: config.ServerAddress,
|
||||
Username: config.Username,
|
||||
Secret: config.Password,
|
||||
}
|
||||
|
||||
if config.IdentityToken != "" {
|
||||
creds.Username = tokenUsername
|
||||
creds.Secret = config.IdentityToken
|
||||
}
|
||||
|
||||
return client.Store(c.programFunc, creds)
|
||||
}
|
||||
|
||||
// getCredentialsFromStore executes the command to get the credentials from the native store.
|
||||
func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
|
||||
var ret types.AuthConfig
|
||||
|
||||
creds, err := client.Get(c.programFunc, serverAddress)
|
||||
if err != nil {
|
||||
if credentials.IsErrCredentialsNotFound(err) {
|
||||
// do not return an error if the credentials are not
|
||||
// in the keychain. Let docker ask for new credentials.
|
||||
return ret, nil
|
||||
}
|
||||
return ret, err
|
||||
}
|
||||
|
||||
if creds.Username == tokenUsername {
|
||||
ret.IdentityToken = creds.Secret
|
||||
} else {
|
||||
ret.Password = creds.Secret
|
||||
ret.Username = creds.Username
|
||||
}
|
||||
|
||||
ret.ServerAddress = serverAddress
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// listCredentialsInStore returns a listing of stored credentials as a map of
|
||||
// URL -> username.
|
||||
func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
|
||||
return client.List(c.programFunc)
|
||||
}
|

21 vendor/github.com/docker/cli/cli/required.go generated vendored
|
@ -18,7 +18,7 @@ func NoArgs(cmd *cobra.Command, args []string) error {
|
|||
}
|
||||
|
||||
return errors.Errorf(
|
||||
"\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
"%q accepts no arguments.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
cmd.CommandPath(),
|
||||
cmd.CommandPath(),
|
||||
cmd.UseLine(),
|
||||
|
@ -33,9 +33,10 @@ func RequiresMinArgs(min int) cobra.PositionalArgs {
|
|||
return nil
|
||||
}
|
||||
return errors.Errorf(
|
||||
"\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
"%q requires at least %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
cmd.CommandPath(),
|
||||
min,
|
||||
pluralize("argument", min),
|
||||
cmd.CommandPath(),
|
||||
cmd.UseLine(),
|
||||
cmd.Short,
|
||||
|
@ -50,9 +51,10 @@ func RequiresMaxArgs(max int) cobra.PositionalArgs {
|
|||
return nil
|
||||
}
|
||||
return errors.Errorf(
|
||||
"\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
"%q requires at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
cmd.CommandPath(),
|
||||
max,
|
||||
pluralize("argument", max),
|
||||
cmd.CommandPath(),
|
||||
cmd.UseLine(),
|
||||
cmd.Short,
|
||||
|
@ -67,10 +69,11 @@ func RequiresRangeArgs(min int, max int) cobra.PositionalArgs {
|
|||
return nil
|
||||
}
|
||||
return errors.Errorf(
|
||||
"\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
"%q requires at least %d and at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
cmd.CommandPath(),
|
||||
min,
|
||||
max,
|
||||
pluralize("argument", max),
|
||||
cmd.CommandPath(),
|
||||
cmd.UseLine(),
|
||||
cmd.Short,
|
||||
|
@ -85,12 +88,20 @@ func ExactArgs(number int) cobra.PositionalArgs {
|
|||
return nil
|
||||
}
|
||||
return errors.Errorf(
|
||||
"\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
"%q requires exactly %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
|
||||
cmd.CommandPath(),
|
||||
number,
|
||||
pluralize("argument", number),
|
||||
cmd.CommandPath(),
|
||||
cmd.UseLine(),
|
||||
cmd.Short,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func pluralize(word string, number int) string {
|
||||
if number == 1 {
|
||||
return word
|
||||
}
|
||||
return word + "s"
|
||||
}
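For illustration, the positional-argument validators in this file plug straight into cobra's Args field; a minimal sketch follows, where the command name and behaviour are assumptions for the example, not part of this diff.

package main

import (
	"fmt"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: cli.ExactArgs(1), // produces the reworded error text above on misuse
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello", args[0])
			return nil
		},
	}
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}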
|
||||
|
|

64 vendor/github.com/docker/cli/opts/duration.go generated vendored Normal file
|
@ -0,0 +1,64 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// PositiveDurationOpt is an option type for time.Duration that uses a pointer.
|
||||
// It behave similarly to DurationOpt but only allows positive duration values.
|
||||
type PositiveDurationOpt struct {
|
||||
DurationOpt
|
||||
}
|
||||
|
||||
// Set a new value on the option. Setting a negative duration value will cause
|
||||
// an error to be returned.
|
||||
func (d *PositiveDurationOpt) Set(s string) error {
|
||||
err := d.DurationOpt.Set(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *d.DurationOpt.value < 0 {
|
||||
return errors.Errorf("duration cannot be negative")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DurationOpt is an option type for time.Duration that uses a pointer. This
|
||||
// allows us to get nil values outside, instead of defaulting to 0
|
||||
type DurationOpt struct {
|
||||
value *time.Duration
|
||||
}
|
||||
|
||||
// NewDurationOpt creates a DurationOpt with the specified duration
|
||||
func NewDurationOpt(value *time.Duration) *DurationOpt {
|
||||
return &DurationOpt{
|
||||
value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// Set a new value on the option
|
||||
func (d *DurationOpt) Set(s string) error {
|
||||
v, err := time.ParseDuration(s)
|
||||
d.value = &v
|
||||
return err
|
||||
}
|
||||
|
||||
// Type returns the type of this option, which will be displayed in `--help` output
|
||||
func (d *DurationOpt) Type() string {
|
||||
return "duration"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (d *DurationOpt) String() string {
|
||||
if d.value != nil {
|
||||
return d.value.String()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Value returns the time.Duration
|
||||
func (d *DurationOpt) Value() *time.Duration {
|
||||
return d.value
|
||||
}
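A small usage sketch for DurationOpt/PositiveDurationOpt with pflag, assuming only the flag name used here.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
	"github.com/spf13/pflag"
)

func main() {
	var timeout opts.PositiveDurationOpt
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.Var(&timeout, "timeout", "health check timeout")

	// Unset flags stay nil, which is the point of the pointer-backed value.
	_ = flags.Parse([]string{"--timeout", "30s"})
	if v := timeout.Value(); v != nil {
		fmt.Println("timeout:", *v)
	}
}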
|

0 vendor/github.com/docker/docker/opts/ip.go → vendor/github.com/docker/cli/opts/ip.go generated vendored

174 vendor/github.com/docker/cli/opts/mount.go generated vendored Normal file
|
@ -0,0 +1,174 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
mounttypes "github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// MountOpt is a Value type for parsing mounts
|
||||
type MountOpt struct {
|
||||
values []mounttypes.Mount
|
||||
}
|
||||
|
||||
// Set a new mount value
|
||||
// nolint: gocyclo
|
||||
func (m *MountOpt) Set(value string) error {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mount := mounttypes.Mount{}
|
||||
|
||||
volumeOptions := func() *mounttypes.VolumeOptions {
|
||||
if mount.VolumeOptions == nil {
|
||||
mount.VolumeOptions = &mounttypes.VolumeOptions{
|
||||
Labels: make(map[string]string),
|
||||
}
|
||||
}
|
||||
if mount.VolumeOptions.DriverConfig == nil {
|
||||
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
|
||||
}
|
||||
return mount.VolumeOptions
|
||||
}
|
||||
|
||||
bindOptions := func() *mounttypes.BindOptions {
|
||||
if mount.BindOptions == nil {
|
||||
mount.BindOptions = new(mounttypes.BindOptions)
|
||||
}
|
||||
return mount.BindOptions
|
||||
}
|
||||
|
||||
tmpfsOptions := func() *mounttypes.TmpfsOptions {
|
||||
if mount.TmpfsOptions == nil {
|
||||
mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
|
||||
}
|
||||
return mount.TmpfsOptions
|
||||
}
|
||||
|
||||
setValueOnMap := func(target map[string]string, value string) {
|
||||
parts := strings.SplitN(value, "=", 2)
|
||||
if len(parts) == 1 {
|
||||
target[value] = ""
|
||||
} else {
|
||||
target[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
|
||||
mount.Type = mounttypes.TypeVolume // default to volume mounts
|
||||
// Set writable as the default
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
key := strings.ToLower(parts[0])
|
||||
|
||||
if len(parts) == 1 {
|
||||
switch key {
|
||||
case "readonly", "ro":
|
||||
mount.ReadOnly = true
|
||||
continue
|
||||
case "volume-nocopy":
|
||||
volumeOptions().NoCopy = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
||||
}
|
||||
|
||||
value := parts[1]
|
||||
switch key {
|
||||
case "type":
|
||||
mount.Type = mounttypes.Type(strings.ToLower(value))
|
||||
case "source", "src":
|
||||
mount.Source = value
|
||||
case "target", "dst", "destination":
|
||||
mount.Target = value
|
||||
case "readonly", "ro":
|
||||
mount.ReadOnly, err = strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
||||
}
|
||||
case "consistency":
|
||||
mount.Consistency = mounttypes.Consistency(strings.ToLower(value))
|
||||
case "bind-propagation":
|
||||
bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
|
||||
case "volume-nocopy":
|
||||
volumeOptions().NoCopy, err = strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for volume-nocopy: %s", value)
|
||||
}
|
||||
case "volume-label":
|
||||
setValueOnMap(volumeOptions().Labels, value)
|
||||
case "volume-driver":
|
||||
volumeOptions().DriverConfig.Name = value
|
||||
case "volume-opt":
|
||||
if volumeOptions().DriverConfig.Options == nil {
|
||||
volumeOptions().DriverConfig.Options = make(map[string]string)
|
||||
}
|
||||
setValueOnMap(volumeOptions().DriverConfig.Options, value)
|
||||
case "tmpfs-size":
|
||||
sizeBytes, err := units.RAMInBytes(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
||||
}
|
||||
tmpfsOptions().SizeBytes = sizeBytes
|
||||
case "tmpfs-mode":
|
||||
ui64, err := strconv.ParseUint(value, 8, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid value for %s: %s", key, value)
|
||||
}
|
||||
tmpfsOptions().Mode = os.FileMode(ui64)
|
||||
default:
|
||||
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
|
||||
}
|
||||
}
|
||||
|
||||
if mount.Type == "" {
|
||||
return fmt.Errorf("type is required")
|
||||
}
|
||||
|
||||
if mount.Target == "" {
|
||||
return fmt.Errorf("target is required")
|
||||
}
|
||||
|
||||
if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
|
||||
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
|
||||
}
|
||||
if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
|
||||
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
|
||||
}
|
||||
if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
|
||||
return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
|
||||
}
|
||||
|
||||
m.values = append(m.values, mount)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (m *MountOpt) Type() string {
|
||||
return "mount"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (m *MountOpt) String() string {
|
||||
mounts := []string{}
|
||||
for _, mount := range m.values {
|
||||
repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
|
||||
mounts = append(mounts, repr)
|
||||
}
|
||||
return strings.Join(mounts, ", ")
|
||||
}
|
||||
|
||||
// Value returns the mounts
|
||||
func (m *MountOpt) Value() []mounttypes.Mount {
|
||||
return m.values
|
||||
}
|

106 vendor/github.com/docker/cli/opts/network.go generated vendored Normal file
|
@ -0,0 +1,106 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
networkOptName = "name"
|
||||
networkOptAlias = "alias"
|
||||
driverOpt = "driver-opt"
|
||||
)
|
||||
|
||||
// NetworkAttachmentOpts represents the network options for endpoint creation
|
||||
type NetworkAttachmentOpts struct {
|
||||
Target string
|
||||
Aliases []string
|
||||
DriverOpts map[string]string
|
||||
}
|
||||
|
||||
// NetworkOpt represents a network config in swarm mode.
|
||||
type NetworkOpt struct {
|
||||
options []NetworkAttachmentOpts
|
||||
}
|
||||
|
||||
// Set networkopts value
|
||||
func (n *NetworkOpt) Set(value string) error {
|
||||
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var netOpt NetworkAttachmentOpts
|
||||
if longSyntax {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
netOpt.Aliases = []string{}
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
|
||||
if len(parts) < 2 {
|
||||
return fmt.Errorf("invalid field %s", field)
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
||||
value := strings.TrimSpace(strings.ToLower(parts[1]))
|
||||
|
||||
switch key {
|
||||
case networkOptName:
|
||||
netOpt.Target = value
|
||||
case networkOptAlias:
|
||||
netOpt.Aliases = append(netOpt.Aliases, value)
|
||||
case driverOpt:
|
||||
key, value, err = parseDriverOpt(value)
|
||||
if err == nil {
|
||||
if netOpt.DriverOpts == nil {
|
||||
netOpt.DriverOpts = make(map[string]string)
|
||||
}
|
||||
netOpt.DriverOpts[key] = value
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid field key %s", key)
|
||||
}
|
||||
}
|
||||
if len(netOpt.Target) == 0 {
|
||||
return fmt.Errorf("network name/id is not specified")
|
||||
}
|
||||
} else {
|
||||
netOpt.Target = value
|
||||
}
|
||||
n.options = append(n.options, netOpt)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (n *NetworkOpt) Type() string {
|
||||
return "network"
|
||||
}
|
||||
|
||||
// Value returns the networkopts
|
||||
func (n *NetworkOpt) Value() []NetworkAttachmentOpts {
|
||||
return n.options
|
||||
}
|
||||
|
||||
// String returns the network opts as a string
|
||||
func (n *NetworkOpt) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func parseDriverOpt(driverOpt string) (string, string, error) {
|
||||
parts := strings.SplitN(driverOpt, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return "", "", fmt.Errorf("invalid key value pair format in driver options")
|
||||
}
|
||||
key := strings.TrimSpace(strings.ToLower(parts[0]))
|
||||
value := strings.TrimSpace(strings.ToLower(parts[1]))
|
||||
return key, value, nil
|
||||
}
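A short usage sketch for NetworkOpt's long syntax; the network name, alias, and driver option values are illustrative.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var nets opts.NetworkOpt
	// Long syntax with an alias and a driver option, as accepted by --network.
	if err := nets.Set("name=backend,alias=db,driver-opt=com.example.key=value"); err != nil {
		panic(err)
	}
	for _, n := range nets.Value() {
		fmt.Println(n.Target, n.Aliases, n.DriverOpts)
	}
}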
|
|
@@ -179,7 +179,7 @@ func (opts *MapOpts) GetAll() map[string]string {
}

func (opts *MapOpts) String() string {
	return fmt.Sprintf("%v", map[string]string((opts.values)))
	return fmt.Sprintf("%v", opts.values)
}

// Type returns a string name for this Option type

163 vendor/github.com/docker/cli/opts/port.go generated vendored Normal file
|
@ -0,0 +1,163 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
const (
|
||||
portOptTargetPort = "target"
|
||||
portOptPublishedPort = "published"
|
||||
portOptProtocol = "protocol"
|
||||
portOptMode = "mode"
|
||||
)
|
||||
|
||||
// PortOpt represents a port config in swarm mode.
|
||||
type PortOpt struct {
|
||||
ports []swarm.PortConfig
|
||||
}
|
||||
|
||||
// Set a new port value
|
||||
// nolint: gocyclo
|
||||
func (p *PortOpt) Set(value string) error {
|
||||
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if longSyntax {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pConfig := swarm.PortConfig{}
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid field %s", field)
|
||||
}
|
||||
|
||||
key := strings.ToLower(parts[0])
|
||||
value := strings.ToLower(parts[1])
|
||||
|
||||
switch key {
|
||||
case portOptProtocol:
|
||||
if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
|
||||
return fmt.Errorf("invalid protocol value %s", value)
|
||||
}
|
||||
|
||||
pConfig.Protocol = swarm.PortConfigProtocol(value)
|
||||
case portOptMode:
|
||||
if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
|
||||
return fmt.Errorf("invalid publish mode value %s", value)
|
||||
}
|
||||
|
||||
pConfig.PublishMode = swarm.PortConfigPublishMode(value)
|
||||
case portOptTargetPort:
|
||||
tPort, err := strconv.ParseUint(value, 10, 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pConfig.TargetPort = uint32(tPort)
|
||||
case portOptPublishedPort:
|
||||
pPort, err := strconv.ParseUint(value, 10, 16)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pConfig.PublishedPort = uint32(pPort)
|
||||
default:
|
||||
return fmt.Errorf("invalid field key %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if pConfig.TargetPort == 0 {
|
||||
return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
|
||||
}
|
||||
|
||||
if pConfig.PublishMode == "" {
|
||||
pConfig.PublishMode = swarm.PortConfigPublishModeIngress
|
||||
}
|
||||
|
||||
if pConfig.Protocol == "" {
|
||||
pConfig.Protocol = swarm.PortConfigProtocolTCP
|
||||
}
|
||||
|
||||
p.ports = append(p.ports, pConfig)
|
||||
} else {
|
||||
// short syntax
|
||||
portConfigs := []swarm.PortConfig{}
|
||||
ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, portBindings := range portBindingMap {
|
||||
for _, portBinding := range portBindings {
|
||||
if portBinding.HostIP != "" {
|
||||
return fmt.Errorf("hostip is not supported")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for port := range ports {
|
||||
portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
portConfigs = append(portConfigs, portConfig...)
|
||||
}
|
||||
p.ports = append(p.ports, portConfigs...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (p *PortOpt) Type() string {
|
||||
return "port"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (p *PortOpt) String() string {
|
||||
ports := []string{}
|
||||
for _, port := range p.ports {
|
||||
repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
|
||||
ports = append(ports, repr)
|
||||
}
|
||||
return strings.Join(ports, ", ")
|
||||
}
|
||||
|
||||
// Value returns the ports
|
||||
func (p *PortOpt) Value() []swarm.PortConfig {
|
||||
return p.ports
|
||||
}
|
||||
|
||||
// ConvertPortToPortConfig converts ports to the swarm type
|
||||
func ConvertPortToPortConfig(
|
||||
port nat.Port,
|
||||
portBindings map[nat.Port][]nat.PortBinding,
|
||||
) ([]swarm.PortConfig, error) {
|
||||
ports := []swarm.PortConfig{}
|
||||
|
||||
for _, binding := range portBindings[port] {
|
||||
hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
|
||||
if err != nil && binding.HostPort != "" {
|
||||
return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
|
||||
}
|
||||
ports = append(ports, swarm.PortConfig{
|
||||
//TODO Name: ?
|
||||
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
|
||||
TargetPort: uint32(port.Int()),
|
||||
PublishedPort: uint32(hostPort),
|
||||
PublishMode: swarm.PortConfigPublishModeIngress,
|
||||
})
|
||||
}
|
||||
return ports, nil
|
||||
}
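A short usage sketch for PortOpt's long syntax, roughly equivalent to the short form 8080:80/tcp; the port numbers are illustrative.

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var ports opts.PortOpt
	if err := ports.Set("published=8080,target=80,protocol=tcp,mode=ingress"); err != nil {
		panic(err)
	}
	for _, p := range ports.Value() {
		fmt.Printf("%d -> %d/%s (%s)\n", p.PublishedPort, p.TargetPort, p.Protocol, p.PublishMode)
	}
}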

@@ -18,7 +18,7 @@ func (s *QuotedString) Type() string {
}

func (s *QuotedString) String() string {
	return string(*s.value)
	return *s.value
}

func trimQuotes(value string) string {

98 vendor/github.com/docker/cli/opts/secret.go generated vendored Normal file
|
@ -0,0 +1,98 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
swarmtypes "github.com/docker/docker/api/types/swarm"
|
||||
)
|
||||
|
||||
// SecretOpt is a Value type for parsing secrets
|
||||
type SecretOpt struct {
|
||||
values []*swarmtypes.SecretReference
|
||||
}
|
||||
|
||||
// Set a new secret value
|
||||
func (o *SecretOpt) Set(value string) error {
|
||||
csvReader := csv.NewReader(strings.NewReader(value))
|
||||
fields, err := csvReader.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := &swarmtypes.SecretReference{
|
||||
File: &swarmtypes.SecretReferenceFileTarget{
|
||||
UID: "0",
|
||||
GID: "0",
|
||||
Mode: 0444,
|
||||
},
|
||||
}
|
||||
|
||||
// support a simple syntax of --secret foo
|
||||
if len(fields) == 1 {
|
||||
options.File.Name = fields[0]
|
||||
options.SecretName = fields[0]
|
||||
o.values = append(o.values, options)
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, field := range fields {
|
||||
parts := strings.SplitN(field, "=", 2)
|
||||
key := strings.ToLower(parts[0])
|
||||
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
|
||||
}
|
||||
|
||||
value := parts[1]
|
||||
switch key {
|
||||
case "source", "src":
|
||||
options.SecretName = value
|
||||
case "target":
|
||||
options.File.Name = value
|
||||
case "uid":
|
||||
options.File.UID = value
|
||||
case "gid":
|
||||
options.File.GID = value
|
||||
case "mode":
|
||||
m, err := strconv.ParseUint(value, 0, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid mode specified: %v", err)
|
||||
}
|
||||
|
||||
options.File.Mode = os.FileMode(m)
|
||||
default:
|
||||
return fmt.Errorf("invalid field in secret request: %s", key)
|
||||
}
|
||||
}
|
||||
|
||||
if options.SecretName == "" {
|
||||
return fmt.Errorf("source is required")
|
||||
}
|
||||
|
||||
o.values = append(o.values, options)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of this option
|
||||
func (o *SecretOpt) Type() string {
|
||||
return "secret"
|
||||
}
|
||||
|
||||
// String returns a string repr of this option
|
||||
func (o *SecretOpt) String() string {
|
||||
secrets := []string{}
|
||||
for _, secret := range o.values {
|
||||
repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
|
||||
secrets = append(secrets, repr)
|
||||
}
|
||||
return strings.Join(secrets, ", ")
|
||||
}
|
||||
|
||||
// Value returns the secret requests
|
||||
func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
|
||||
return o.values
|
||||
}

@@ -52,10 +52,7 @@ func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
	}

	return &blkiodev.ThrottleDevice{
		Path: split[0],
		Rate: uint64(rate),
	}, nil
	return &blkiodev.ThrottleDevice{Path: split[0], Rate: rate}, nil
}

// ThrottledeviceOpt defines a map of ThrottleDevices

84 vendor/github.com/docker/cli/opts/weightdevice.go generated vendored Normal file
|
@ -0,0 +1,84 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/blkiodev"
|
||||
)
|
||||
|
||||
// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
|
||||
type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)
|
||||
|
||||
// ValidateWeightDevice validates that the specified string has a valid device-weight format.
|
||||
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
|
||||
split := strings.SplitN(val, ":", 2)
|
||||
if len(split) != 2 {
|
||||
return nil, fmt.Errorf("bad format: %s", val)
|
||||
}
|
||||
if !strings.HasPrefix(split[0], "/dev/") {
|
||||
return nil, fmt.Errorf("bad format for device path: %s", val)
|
||||
}
|
||||
weight, err := strconv.ParseUint(split[1], 10, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid weight for device: %s", val)
|
||||
}
|
||||
if weight > 0 && (weight < 10 || weight > 1000) {
|
||||
return nil, fmt.Errorf("invalid weight for device: %s", val)
|
||||
}
|
||||
|
||||
return &blkiodev.WeightDevice{
|
||||
Path: split[0],
|
||||
Weight: uint16(weight),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WeightdeviceOpt defines a map of WeightDevices
|
||||
type WeightdeviceOpt struct {
|
||||
values []*blkiodev.WeightDevice
|
||||
validator ValidatorWeightFctType
|
||||
}
|
||||
|
||||
// NewWeightdeviceOpt creates a new WeightdeviceOpt
|
||||
func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
|
||||
values := []*blkiodev.WeightDevice{}
|
||||
return WeightdeviceOpt{
|
||||
values: values,
|
||||
validator: validator,
|
||||
}
|
||||
}
|
||||
|
||||
// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
|
||||
func (opt *WeightdeviceOpt) Set(val string) error {
|
||||
var value *blkiodev.WeightDevice
|
||||
if opt.validator != nil {
|
||||
v, err := opt.validator(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value = v
|
||||
}
|
||||
(opt.values) = append((opt.values), value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns WeightdeviceOpt values as a string.
|
||||
func (opt *WeightdeviceOpt) String() string {
|
||||
var out []string
|
||||
for _, v := range opt.values {
|
||||
out = append(out, v.String())
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v", out)
|
||||
}
|
||||
|
||||
// GetList returns a slice of pointers to WeightDevices.
|
||||
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
|
||||
return opt.values
|
||||
}
|
||||
|
||||
// Type returns the option type
|
||||
func (opt *WeightdeviceOpt) Type() string {
|
||||
return "list"
|
||||
}
|

2 vendor/github.com/docker/distribution/blobs.go generated vendored

@@ -152,7 +152,7 @@ type BlobProvider interface {

// BlobServer can serve blobs via http.
type BlobServer interface {
	// ServeBlob attempts to serve the blob, identifed by dgst, via http. The
	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
	// service may decide to redirect the client elsewhere or serve the data
	// directly.
	//

2 vendor/github.com/docker/distribution/context/doc.go generated vendored

@@ -64,7 +64,7 @@
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped varaibles.
// request scoped variables.
//
// HTTP Requests
//

2 vendor/github.com/docker/distribution/context/http.go generated vendored

@@ -8,9 +8,9 @@ import (
	"sync"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/distribution/uuid"
	"github.com/gorilla/mux"
	log "github.com/sirupsen/logrus"
)

// Common errors used with this package.

2 vendor/github.com/docker/distribution/context/logger.go generated vendored

@@ -3,7 +3,7 @@ package context
import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/sirupsen/logrus"
	"runtime"
)

2 vendor/github.com/docker/distribution/errors.go generated vendored

@@ -77,7 +77,7 @@ func (err ErrManifestUnknownRevision) Error() string {
type ErrManifestUnverified struct{}

func (ErrManifestUnverified) Error() string {
	return fmt.Sprintf("unverified manifest")
	return "unverified manifest"
}

// ErrManifestVerification provides a type to collect errors encountered

2 vendor/github.com/docker/distribution/reference/reference.go generated vendored

@@ -15,7 +15,7 @@
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value

116 vendor/github.com/docker/distribution/registry/api/v2/urls.go generated vendored
|
@ -1,10 +1,9 @@
|
|||
package v2
|
||||
|
||||
import (
|
||||
"net"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
|
@ -48,66 +47,42 @@ func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
|
|||
// NewURLBuilderFromRequest uses information from an *http.Request to
|
||||
// construct the root url.
|
||||
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
|
||||
var scheme string
|
||||
|
||||
forwardedProto := r.Header.Get("X-Forwarded-Proto")
|
||||
// TODO: log the error
|
||||
forwardedHeader, _, _ := parseForwardedHeader(r.Header.Get("Forwarded"))
|
||||
|
||||
switch {
|
||||
case len(forwardedProto) > 0:
|
||||
scheme = forwardedProto
|
||||
case len(forwardedHeader["proto"]) > 0:
|
||||
scheme = forwardedHeader["proto"]
|
||||
case r.TLS != nil:
|
||||
scheme = "https"
|
||||
case len(r.URL.Scheme) > 0:
|
||||
scheme = r.URL.Scheme
|
||||
default:
|
||||
var (
|
||||
scheme = "http"
|
||||
host = r.Host
|
||||
)
|
||||
|
||||
if r.TLS != nil {
|
||||
scheme = "https"
|
||||
} else if len(r.URL.Scheme) > 0 {
|
||||
scheme = r.URL.Scheme
|
||||
}
|
||||
|
||||
host := r.Host
|
||||
|
||||
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
|
||||
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
||||
// comma-separated list of hosts, to which each proxy appends the
|
||||
// requested host. We want to grab the first from this comma-separated
|
||||
// list.
|
||||
hosts := strings.SplitN(forwardedHost, ",", 2)
|
||||
host = strings.TrimSpace(hosts[0])
|
||||
} else if addr, exists := forwardedHeader["for"]; exists {
|
||||
host = addr
|
||||
} else if h, exists := forwardedHeader["host"]; exists {
|
||||
host = h
|
||||
}
|
||||
|
||||
portLessHost, port := host, ""
|
||||
if !isIPv6Address(portLessHost) {
|
||||
// with go 1.6, this would treat the last part of IPv6 address as a port
|
||||
portLessHost, port, _ = net.SplitHostPort(host)
|
||||
}
|
||||
if forwardedPort := r.Header.Get("X-Forwarded-Port"); len(port) == 0 && len(forwardedPort) > 0 {
|
||||
ports := strings.SplitN(forwardedPort, ",", 2)
|
||||
forwardedPort = strings.TrimSpace(ports[0])
|
||||
if _, err := strconv.ParseInt(forwardedPort, 10, 32); err == nil {
|
||||
port = forwardedPort
|
||||
// Handle fowarded headers
|
||||
// Prefer "Forwarded" header as defined by rfc7239 if given
|
||||
// see https://tools.ietf.org/html/rfc7239
|
||||
if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
|
||||
forwardedHeader, _, err := parseForwardedHeader(forwarded)
|
||||
if err == nil {
|
||||
if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
|
||||
scheme = fproto
|
||||
}
|
||||
if fhost := forwardedHeader["host"]; len(fhost) > 0 {
|
||||
host = fhost
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(portLessHost) > 0 {
|
||||
host = portLessHost
|
||||
}
|
||||
if len(port) > 0 {
|
||||
// remove enclosing brackets of ipv6 address otherwise they will be duplicated
|
||||
if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
} else {
|
||||
if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
|
||||
scheme = forwardedProto
|
||||
}
|
||||
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
|
||||
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
|
||||
// comma-separated list of hosts, to which each proxy appends the
|
||||
// requested host. We want to grab the first from this comma-separated
|
||||
// list.
|
||||
hosts := strings.SplitN(forwardedHost, ",", 2)
|
||||
host = strings.TrimSpace(hosts[0])
|
||||
}
|
||||
// JoinHostPort properly encloses ipv6 addresses in square brackets
|
||||
host = net.JoinHostPort(host, port)
|
||||
} else if isIPv6Address(host) && host[0] != '[' {
|
||||
// ipv6 needs to be enclosed in square brackets in urls
|
||||
host = "[" + host + "]"
|
||||
}
|
||||
|
||||
basePath := routeDescriptorsMap[RouteNameBase].Path
|
||||
|
@ -175,6 +150,8 @@ func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
|
|||
tagOrDigest = v.Tag()
|
||||
case reference.Digested:
|
||||
tagOrDigest = v.Digest().String()
|
||||
default:
|
||||
return "", fmt.Errorf("reference must have a tag or digest")
|
||||
}
|
||||
|
||||
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
|
||||
|
@ -287,28 +264,3 @@ func appendValues(u string, values ...url.Values) string {
|
|||
|
||||
return appendValuesURL(up, values...).String()
|
||||
}
|
||||
|
||||
// isIPv6Address returns true if the given string is a valid IPv6 address. No port is allowed. The address may be
|
||||
// enclosed in square brackets.
|
||||
func isIPv6Address(host string) bool {
|
||||
if len(host) > 1 && host[0] == '[' && host[len(host)-1] == ']' {
|
||||
host = host[1 : len(host)-1]
|
||||
}
|
||||
// The IPv6 scoped addressing zone identifier starts after the last percent sign.
|
||||
if i := strings.LastIndexByte(host, '%'); i > 0 {
|
||||
host = host[:i]
|
||||
}
|
||||
ip := net.ParseIP(host)
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
if ip.To16() == nil {
|
||||
return false
|
||||
}
|
||||
if ip.To4() == nil {
|
||||
return true
|
||||
}
|
||||
// a dot can be present in an IPv4-mapped address, but it needs to come after a colon
|
||||
i := strings.IndexAny(host, ":.")
|
||||
return i >= 0 && host[i] == ':'
|
||||
}
|
||||
|
|
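To make the forwarded-header handling above concrete, here is a hedged usage sketch (host names are illustrative) that builds registry URLs from a request that passed through a TLS-terminating reverse proxy:

package main

import (
	"fmt"
	"net/http/httptest"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	r := httptest.NewRequest("GET", "/v2/", nil)
	r.Host = "registry.internal:5000"
	// Simulate a reverse proxy in front of the registry.
	r.Header.Set("X-Forwarded-Proto", "https")
	r.Header.Set("X-Forwarded-Host", "registry.example.com, registry.internal")

	ub := v2.NewURLBuilderFromRequest(r, false)
	base, err := ub.BuildBaseURL()
	if err != nil {
		panic(err)
	}
	fmt.Println(base) // e.g. https://registry.example.com/v2/
}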
2
vendor/github.com/docker/distribution/registry/client/auth/session.go
generated
vendored
|
@ -10,10 +10,10 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
"github.com/docker/distribution/registry/client/auth/challenge"
|
||||
"github.com/docker/distribution/registry/client/transport"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
2
vendor/github.com/docker/distribution/registry/registry.go
generated
vendored
|
@ -11,7 +11,6 @@ import (
|
|||
|
||||
"rsc.io/letsencrypt"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
logstash "github.com/bshuster-repo/logrus-logstash-hook"
|
||||
"github.com/bugsnag/bugsnag-go"
|
||||
"github.com/docker/distribution/configuration"
|
||||
|
@ -22,6 +21,7 @@ import (
|
|||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/distribution/version"
|
||||
gorhandlers "github.com/gorilla/handlers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/yvasiyarov/gorelic"
|
||||
)
|
||||
|
|
4
vendor/github.com/docker/distribution/registry/storage/blobstore.go
generated
vendored
|
@ -27,7 +27,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
|
|||
return nil, err
|
||||
}
|
||||
|
||||
p, err := bs.driver.GetContent(ctx, bp)
|
||||
p, err := getContent(ctx, bs.driver, bp)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case driver.PathNotFoundError:
|
||||
|
@ -37,7 +37,7 @@ func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error
|
|||
return nil, err
|
||||
}
|
||||
|
||||
return p, err
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
|
||||
|
|
2
vendor/github.com/docker/distribution/registry/storage/blobwriter.go
generated
vendored
|
@ -7,11 +7,11 @@ import (
|
|||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
2
vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go
generated
vendored
|
@ -7,9 +7,9 @@ import (
|
|||
"path"
|
||||
"strconv"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/context"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stevvooe/resumable"
|
||||
|
||||
// register resumable hashes with import
|
||||
|
|
71
vendor/github.com/docker/distribution/registry/storage/io.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/registry/storage/driver"
|
||||
)
|
||||
|
||||
const (
|
||||
maxBlobGetSize = 4 << 20
|
||||
)
|
||||
|
||||
func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
|
||||
r, err := driver.Reader(ctx, p, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return readAllLimited(r, maxBlobGetSize)
|
||||
}
|
||||
|
||||
func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
|
||||
r = limitReader(r, limit)
|
||||
return ioutil.ReadAll(r)
|
||||
}
|
||||
|
||||
// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
|
||||
// this returns an error when the limit is reached.
|
||||
func limitReader(r io.Reader, n int64) io.Reader {
|
||||
return &limitedReader{r: r, n: n}
|
||||
}
|
||||
|
||||
// limitedReader implements a reader that errors when the limit is reached.
|
||||
//
|
||||
// Partially cribbed from net/http.MaxBytesReader.
|
||||
type limitedReader struct {
|
||||
r io.Reader // underlying reader
|
||||
n int64 // max bytes remaining
|
||||
err error // sticky error
|
||||
}
|
||||
|
||||
func (l *limitedReader) Read(p []byte) (n int, err error) {
|
||||
if l.err != nil {
|
||||
return 0, l.err
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
// If they asked for a 32KB read but only 5 bytes are
|
||||
// remaining, no need to read 32KB. 6 bytes will answer the
|
||||
// question of whether we hit the limit or went past it.
|
||||
if int64(len(p)) > l.n+1 {
|
||||
p = p[:l.n+1]
|
||||
}
|
||||
n, err = l.r.Read(p)
|
||||
|
||||
if int64(n) <= l.n {
|
||||
l.n -= int64(n)
|
||||
l.err = err
|
||||
return n, err
|
||||
}
|
||||
|
||||
n = int(l.n)
|
||||
l.n = 0
|
||||
|
||||
l.err = errors.New("storage: read exceeds limit")
|
||||
return n, l.err
|
||||
}
|
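Since getContent, readAllLimited and limitedReader are unexported, they can only be exercised from inside the storage package; this test-style sketch (name and placement are illustrative, not part of this commit) shows the intended behaviour of the 4 MB cap:

package storage

import (
	"strings"
	"testing"
)

func TestReadAllLimited(t *testing.T) {
	// Within the limit: the full content is returned.
	b, err := readAllLimited(strings.NewReader("hello"), 16)
	if err != nil || string(b) != "hello" {
		t.Fatalf("unexpected result: %q, %v", b, err)
	}

	// Over the limit: unlike io.LimitReader, an error is reported
	// instead of silently truncating the data.
	if _, err := readAllLimited(strings.NewReader("hello world"), 4); err == nil {
		t.Fatal("expected an error when the limit is exceeded")
	}
}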
2
vendor/github.com/docker/distribution/registry/storage/purgeuploads.go
generated
vendored
|
@ -5,10 +5,10 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/docker/distribution/context"
|
||||
storageDriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/distribution/uuid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// uploadData stored the location of temporary files created during a layer upload
|
||||
|
|
20
vendor/github.com/docker/docker-credential-helpers/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
Copyright (c) 2016 David Calavera
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
121
vendor/github.com/docker/docker-credential-helpers/client/client.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
)
|
||||
|
||||
// isValidCredsMessage checks whether 'msg' contains an invalid-credentials error message.
|
||||
// It returns nil if the message is free of invalid-credentials errors, and the matching error otherwise.
|
||||
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
|
||||
func isValidCredsMessage(msg string) error {
|
||||
if credentials.IsCredentialsMissingServerURLMessage(msg) {
|
||||
return credentials.NewErrCredentialsMissingServerURL()
|
||||
}
|
||||
|
||||
if credentials.IsCredentialsMissingUsernameMessage(msg) {
|
||||
return credentials.NewErrCredentialsMissingUsername()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Store uses an external program to save credentials.
|
||||
func Store(program ProgramFunc, creds *credentials.Credentials) error {
|
||||
cmd := program("store")
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
if err := json.NewEncoder(buffer).Encode(creds); err != nil {
|
||||
return err
|
||||
}
|
||||
cmd.Input(buffer)
|
||||
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
t := strings.TrimSpace(string(out))
|
||||
|
||||
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||
err = isValidErr
|
||||
}
|
||||
|
||||
return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get executes an external program to get the credentials from a native store.
|
||||
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
|
||||
cmd := program("get")
|
||||
cmd.Input(strings.NewReader(serverURL))
|
||||
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
t := strings.TrimSpace(string(out))
|
||||
|
||||
if credentials.IsErrCredentialsNotFoundMessage(t) {
|
||||
return nil, credentials.NewErrCredentialsNotFound()
|
||||
}
|
||||
|
||||
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||
err = isValidErr
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
|
||||
}
|
||||
|
||||
resp := &credentials.Credentials{
|
||||
ServerURL: serverURL,
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Erase executes a program to remove the server credentials from the native store.
|
||||
func Erase(program ProgramFunc, serverURL string) error {
|
||||
cmd := program("erase")
|
||||
cmd.Input(strings.NewReader(serverURL))
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
t := strings.TrimSpace(string(out))
|
||||
|
||||
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||
err = isValidErr
|
||||
}
|
||||
|
||||
return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// List executes a program to list server credentials in the native store.
|
||||
func List(program ProgramFunc) (map[string]string, error) {
|
||||
cmd := program("list")
|
||||
cmd.Input(strings.NewReader("unused"))
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
t := strings.TrimSpace(string(out))
|
||||
|
||||
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||
err = isValidErr
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
|
||||
}
|
||||
|
||||
var resp map[string]string
|
||||
if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
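As a usage sketch for the client package added above (the helper binary name is an assumption; any docker-credential-* helper on $PATH would do):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Assumes a helper binary such as docker-credential-pass is installed.
	p := client.NewShellProgramFunc("docker-credential-pass")

	if err := client.Store(p, &credentials.Credentials{
		ServerURL: "https://index.docker.io/v1/",
		Username:  "someuser",
		Secret:    "sometoken",
	}); err != nil {
		log.Fatal(err)
	}

	creds, err := client.Get(p, "https://index.docker.io/v1/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(creds.Username)
}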
56
vendor/github.com/docker/docker-credential-helpers/client/command.go
generated
vendored
Normal file
|
@ -0,0 +1,56 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Program is an interface to execute external programs.
|
||||
type Program interface {
|
||||
Output() ([]byte, error)
|
||||
Input(in io.Reader)
|
||||
}
|
||||
|
||||
// ProgramFunc is a type of function that initializes programs based on arguments.
|
||||
type ProgramFunc func(args ...string) Program
|
||||
|
||||
// NewShellProgramFunc creates programs that are executed in a Shell.
|
||||
func NewShellProgramFunc(name string) ProgramFunc {
|
||||
return NewShellProgramFuncWithEnv(name, nil)
|
||||
}
|
||||
|
||||
// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
|
||||
func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
|
||||
return func(args ...string) Program {
|
||||
return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
|
||||
}
|
||||
}
|
||||
|
||||
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
|
||||
programCmd := exec.Command(commandName, args...)
|
||||
programCmd.Env = os.Environ()
|
||||
if env != nil {
|
||||
for k, v := range *env {
|
||||
programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
}
|
||||
programCmd.Stderr = os.Stderr
|
||||
return programCmd
|
||||
}
|
||||
|
||||
// Shell invokes shell commands to talk with a remote credentials helper.
|
||||
type Shell struct {
|
||||
cmd *exec.Cmd
|
||||
}
|
||||
|
||||
// Output returns responses from the remote credentials helper.
|
||||
func (s *Shell) Output() ([]byte, error) {
|
||||
return s.cmd.Output()
|
||||
}
|
||||
|
||||
// Input sets the input to send to a remote credentials helper.
|
||||
func (s *Shell) Input(in io.Reader) {
|
||||
s.cmd.Stdin = in
|
||||
}
|
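Because Program and ProgramFunc are interfaces over an external process, a test double can be substituted without shelling out. A minimal sketch (entirely hypothetical, not part of this commit), written as if it lived inside the client package:

package client

import "io"

// fakeProgram is an in-memory Program that echoes a canned response,
// useful for unit tests that must not invoke a real helper binary.
type fakeProgram struct {
	response []byte
	input    io.Reader
}

func (f *fakeProgram) Output() ([]byte, error) { return f.response, nil }
func (f *fakeProgram) Input(in io.Reader)      { f.input = in }

// newFakeProgramFunc returns a ProgramFunc that always yields the same response.
func newFakeProgramFunc(response string) ProgramFunc {
	return func(args ...string) Program {
		return &fakeProgram{response: []byte(response)}
	}
}

Feeding a canned JSON document through such a ProgramFunc to Get exercises the decoding path without spawning any process.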
186
vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
generated
vendored
Normal file
|
@ -0,0 +1,186 @@
|
|||
package credentials
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Credentials holds the information shared between docker and the credentials store.
|
||||
type Credentials struct {
|
||||
ServerURL string
|
||||
Username string
|
||||
Secret string
|
||||
}
|
||||
|
||||
// isValid checks the integrity of the Credentials object, ensuring that it does not lack
|
||||
// a server URL or a username.
|
||||
// It returns whether the credentials are valid and an error if they are not.
|
||||
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
|
||||
func (c *Credentials) isValid() (bool, error) {
|
||||
if len(c.ServerURL) == 0 {
|
||||
return false, NewErrCredentialsMissingServerURL()
|
||||
}
|
||||
|
||||
if len(c.Username) == 0 {
|
||||
return false, NewErrCredentialsMissingUsername()
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
|
||||
// That label makes it possible to filter out non-Docker credentials at lookup/search time in the macOS keychain,
|
||||
// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
|
||||
var CredsLabel = "Docker Credentials"
|
||||
|
||||
// SetCredsLabel is a simple setter for CredsLabel
|
||||
func SetCredsLabel(label string) {
|
||||
CredsLabel = label
|
||||
}
|
||||
|
||||
// Serve initializes the credentials helper and parses the action argument.
|
||||
// This function is designed to be called from a command line interface.
|
||||
// It uses os.Args[1] as the key for the action.
|
||||
// It uses os.Stdin as input and os.Stdout as output.
|
||||
// This function terminates the program with os.Exit(1) if there is an error.
|
||||
func Serve(helper Helper) {
|
||||
var err error
|
||||
if len(os.Args) != 2 {
|
||||
err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stdout, "%v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleCommand uses a helper and a key to run a credential action.
|
||||
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
|
||||
switch key {
|
||||
case "store":
|
||||
return Store(helper, in)
|
||||
case "get":
|
||||
return Get(helper, in, out)
|
||||
case "erase":
|
||||
return Erase(helper, in)
|
||||
case "list":
|
||||
return List(helper, out)
|
||||
case "version":
|
||||
return PrintVersion(out)
|
||||
}
|
||||
return fmt.Errorf("Unknown credential action `%s`", key)
|
||||
}
|
||||
|
||||
// Store uses a helper and an input reader to save credentials.
|
||||
// The reader must contain the JSON serialization of a Credentials struct.
|
||||
func Store(helper Helper, reader io.Reader) error {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
for scanner.Scan() {
|
||||
buffer.Write(scanner.Bytes())
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
var creds Credentials
|
||||
if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ok, err := creds.isValid(); !ok {
|
||||
return err
|
||||
}
|
||||
|
||||
return helper.Add(&creds)
|
||||
}
|
||||
|
||||
// Get retrieves the credentials for a given server url.
|
||||
// The reader must contain the server URL to search.
|
||||
// The writer is used to write the JSON serialization of the credentials.
|
||||
func Get(helper Helper, reader io.Reader, writer io.Writer) error {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
for scanner.Scan() {
|
||||
buffer.Write(scanner.Bytes())
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
serverURL := strings.TrimSpace(buffer.String())
|
||||
if len(serverURL) == 0 {
|
||||
return NewErrCredentialsMissingServerURL()
|
||||
}
|
||||
|
||||
username, secret, err := helper.Get(serverURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp := Credentials{
|
||||
ServerURL: serverURL,
|
||||
Username: username,
|
||||
Secret: secret,
|
||||
}
|
||||
|
||||
buffer.Reset()
|
||||
if err := json.NewEncoder(buffer).Encode(resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprint(writer, buffer.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Erase removes credentials from the store.
|
||||
// The reader must contain the server URL to remove.
|
||||
func Erase(helper Helper, reader io.Reader) error {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
|
||||
buffer := new(bytes.Buffer)
|
||||
for scanner.Scan() {
|
||||
buffer.Write(scanner.Bytes())
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
serverURL := strings.TrimSpace(buffer.String())
|
||||
if len(serverURL) == 0 {
|
||||
return NewErrCredentialsMissingServerURL()
|
||||
}
|
||||
|
||||
return helper.Delete(serverURL)
|
||||
}
|
||||
|
||||
// List returns all the serverURLs of keys in
|
||||
// the OS store as a list of strings.
|
||||
func List(helper Helper, writer io.Writer) error {
|
||||
accts, err := helper.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.NewEncoder(writer).Encode(accts)
|
||||
}
|
||||
|
||||
// PrintVersion outputs the current version.
|
||||
func PrintVersion(writer io.Writer) error {
|
||||
fmt.Fprintln(writer, Version)
|
||||
return nil
|
||||
}
|
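The Serve/HandleCommand pair above is the whole wire protocol of a credentials helper: the action comes from os.Args[1], the payload from stdin, and the reply goes to stdout. A helper binary's main package therefore reduces to a sketch like the following (staticHelper is a toy stand-in; a real helper would talk to an OS keychain or a tool such as pass):

package main

import "github.com/docker/docker-credential-helpers/credentials"

// staticHelper is a toy Helper that always returns the same credentials.
type staticHelper struct{}

func (staticHelper) Add(*credentials.Credentials) error { return nil }
func (staticHelper) Delete(serverURL string) error      { return nil }
func (staticHelper) Get(serverURL string) (string, string, error) {
	return "someuser", "sometoken", nil
}
func (staticHelper) List() (map[string]string, error) {
	return map[string]string{"https://index.docker.io/v1/": "someuser"}, nil
}

func main() {
	// os.Args[1] selects store|get|erase|list|version, as handled above.
	credentials.Serve(staticHelper{})
}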
102
vendor/github.com/docker/docker-credential-helpers/credentials/error.go
generated
vendored
Normal file
|
@ -0,0 +1,102 @@
|
|||
package credentials
|
||||
|
||||
const (
|
||||
// ErrCredentialsNotFound standardizes the not found error, so every helper returns
|
||||
// the same message and docker can handle it properly.
|
||||
errCredentialsNotFoundMessage = "credentials not found in native keychain"
|
||||
|
||||
// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
|
||||
// invalid credentials or credentials management operations
|
||||
errCredentialsMissingServerURLMessage = "no credentials server URL"
|
||||
errCredentialsMissingUsernameMessage = "no credentials username"
|
||||
)
|
||||
|
||||
// errCredentialsNotFound represents an error
|
||||
// raised when credentials are not in the store.
|
||||
type errCredentialsNotFound struct{}
|
||||
|
||||
// Error returns the standard error message
|
||||
// for when the credentials are not in the store.
|
||||
func (errCredentialsNotFound) Error() string {
|
||||
return errCredentialsNotFoundMessage
|
||||
}
|
||||
|
||||
// NewErrCredentialsNotFound creates a new error
|
||||
// for when the credentials are not in the store.
|
||||
func NewErrCredentialsNotFound() error {
|
||||
return errCredentialsNotFound{}
|
||||
}
|
||||
|
||||
// IsErrCredentialsNotFound returns true if the error
|
||||
// was caused by not having a set of credentials in a store.
|
||||
func IsErrCredentialsNotFound(err error) bool {
|
||||
_, ok := err.(errCredentialsNotFound)
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsErrCredentialsNotFoundMessage returns true if the error
|
||||
// was caused by not having a set of credentials in a store.
|
||||
//
|
||||
// This function helps to check messages returned by an
|
||||
// external program via its standard output.
|
||||
func IsErrCredentialsNotFoundMessage(err string) bool {
|
||||
return err == errCredentialsNotFoundMessage
|
||||
}
|
||||
|
||||
// errCredentialsMissingServerURL represents an error raised
|
||||
// when the credentials object has no server URL or when no
|
||||
// server URL is provided to a credentials operation requiring
|
||||
// one.
|
||||
type errCredentialsMissingServerURL struct{}
|
||||
|
||||
func (errCredentialsMissingServerURL) Error() string {
|
||||
return errCredentialsMissingServerURLMessage
|
||||
}
|
||||
|
||||
// errCredentialsMissingUsername represents an error raised
|
||||
// when the credentials object has no username or when no
|
||||
// username is provided to a credentials operation requiring
|
||||
// one.
|
||||
type errCredentialsMissingUsername struct{}
|
||||
|
||||
func (errCredentialsMissingUsername) Error() string {
|
||||
return errCredentialsMissingUsernameMessage
|
||||
}
|
||||
|
||||
// NewErrCredentialsMissingServerURL creates a new error for
|
||||
// errCredentialsMissingServerURL.
|
||||
func NewErrCredentialsMissingServerURL() error {
|
||||
return errCredentialsMissingServerURL{}
|
||||
}
|
||||
|
||||
// NewErrCredentialsMissingUsername creates a new error for
|
||||
// errCredentialsMissingUsername.
|
||||
func NewErrCredentialsMissingUsername() error {
|
||||
return errCredentialsMissingUsername{}
|
||||
}
|
||||
|
||||
// IsCredentialsMissingServerURL returns true if the error
|
||||
// was an errCredentialsMissingServerURL.
|
||||
func IsCredentialsMissingServerURL(err error) bool {
|
||||
_, ok := err.(errCredentialsMissingServerURL)
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsCredentialsMissingServerURLMessage checks for an
|
||||
// errCredentialsMissingServerURL in the error message.
|
||||
func IsCredentialsMissingServerURLMessage(err string) bool {
|
||||
return err == errCredentialsMissingServerURLMessage
|
||||
}
|
||||
|
||||
// IsCredentialsMissingUsername returns true if the error
|
||||
// was an errCredentialsMissingUsername.
|
||||
func IsCredentialsMissingUsername(err error) bool {
|
||||
_, ok := err.(errCredentialsMissingUsername)
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsCredentialsMissingUsernameMessage checks for an
|
||||
// errCredentialsMissingUsername in the error message.
|
||||
func IsCredentialsMissingUsernameMessage(err string) bool {
|
||||
return err == errCredentialsMissingUsernameMessage
|
||||
}
|
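The typed errors and their *Message twins exist so that callers can tell "not found" apart from real failures, whether the helper runs in-process or as an external binary. A hedged sketch (helper name and registry URL are assumptions):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker-credential-helpers/client"
	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	p := client.NewShellProgramFunc("docker-credential-pass")

	creds, err := client.Get(p, "https://registry.example.com")
	switch {
	case err == nil:
		fmt.Println("found credentials for", creds.Username)
	case credentials.IsErrCredentialsNotFound(err):
		// Missing credentials is an expected condition, not a failure.
		fmt.Println("no credentials stored for this registry")
	default:
		log.Fatal(err)
	}
}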
14
vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
package credentials
|
||||
|
||||
// Helper is the interface a credentials store helper must implement.
|
||||
type Helper interface {
|
||||
// Add appends credentials to the store.
|
||||
Add(*Credentials) error
|
||||
// Delete removes credentials from the store.
|
||||
Delete(serverURL string) error
|
||||
// Get retrieves credentials from the store.
|
||||
// It returns username and secret as strings.
|
||||
Get(serverURL string) (string, string, error)
|
||||
// List returns the stored serverURLs and their associated usernames.
|
||||
List() (map[string]string, error)
|
||||
}
|
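For completeness, a minimal in-memory implementation of this interface (purely illustrative, handy for tests) could look like:

package memhelper

import "github.com/docker/docker-credential-helpers/credentials"

// memHelper keeps credentials in a map keyed by server URL.
type memHelper struct {
	store map[string]*credentials.Credentials
}

// compile-time check that memHelper satisfies credentials.Helper
var _ credentials.Helper = (*memHelper)(nil)

func New() *memHelper {
	return &memHelper{store: map[string]*credentials.Credentials{}}
}

func (m *memHelper) Add(c *credentials.Credentials) error {
	m.store[c.ServerURL] = c
	return nil
}

func (m *memHelper) Delete(serverURL string) error {
	delete(m.store, serverURL)
	return nil
}

func (m *memHelper) Get(serverURL string) (string, string, error) {
	c, ok := m.store[serverURL]
	if !ok {
		return "", "", credentials.NewErrCredentialsNotFound()
	}
	return c.Username, c.Secret, nil
}

func (m *memHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(m.store))
	for url, c := range m.store {
		out[url] = c.Username
	}
	return out, nil
}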
4
vendor/github.com/docker/docker-credential-helpers/credentials/version.go
generated
vendored
Normal file
|
@ -0,0 +1,4 @@
|
|||
package credentials
|
||||
|
||||
// Version holds a string describing the current version
|
||||
const Version = "0.6.0"
|
239
vendor/github.com/docker/docker-credential-helpers/pass/pass_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,239 @@
|
|||
// A `pass` based credential helper. Passwords are stored as arguments to pass
|
||||
// of the form: "$PASS_FOLDER/base64-url(serverURL)/username". We base64-url
|
||||
// encode the serverURL, because under the hood pass uses files and folders, so
|
||||
// any slashes in it would be translated into additional folders.
|
||||
package pass
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker-credential-helpers/credentials"
|
||||
)
|
||||
|
||||
const PASS_FOLDER = "docker-credential-helpers"
|
||||
|
||||
var (
|
||||
PassInitialized bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
// In principle, we could just run `pass init`. However, pass has a bug
|
||||
// where if gpg fails, it doesn't always exit 1. Additionally, pass
|
||||
// uses gpg2, but gpg is the default, which may be confusing. So let's
|
||||
// just explicitly check that pass can actually store and retrieve a
|
||||
// password.
|
||||
password := "pass is initialized"
|
||||
name := path.Join(PASS_FOLDER, "docker-pass-initialized-check")
|
||||
|
||||
_, err := runPass(password, "insert", "-f", "-m", name)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
stored, err := runPass("", "show", name)
|
||||
PassInitialized = err == nil && stored == password
|
||||
|
||||
if PassInitialized {
|
||||
runPass("", "rm", "-rf", name)
|
||||
}
|
||||
}
|
||||
|
||||
func runPass(stdinContent string, args ...string) (string, error) {
|
||||
cmd := exec.Command("pass", args...)
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stdin.Close()
|
||||
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stderr.Close()
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer stdout.Close()
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
_, err = stdin.Write([]byte(stdinContent))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stdin.Close()
|
||||
|
||||
errContent, err := ioutil.ReadAll(stderr)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error reading stderr: %s", err)
|
||||
}
|
||||
|
||||
result, err := ioutil.ReadAll(stdout)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading stdout: %s", err)
|
||||
}
|
||||
|
||||
cmdErr := cmd.Wait()
|
||||
if cmdErr != nil {
|
||||
return "", fmt.Errorf("%s: %s", cmdErr, errContent)
|
||||
}
|
||||
|
||||
return string(result), nil
|
||||
}
|
||||
|
||||
// Pass handles secrets using the pass password store.
|
||||
type Pass struct{}
|
||||
|
||||
// Add adds new credentials to the keychain.
|
||||
func (h Pass) Add(creds *credentials.Credentials) error {
|
||||
if !PassInitialized {
|
||||
return errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if creds == nil {
|
||||
return errors.New("missing credentials")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(creds.ServerURL))
|
||||
|
||||
_, err := runPass(creds.Secret, "insert", "-f", "-m", path.Join(PASS_FOLDER, encoded, creds.Username))
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes credentials from the store.
|
||||
func (h Pass) Delete(serverURL string) error {
|
||||
if !PassInitialized {
|
||||
return errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if serverURL == "" {
|
||||
return errors.New("missing server url")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
|
||||
_, err := runPass("", "rm", "-rf", path.Join(PASS_FOLDER, encoded))
|
||||
return err
|
||||
}
|
||||
|
||||
func getPassDir() string {
|
||||
passDir := os.ExpandEnv("$HOME/.password-store")
|
||||
for _, e := range os.Environ() {
|
||||
parts := strings.SplitN(e, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
if parts[0] != "PASSWORD_STORE_DIR" {
|
||||
continue
|
||||
}
|
||||
|
||||
passDir = parts[1]
|
||||
break
|
||||
}
|
||||
|
||||
return passDir
|
||||
}
|
||||
|
||||
// listPassDir lists all the contents of a directory in the password store.
|
||||
// Pass uses fancy unicode to emit stuff to stdout, so rather than try
|
||||
// and parse this, let's just look at the directory structure instead.
|
||||
func listPassDir(args ...string) ([]os.FileInfo, error) {
|
||||
passDir := getPassDir()
|
||||
p := path.Join(append([]string{passDir, PASS_FOLDER}, args...)...)
|
||||
contents, err := ioutil.ReadDir(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return []os.FileInfo{}, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return contents, nil
|
||||
}
|
||||
|
||||
// Get returns the username and secret to use for a given registry server URL.
|
||||
func (h Pass) Get(serverURL string) (string, string, error) {
|
||||
if !PassInitialized {
|
||||
return "", "", errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
if serverURL == "" {
|
||||
return "", "", errors.New("missing server url")
|
||||
}
|
||||
|
||||
encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
|
||||
|
||||
if _, err := os.Stat(path.Join(getPassDir(), PASS_FOLDER, encoded)); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return "", "", nil;
|
||||
}
|
||||
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
usernames, err := listPassDir(encoded)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if len(usernames) < 1 {
|
||||
return "", "", fmt.Errorf("no usernames for %s", serverURL)
|
||||
}
|
||||
|
||||
actual := strings.TrimSuffix(usernames[0].Name(), ".gpg")
|
||||
secret, err := runPass("", "show", path.Join(PASS_FOLDER, encoded, actual))
|
||||
return actual, secret, err
|
||||
}
|
||||
|
||||
// List returns the stored URLs and corresponding usernames for a given credentials label
|
||||
func (h Pass) List() (map[string]string, error) {
|
||||
if !PassInitialized {
|
||||
return nil, errors.New("pass store is uninitialized")
|
||||
}
|
||||
|
||||
servers, err := listPassDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := map[string]string{}
|
||||
|
||||
for _, server := range servers {
|
||||
if !server.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
serverURL, err := base64.URLEncoding.DecodeString(server.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usernames, err := listPassDir(server.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(usernames) < 1 {
|
||||
return nil, fmt.Errorf("no usernames for %s", serverURL)
|
||||
}
|
||||
|
||||
resp[string(serverURL)] = strings.TrimSuffix(usernames[0].Name(), ".gpg")
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
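The layout described at the top of this file ("$PASS_FOLDER/base64-url(serverURL)/username") can be previewed without pass installed; a small sketch of the encoding step only (server URL and username are illustrative):

package main

import (
	"encoding/base64"
	"fmt"
	"path"
)

func main() {
	serverURL := "https://index.docker.io/v1/"
	username := "someuser"

	// Same encoding the helper uses: URL-safe base64 keeps slashes in the
	// server URL from turning into extra directories in the password store.
	encoded := base64.URLEncoding.EncodeToString([]byte(serverURL))
	fmt.Println(path.Join("docker-credential-helpers", encoded, username))
}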
157
vendor/github.com/docker/docker/api/common.go
generated
vendored
|
@ -1,166 +1,11 @@
|
|||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"mime"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of Current REST API
|
||||
DefaultVersion string = "1.30"
|
||||
DefaultVersion string = "1.34"
|
||||
|
||||
// NoBaseImageSpecifier is the symbol used by the FROM
|
||||
// command to specify that no base image is to be used.
|
||||
NoBaseImageSpecifier string = "scratch"
|
||||
)
|
||||
|
||||
// byPortInfo is a temporary type used to sort types.Port by its fields
|
||||
type byPortInfo []types.Port
|
||||
|
||||
func (r byPortInfo) Len() int { return len(r) }
|
||||
func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r byPortInfo) Less(i, j int) bool {
|
||||
if r[i].PrivatePort != r[j].PrivatePort {
|
||||
return r[i].PrivatePort < r[j].PrivatePort
|
||||
}
|
||||
|
||||
if r[i].IP != r[j].IP {
|
||||
return r[i].IP < r[j].IP
|
||||
}
|
||||
|
||||
if r[i].PublicPort != r[j].PublicPort {
|
||||
return r[i].PublicPort < r[j].PublicPort
|
||||
}
|
||||
|
||||
return r[i].Type < r[j].Type
|
||||
}
|
||||
|
||||
// DisplayablePorts returns formatted string representing open ports of container
|
||||
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
|
||||
// it's used by command 'docker ps'
|
||||
func DisplayablePorts(ports []types.Port) string {
|
||||
type portGroup struct {
|
||||
first uint16
|
||||
last uint16
|
||||
}
|
||||
groupMap := make(map[string]*portGroup)
|
||||
var result []string
|
||||
var hostMappings []string
|
||||
var groupMapKeys []string
|
||||
sort.Sort(byPortInfo(ports))
|
||||
for _, port := range ports {
|
||||
current := port.PrivatePort
|
||||
portKey := port.Type
|
||||
if port.IP != "" {
|
||||
if port.PublicPort != current {
|
||||
hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
|
||||
continue
|
||||
}
|
||||
portKey = fmt.Sprintf("%s/%s", port.IP, port.Type)
|
||||
}
|
||||
group := groupMap[portKey]
|
||||
|
||||
if group == nil {
|
||||
groupMap[portKey] = &portGroup{first: current, last: current}
|
||||
// record the order in which groupMap keys are created
|
||||
groupMapKeys = append(groupMapKeys, portKey)
|
||||
continue
|
||||
}
|
||||
if current == (group.last + 1) {
|
||||
group.last = current
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, formGroup(portKey, group.first, group.last))
|
||||
groupMap[portKey] = &portGroup{first: current, last: current}
|
||||
}
|
||||
for _, portKey := range groupMapKeys {
|
||||
g := groupMap[portKey]
|
||||
result = append(result, formGroup(portKey, g.first, g.last))
|
||||
}
|
||||
result = append(result, hostMappings...)
|
||||
return strings.Join(result, ", ")
|
||||
}
|
||||
|
||||
func formGroup(key string, start, last uint16) string {
|
||||
parts := strings.Split(key, "/")
|
||||
groupType := parts[0]
|
||||
var ip string
|
||||
if len(parts) > 1 {
|
||||
ip = parts[0]
|
||||
groupType = parts[1]
|
||||
}
|
||||
group := strconv.Itoa(int(start))
|
||||
if start != last {
|
||||
group = fmt.Sprintf("%s-%d", group, last)
|
||||
}
|
||||
if ip != "" {
|
||||
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s", group, groupType)
|
||||
}
|
||||
|
||||
// MatchesContentType validates the content type against the expected one
|
||||
func MatchesContentType(contentType, expectedType string) bool {
|
||||
mimetype, _, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
logrus.Errorf("Error parsing media type: %s error: %v", contentType, err)
|
||||
}
|
||||
return err == nil && mimetype == expectedType
|
||||
}
|
||||
|
||||
// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
|
||||
// otherwise generates a new one
|
||||
func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
|
||||
err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
|
||||
if err == libtrust.ErrKeyFileDoesNotExist {
|
||||
trustKey, err = libtrust.GenerateECP256PrivateKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error generating key: %s", err)
|
||||
}
|
||||
encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error serializing key: %s", err)
|
||||
}
|
||||
if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
|
||||
return nil, fmt.Errorf("Error saving key file: %s", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
|
||||
}
|
||||
return trustKey, nil
|
||||
}
|
||||
|
||||
func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
|
||||
if ext == ".json" || ext == ".jwk" {
|
||||
encoded, err = json.Marshal(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
|
||||
}
|
||||
} else {
|
||||
pemBlock, err := key.PEMBlock()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
|
||||
}
|
||||
encoded = pem.EncodeToMemory(pemBlock)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
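The hunk header above (-1,166 +1,11) shows that most of this file, including DisplayablePorts, is dropped from the vendored copy in this commit. For reference, the removed function groups ports as in this sketch (import path as it was before the removal; values are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api"
	"github.com/docker/docker/api/types"
)

func main() {
	ports := []types.Port{
		{PrivatePort: 9090, PublicPort: 80, Type: "tcp", IP: "0.0.0.0"},
		{PrivatePort: 9988, Type: "tcp"},
		{PrivatePort: 9989, Type: "tcp"},
	}
	// Contiguous unpublished ports collapse into a range; published ports
	// are rendered as ip:public->private/proto.
	fmt.Println(api.DisplayablePorts(ports))
	// e.g. 9988-9989/tcp, 0.0.0.0:80->9090/tcp
}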
9
vendor/github.com/docker/docker/api/names.go
generated
vendored
|
@ -1,9 +0,0 @@
|
|||
package api
|
||||
|
||||
import "regexp"
|
||||
|
||||
// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
|
||||
const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
|
||||
|
||||
// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
|
||||
var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
|
15
vendor/github.com/docker/docker/api/types/client.go
generated
vendored
|
@ -7,7 +7,7 @@ import (
|
|||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/go-units"
|
||||
units "github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
|
||||
|
@ -178,6 +178,8 @@ type ImageBuildOptions struct {
|
|||
SecurityOpt []string
|
||||
ExtraHosts []string // List of extra hosts
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
}
|
||||
|
||||
// ImageBuildResponse holds information
|
||||
|
@ -190,7 +192,8 @@ type ImageBuildResponse struct {
|
|||
|
||||
// ImageCreateOptions holds information to create images.
|
||||
type ImageCreateOptions struct {
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
|
||||
Platform string // Platform is the target platform of the image if it needs to be pulled from the registry.
|
||||
}
|
||||
|
||||
// ImageImportSource holds source information for ImageImport
|
||||
|
@ -201,9 +204,10 @@ type ImageImportSource struct {
|
|||
|
||||
// ImageImportOptions holds information to import images from the client host.
|
||||
type ImageImportOptions struct {
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||
Message string // Message is the message to tag the image with
|
||||
Changes []string // Changes are the raw changes to apply to this image
|
||||
Platform string // Platform is the target platform of the image
|
||||
}
|
||||
|
||||
// ImageListOptions holds parameters to filter the list of images with.
|
||||
|
@ -224,6 +228,7 @@ type ImagePullOptions struct {
|
|||
All bool
|
||||
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||
PrivilegeFunc RequestPrivilegeFunc
|
||||
Platform string
|
||||
}
|
||||
|
||||
// RequestPrivilegeFunc is a function interface that
|
||||
|
|
39
vendor/github.com/docker/docker/api/types/container/host_config.go
generated
vendored
|
@ -23,41 +23,46 @@ func (i Isolation) IsDefault() bool {
|
|||
// IpcMode represents the container ipc stack.
|
||||
type IpcMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its private ipc stack.
|
||||
// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
|
||||
func (n IpcMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
return n == "private"
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's ipc stack.
|
||||
// IsHost indicates whether the container shares the host's ipc namespace.
|
||||
func (n IpcMode) IsHost() bool {
|
||||
return n == "host"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether the container uses a container's ipc stack.
|
||||
// IsShareable indicates whether the container's ipc namespace can be shared with another container.
|
||||
func (n IpcMode) IsShareable() bool {
|
||||
return n == "shareable"
|
||||
}
|
||||
|
||||
// IsContainer indicates whether the container uses another container's ipc namespace.
|
||||
func (n IpcMode) IsContainer() bool {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
return len(parts) > 1 && parts[0] == "container"
|
||||
}
|
||||
|
||||
// Valid indicates whether the ipc stack is valid.
|
||||
// IsNone indicates whether container IpcMode is set to "none".
|
||||
func (n IpcMode) IsNone() bool {
|
||||
return n == "none"
|
||||
}
|
||||
|
||||
// IsEmpty indicates whether container IpcMode is empty
|
||||
func (n IpcMode) IsEmpty() bool {
|
||||
return n == ""
|
||||
}
|
||||
|
||||
// Valid indicates whether the ipc mode is valid.
|
||||
func (n IpcMode) Valid() bool {
|
||||
parts := strings.Split(string(n), ":")
|
||||
switch mode := parts[0]; mode {
|
||||
case "", "host":
|
||||
case "container":
|
||||
if len(parts) != 2 || parts[1] == "" {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
|
||||
}
|
||||
|
||||
// Container returns the name of the container whose ipc namespace is going to be used.
|
||||
func (n IpcMode) Container() string {
|
||||
parts := strings.SplitN(string(n), ":", 2)
|
||||
if len(parts) > 1 {
|
||||
if len(parts) > 1 && parts[0] == "container" {
|
||||
return parts[1]
|
||||
}
|
||||
return ""
|
||||
|
|
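The expanded IpcMode helpers accept the modes "", "none", "private", "shareable", "host" and "container:<name>"; a short sketch of how they compose:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	modes := []container.IpcMode{"", "none", "private", "shareable", "host", "container:web", "bogus"}
	for _, m := range modes {
		fmt.Printf("%-16q valid=%-5v container=%q\n", m, m.Valid(), m.Container())
	}
	// "container:web" is the only mode for which Container() returns a name ("web");
	// "bogus" is the only one reported as invalid.
}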
2
vendor/github.com/docker/docker/api/types/events/events.go
generated
vendored
|
@ -19,6 +19,8 @@ const (
|
|||
NodeEventType = "node"
|
||||
// SecretEventType is the event type that secrets generate
|
||||
SecretEventType = "secret"
|
||||
// ConfigEventType is the event type that configs generate
|
||||
ConfigEventType = "config"
|
||||
)
|
||||
|
||||
// Actor describes something that generates events,
|
||||
|
|
280
vendor/github.com/docker/docker/api/types/filters/parse.go
generated
vendored
|
@ -1,38 +1,45 @@
|
|||
// Package filters provides helper function to parse and handle command line
|
||||
// filter, used for example in docker ps or docker images commands.
|
||||
/*Package filters provides tools for encoding a mapping of keys to a set of
|
||||
multiple values.
|
||||
*/
|
||||
package filters
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
)
|
||||
|
||||
// Args stores filter arguments as map key:{map key: bool}.
|
||||
// It contains an aggregation of the map of arguments (which are in the form
|
||||
// of -f 'key=value') based on the key, and stores values for the same key
|
||||
// in a map with string keys and boolean values.
|
||||
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
|
||||
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
|
||||
// Args stores a mapping of keys to a set of multiple values.
|
||||
type Args struct {
|
||||
fields map[string]map[string]bool
|
||||
}
|
||||
|
||||
// NewArgs initializes a new Args struct.
|
||||
func NewArgs() Args {
|
||||
return Args{fields: map[string]map[string]bool{}}
|
||||
// KeyValuePair are used to initialize a new Args
|
||||
type KeyValuePair struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
// ParseFlag parses the argument to the filter flag. Like
|
||||
// Arg creates a new KeyValuePair for initializing Args
|
||||
func Arg(key, value string) KeyValuePair {
|
||||
return KeyValuePair{Key: key, Value: value}
|
||||
}
|
||||
|
||||
// NewArgs returns a new Args populated with the initial args
|
||||
func NewArgs(initialArgs ...KeyValuePair) Args {
|
||||
args := Args{fields: map[string]map[string]bool{}}
|
||||
for _, arg := range initialArgs {
|
||||
args.Add(arg.Key, arg.Value)
|
||||
}
|
||||
return args
|
||||
}
|
||||
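With KeyValuePair and Arg, an Args can now be fully populated at construction time; a brief sketch of the new constructor together with the JSON helpers defined further down in this file:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("label", "tier=web"),
		filters.Arg("dangling", "true"),
	)

	encoded, err := filters.ToJSON(args)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(encoded) // {"dangling":{"true":true},"label":{"env=prod":true,"tier=web":true}}

	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Get("label")) // both label values, order not guaranteed
}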
|
||||
// ParseFlag parses a key=value string and adds it to an Args.
|
||||
//
|
||||
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
||||
//
|
||||
// If prev map is provided, then it is appended to, and returned. By default a new
|
||||
// map is created.
|
||||
// Deprecated: Use Args.Add()
|
||||
func ParseFlag(arg string, prev Args) (Args, error) {
|
||||
filters := prev
|
||||
if len(arg) == 0 {
|
||||
|
@ -53,74 +60,95 @@ func ParseFlag(arg string, prev Args) (Args, error) {
|
|||
return filters, nil
|
||||
}
|
||||
|
||||
// ErrBadFormat is an error returned in case of bad format for a filter.
|
||||
// ErrBadFormat is an error returned when a filter is not in the form key=value
|
||||
//
|
||||
// Deprecated: this error will be removed in a future version
|
||||
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
|
||||
|
||||
// ToParam packs the Args into a string for easy transport from client to server.
|
||||
// ToParam encodes the Args as args JSON encoded string
|
||||
//
|
||||
// Deprecated: use ToJSON
|
||||
func ToParam(a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
return ToJSON(a)
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON byte representation of the Args
|
||||
func (args Args) MarshalJSON() ([]byte, error) {
|
||||
if len(args.fields) == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
return json.Marshal(args.fields)
|
||||
}
|
||||
|
||||
// ToJSON returns the Args as a JSON encoded string
|
||||
func ToJSON(a Args) (string, error) {
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(a.fields)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
buf, err := json.Marshal(a)
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
|
||||
// The generated string will depend on the specified version (corresponding to the API version).
|
||||
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
|
||||
// then the encoded format will use an older legacy format where the values are a
|
||||
// list of strings, instead of a set.
|
||||
//
|
||||
// Deprecated: Use ToJSON
|
||||
func ToParamWithVersion(version string, a Args) (string, error) {
|
||||
// this way we don't URL encode {}, just empty space
|
||||
if a.Len() == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// for daemons older than v1.10, filter must be of the form map[string][]string
|
||||
var buf []byte
|
||||
var err error
|
||||
if version != "" && versions.LessThan(version, "1.22") {
|
||||
buf, err = json.Marshal(convertArgsToSlice(a.fields))
|
||||
} else {
|
||||
buf, err = json.Marshal(a.fields)
|
||||
buf, err := json.Marshal(convertArgsToSlice(a.fields))
|
||||
return string(buf), err
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
|
||||
return ToJSON(a)
|
||||
}
|
||||
|
||||
// FromParam unpacks the filter Args.
|
||||
// FromParam decodes a JSON encoded string into Args
|
||||
//
|
||||
// Deprecated: use FromJSON
|
||||
func FromParam(p string) (Args, error) {
|
||||
if len(p) == 0 {
|
||||
return NewArgs(), nil
|
||||
}
|
||||
|
||||
r := strings.NewReader(p)
|
||||
d := json.NewDecoder(r)
|
||||
|
||||
m := map[string]map[string]bool{}
|
||||
if err := d.Decode(&m); err != nil {
|
||||
r.Seek(0, 0)
|
||||
|
||||
// Allow parsing old arguments in slice format.
|
||||
// Because other libraries might be sending them in this format.
|
||||
deprecated := map[string][]string{}
|
||||
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
|
||||
m = deprecatedArgs(deprecated)
|
||||
} else {
|
||||
return NewArgs(), err
|
||||
}
|
||||
}
|
||||
return Args{m}, nil
|
||||
return FromJSON(p)
|
||||
}
|
||||
|
||||
// Get returns the list of values associates with a field.
|
||||
// It returns a slice of strings to keep backwards compatibility with old code.
|
||||
func (filters Args) Get(field string) []string {
|
||||
values := filters.fields[field]
|
||||
// FromJSON decodes a JSON encoded string into Args
|
||||
func FromJSON(p string) (Args, error) {
|
||||
args := NewArgs()
|
||||
|
||||
if p == "" {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
raw := []byte(p)
|
||||
err := json.Unmarshal(raw, &args)
|
||||
if err == nil {
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// Fallback to parsing arguments in the legacy slice format
|
||||
deprecated := map[string][]string{}
|
||||
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
|
||||
return args, err
|
||||
}
|
||||
|
||||
args.fields = deprecatedArgs(deprecated)
|
||||
return args, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON populates the Args from JSON encoded bytes
|
||||
func (args Args) UnmarshalJSON(raw []byte) error {
|
||||
if len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal(raw, &args.fields)
|
||||
}
|
||||
|
||||
// Get returns the list of values associated with the key
|
||||
func (args Args) Get(key string) []string {
|
||||
values := args.fields[key]
|
||||
if values == nil {
|
||||
return make([]string, 0)
|
||||
}
|
||||
|
@ -131,37 +159,34 @@ func (filters Args) Get(field string) []string {
|
|||
return slice
|
||||
}
|
||||
|
||||
// Add adds a new value to a filter field.
|
||||
func (filters Args) Add(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
filters.fields[name][value] = true
|
||||
// Add a new value to the set of values
|
||||
func (args Args) Add(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
args.fields[key][value] = true
|
||||
} else {
|
||||
filters.fields[name] = map[string]bool{value: true}
|
||||
args.fields[key] = map[string]bool{value: true}
|
||||
}
|
||||
}
|
||||
|
||||
// Del removes a value from a filter field.
|
||||
func (filters Args) Del(name, value string) {
|
||||
if _, ok := filters.fields[name]; ok {
|
||||
delete(filters.fields[name], value)
|
||||
if len(filters.fields[name]) == 0 {
|
||||
delete(filters.fields, name)
|
||||
// Del removes a value from the set
|
||||
func (args Args) Del(key, value string) {
|
||||
if _, ok := args.fields[key]; ok {
|
||||
delete(args.fields[key], value)
|
||||
if len(args.fields[key]) == 0 {
|
||||
delete(args.fields, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of fields in the arguments.
|
||||
func (filters Args) Len() int {
|
||||
return len(filters.fields)
|
||||
// Len returns the number of keys in the mapping
|
||||
func (args Args) Len() int {
|
||||
return len(args.fields)
|
||||
}
|
||||
|
||||
// MatchKVList returns true if the values for the specified field matches the ones
|
||||
// from the sources.
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
|
||||
// it returns true.
|
||||
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
||||
fieldValues := filters.fields[field]
|
||||
// MatchKVList returns true if all the pairs in sources exist as key=value
|
||||
// pairs in the mapping at key, or if there are no values at key.
|
||||
func (args Args) MatchKVList(key string, sources map[string]string) bool {
|
||||
fieldValues := args.fields[key]
|
||||
|
||||
// do not filter if there is no filter set or the filter cannot be determined
|
||||
if len(fieldValues) == 0 {
|
||||
|
@ -172,8 +197,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
for name2match := range fieldValues {
|
||||
testKV := strings.SplitN(name2match, "=", 2)
|
||||
for value := range fieldValues {
|
||||
testKV := strings.SplitN(value, "=", 2)
|
||||
|
||||
v, ok := sources[testKV[0]]
|
||||
if !ok {
|
||||
|
@ -187,16 +212,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
// Match returns true if the values for the specified field matches the source string
|
||||
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||
// field is 'image.name' and source is 'ubuntu'
|
||||
// it returns true.
|
||||
func (filters Args) Match(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
// Match returns true if any of the values at key match the source string
|
||||
func (args Args) Match(field, source string) bool {
|
||||
if args.ExactMatch(field, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
fieldValues := args.fields[field]
|
||||
for name2match := range fieldValues {
|
||||
match, err := regexp.MatchString(name2match, source)
|
||||
if err != nil {
|
||||
|
@ -209,9 +231,9 @@ func (filters Args) Match(field, source string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ExactMatch returns true if the source matches exactly one of the filters.
|
||||
func (filters Args) ExactMatch(field, source string) bool {
|
||||
fieldValues, ok := filters.fields[field]
|
||||
// ExactMatch returns true if the source matches exactly one of the values.
|
||||
func (args Args) ExactMatch(key, source string) bool {
|
||||
fieldValues, ok := args.fields[key]
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if !ok || len(fieldValues) == 0 {
|
||||
return true
|
||||
|
@ -221,14 +243,15 @@ func (filters Args) ExactMatch(field, source string) bool {
|
|||
return fieldValues[source]
|
||||
}
|
||||
|
||||
// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
|
||||
func (filters Args) UniqueExactMatch(field, source string) bool {
|
||||
fieldValues := filters.fields[field]
|
||||
// UniqueExactMatch returns true if there is only one value and the source
|
||||
// matches exactly the value.
|
||||
func (args Args) UniqueExactMatch(key, source string) bool {
|
||||
fieldValues := args.fields[key]
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 {
|
||||
return true
|
||||
}
|
||||
if len(filters.fields[field]) != 1 {
|
||||
if len(args.fields[key]) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -236,14 +259,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool {
|
|||
return fieldValues[source]
|
||||
}
|
||||
|
||||
// FuzzyMatch returns true if the source matches exactly one of the filters,
|
||||
// or the source has one of the filters as a prefix.
|
||||
func (filters Args) FuzzyMatch(field, source string) bool {
|
||||
if filters.ExactMatch(field, source) {
|
||||
// FuzzyMatch returns true if the source matches exactly one value, or the
|
||||
// source has one of the values as a prefix.
|
||||
func (args Args) FuzzyMatch(key, source string) bool {
|
||||
if args.ExactMatch(key, source) {
|
||||
return true
|
||||
}
|
||||
|
||||
fieldValues := filters.fields[field]
|
||||
fieldValues := args.fields[key]
|
||||
for prefix := range fieldValues {
|
||||
if strings.HasPrefix(source, prefix) {
|
||||
return true
|
||||
|
@ -252,30 +275,47 @@ func (filters Args) FuzzyMatch(field, source string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// Include returns true if the name of the field to filter is in the filters.
|
||||
func (filters Args) Include(field string) bool {
|
||||
_, ok := filters.fields[field]
|
||||
// Include returns true if the key exists in the mapping
|
||||
//
|
||||
// Deprecated: use Contains
|
||||
func (args Args) Include(field string) bool {
|
||||
_, ok := args.fields[field]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Validate ensures that all the fields in the filter are valid.
|
||||
// It returns an error as soon as it finds an invalid field.
|
||||
func (filters Args) Validate(accepted map[string]bool) error {
|
||||
for name := range filters.fields {
|
||||
// Contains returns true if the key exists in the mapping
|
||||
func (args Args) Contains(field string) bool {
|
||||
_, ok := args.fields[field]
|
||||
return ok
|
||||
}
|
||||
|
||||
type invalidFilter string
|
||||
|
||||
func (e invalidFilter) Error() string {
|
||||
return "Invalid filter '" + string(e) + "'"
|
||||
}
|
||||
|
||||
func (invalidFilter) InvalidParameter() {}
|
||||
|
||||
// Validate compared the set of accepted keys against the keys in the mapping.
|
||||
// An error is returned if any mapping keys are not in the accepted set.
|
||||
func (args Args) Validate(accepted map[string]bool) error {
|
||||
for name := range args.fields {
|
||||
if !accepted[name] {
|
||||
return fmt.Errorf("Invalid filter '%s'", name)
|
||||
return invalidFilter(name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WalkValues iterates over the list of filtered values for a field.
|
||||
// It stops the iteration if it finds an error and it returns that error.
|
||||
func (filters Args) WalkValues(field string, op func(value string) error) error {
|
||||
if _, ok := filters.fields[field]; !ok {
|
||||
// WalkValues iterates over the list of values for a key in the mapping and calls
|
||||
// op() for each value. If op returns an error the iteration stops and the
|
||||
// error is returned.
|
||||
func (args Args) WalkValues(field string, op func(value string) error) error {
|
||||
if _, ok := args.fields[field]; !ok {
|
||||
return nil
|
||||
}
|
||||
for v := range filters.fields[field] {
|
||||
for v := range args.fields[field] {
|
||||
if err := op(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
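A brief usage sketch of the reworked Args helpers above (Add, Contains, ExactMatch, Validate). It assumes these are the exported helpers of the github.com/docker/docker/api/types/filters package and that NewArgs is its constructor; both are assumptions outside this hunk.

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/filters"
)

func main() {
    // Build a filter set and attach values under keys.
    args := filters.NewArgs()
    args.Add("label", "env=prod")
    args.Add("dangling", "true")

    // Validate rejects any key outside the accepted set.
    accepted := map[string]bool{"label": true, "dangling": true}
    if err := args.Validate(accepted); err != nil {
        fmt.Println("invalid filter:", err)
    }

    // ExactMatch checks a single value; Contains checks key presence.
    fmt.Println(args.ExactMatch("dangling", "true")) // true
    fmt.Println(args.Contains("label"))              // true
}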
|
4 vendor/github.com/docker/docker/api/types/mount/mount.go generated vendored
|
@ -15,6 +15,8 @@ const (
|
|||
TypeVolume Type = "volume"
|
||||
// TypeTmpfs is the type for mounting tmpfs
|
||||
TypeTmpfs Type = "tmpfs"
|
||||
// TypeNamedPipe is the type for mounting Windows named pipes
|
||||
TypeNamedPipe Type = "npipe"
|
||||
)
|
||||
|
||||
// Mount represents a mount (volume).
|
||||
|
@ -65,7 +67,7 @@ var Propagations = []Propagation{
|
|||
type Consistency string
|
||||
|
||||
const (
|
||||
// ConsistencyFull guarantees bind-mount-like consistency
|
||||
// ConsistencyFull guarantees bind mount-like consistency
|
||||
ConsistencyFull Consistency = "consistent"
|
||||
// ConsistencyCached mounts can cache read data and FS structure
|
||||
ConsistencyCached Consistency = "cached"
|
||||
|
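To illustrate the constants touched above, a small sketch constructing Mount values with the tmpfs type and a bind-mount consistency mode; the TypeBind constant is assumed from the same package, as it is not part of this hunk.

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/mount"
)

func main() {
    // A tmpfs mount only needs a target inside the container.
    tmp := mount.Mount{
        Type:   mount.TypeTmpfs,
        Target: "/run/cache",
    }

    // Consistency modes apply to bind-style mounts.
    bind := mount.Mount{
        Type:        mount.TypeBind,
        Source:      "/host/src",
        Target:      "/src",
        Consistency: mount.ConsistencyCached,
    }

    fmt.Println(tmp.Type, bind.Consistency)
}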
|
2 vendor/github.com/docker/docker/api/types/plugin.go generated vendored
|
@ -11,7 +11,7 @@ type Plugin struct {
|
|||
// Required: true
|
||||
Config PluginConfig `json:"Config"`
|
||||
|
||||
// True when the plugin is running. False when the plugin is not running, only installed.
|
||||
// True if the plugin is running. False if the plugin is not running, only installed.
|
||||
// Required: true
|
||||
Enabled bool `json:"Enabled"`
|
||||
|
||||
|
|
8 vendor/github.com/docker/docker/api/types/plugin_responses.go generated vendored
|
@ -9,14 +9,6 @@ import (
|
|||
// PluginsListResponse contains the response for the Engine API
|
||||
type PluginsListResponse []*Plugin
|
||||
|
||||
const (
|
||||
authzDriver = "AuthzDriver"
|
||||
graphDriver = "GraphDriver"
|
||||
ipamDriver = "IpamDriver"
|
||||
networkDriver = "NetworkDriver"
|
||||
volumeDriver = "VolumeDriver"
|
||||
)
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
|
||||
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
|
||||
versionIndex := len(p)
|
||||
|
|
2 vendor/github.com/docker/docker/api/types/swarm/common.go generated vendored
|
@ -20,7 +20,7 @@ type Annotations struct {
|
|||
Labels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
// Driver represents a driver (network, logging).
|
||||
// Driver represents a driver (network, logging, secrets backend).
|
||||
type Driver struct {
|
||||
Name string `json:",omitempty"`
|
||||
Options map[string]string `json:",omitempty"`
|
||||
|
|
3 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go generated vendored Normal file
@ -0,0 +1,3 @@
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto

package runtime
|
712 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go generated vendored Normal file
|
@ -0,0 +1,712 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: plugin.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package runtime is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
plugin.proto
|
||||
|
||||
It has these top-level messages:
|
||||
PluginSpec
|
||||
PluginPrivilege
|
||||
*/
|
||||
package runtime
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// PluginSpec defines the base payload which clients can specify for creating
|
||||
// a service with the plugin runtime.
|
||||
type PluginSpec struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"`
|
||||
Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"`
|
||||
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PluginSpec) Reset() { *m = PluginSpec{} }
|
||||
func (m *PluginSpec) String() string { return proto.CompactTextString(m) }
|
||||
func (*PluginSpec) ProtoMessage() {}
|
||||
func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} }
|
||||
|
||||
func (m *PluginSpec) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetRemote() string {
|
||||
if m != nil {
|
||||
return m.Remote
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetPrivileges() []*PluginPrivilege {
|
||||
if m != nil {
|
||||
return m.Privileges
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PluginSpec) GetDisabled() bool {
|
||||
if m != nil {
|
||||
return m.Disabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PluginPrivilege describes a permission the user has to accept
|
||||
// upon installing a plugin.
|
||||
type PluginPrivilege struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
|
||||
Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} }
|
||||
func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) }
|
||||
func (*PluginPrivilege) ProtoMessage() {}
|
||||
func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} }
|
||||
|
||||
func (m *PluginPrivilege) GetName() string {
|
||||
if m != nil {
|
||||
return m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) GetDescription() string {
|
||||
if m != nil {
|
||||
return m.Description
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) GetValue() []string {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*PluginSpec)(nil), "PluginSpec")
|
||||
proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege")
|
||||
}
|
||||
func (m *PluginSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.Remote) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote)))
|
||||
i += copy(dAtA[i:], m.Remote)
|
||||
}
|
||||
if len(m.Privileges) > 0 {
|
||||
for _, msg := range m.Privileges {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if m.Disabled {
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
if m.Disabled {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Name) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name)))
|
||||
i += copy(dAtA[i:], m.Name)
|
||||
}
|
||||
if len(m.Description) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description)))
|
||||
i += copy(dAtA[i:], m.Description)
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
for _, s := range m.Value {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *PluginSpec) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
l = len(m.Remote)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
if len(m.Privileges) > 0 {
|
||||
for _, e := range m.Privileges {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Disabled {
|
||||
n += 2
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *PluginPrivilege) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Name)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
l = len(m.Description)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
for _, s := range m.Value {
|
||||
l = len(s)
|
||||
n += 1 + l + sovPlugin(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovPlugin(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozPlugin(x uint64) (n int) {
|
||||
return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *PluginSpec) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Remote = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Privileges = append(m.Privileges, &PluginPrivilege{})
|
||||
if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Disabled = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *PluginPrivilege) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Name = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Description = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = append(m.Value, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipPlugin(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthPlugin
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipPlugin(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthPlugin
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowPlugin
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipPlugin(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) }
|
||||
|
||||
var fileDescriptorPlugin = []byte{
|
||||
// 196 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
|
||||
0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b,
|
||||
0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30,
|
||||
0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12,
|
||||
0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35,
|
||||
0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c,
|
||||
0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a,
|
||||
0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab,
|
||||
0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0,
|
||||
0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33,
|
||||
0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
|
||||
0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79,
|
||||
0x0c, 0x01, 0x00, 0x00,
|
||||
}
|
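The generated marshaling code above can be exercised with a simple round trip. A minimal sketch, assuming the generated package is importable as github.com/docker/docker/api/types/swarm/runtime:

package main

import (
    "fmt"

    "github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
    spec := &runtime.PluginSpec{
        Name:   "sample/plugin",
        Remote: "sample/plugin:latest",
        Privileges: []*runtime.PluginPrivilege{
            {Name: "network", Description: "allow access to the host network", Value: []string{"host"}},
        },
    }

    // Marshal writes the varint-framed protobuf encoding...
    data, err := spec.Marshal()
    if err != nil {
        panic(err)
    }

    // ...and Unmarshal reads it back field by field.
    var decoded runtime.PluginSpec
    if err := decoded.Unmarshal(data); err != nil {
        panic(err)
    }
    fmt.Println(decoded.Name, len(decoded.Privileges), decoded.Disabled)
}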
3 vendor/github.com/docker/docker/api/types/swarm/secret.go generated vendored
|
@ -12,7 +12,8 @@ type Secret struct {
|
|||
// SecretSpec represents a secret specification from a secret in swarm
|
||||
type SecretSpec struct {
|
||||
Annotations
|
||||
Data []byte `json:",omitempty"`
|
||||
Data []byte `json:",omitempty"`
|
||||
Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
|
||||
}
|
||||
|
||||
// SecretReferenceFileTarget is a file target in a secret reference
|
||||
|
|
2 vendor/github.com/docker/docker/api/types/swarm/swarm.go generated vendored
|
@ -2,7 +2,7 @@ package swarm
|
|||
|
||||
import "time"
|
||||
|
||||
// ClusterInfo represents info about the cluster for outputing in "info"
|
||||
// ClusterInfo represents info about the cluster for outputting in "info"
|
||||
// it contains the same information as "Swarm", but without the JoinTokens
|
||||
type ClusterInfo struct {
|
||||
ID string
|
||||
|
|
46 vendor/github.com/docker/docker/api/types/swarm/task.go generated vendored
|
@ -1,6 +1,10 @@
|
|||
package swarm
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/swarm/runtime"
|
||||
)
|
||||
|
||||
// TaskState represents the state of a task.
|
||||
type TaskState string
|
||||
|
@ -47,11 +51,16 @@ type Task struct {
|
|||
Status TaskStatus `json:",omitempty"`
|
||||
DesiredState TaskState `json:",omitempty"`
|
||||
NetworksAttachments []NetworkAttachment `json:",omitempty"`
|
||||
GenericResources []GenericResource `json:",omitempty"`
|
||||
}
|
||||
|
||||
// TaskSpec represents the spec of a task.
|
||||
type TaskSpec struct {
|
||||
ContainerSpec ContainerSpec `json:",omitempty"`
|
||||
// ContainerSpec and PluginSpec are mutually exclusive.
|
||||
// PluginSpec will only be used when the `Runtime` field is set to `plugin`
|
||||
ContainerSpec *ContainerSpec `json:",omitempty"`
|
||||
PluginSpec *runtime.PluginSpec `json:",omitempty"`
|
||||
|
||||
Resources *ResourceRequirements `json:",omitempty"`
|
||||
RestartPolicy *RestartPolicy `json:",omitempty"`
|
||||
Placement *Placement `json:",omitempty"`
|
||||
|
@ -67,15 +76,38 @@ type TaskSpec struct {
|
|||
ForceUpdate uint64
|
||||
|
||||
Runtime RuntimeType `json:",omitempty"`
|
||||
// TODO (ehazlett): this should be removed and instead
|
||||
// use struct tags (proto) for the runtimes
|
||||
RuntimeData []byte `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Resources represents resources (CPU/Memory).
|
||||
type Resources struct {
|
||||
NanoCPUs int64 `json:",omitempty"`
|
||||
MemoryBytes int64 `json:",omitempty"`
|
||||
NanoCPUs int64 `json:",omitempty"`
|
||||
MemoryBytes int64 `json:",omitempty"`
|
||||
GenericResources []GenericResource `json:",omitempty"`
|
||||
}
|
||||
|
||||
// GenericResource represents a "user defined" resource which can
|
||||
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
|
||||
type GenericResource struct {
|
||||
NamedResourceSpec *NamedGenericResource `json:",omitempty"`
|
||||
DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NamedGenericResource represents a "user defined" resource which is defined
|
||||
// as a string.
|
||||
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
|
||||
// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
|
||||
type NamedGenericResource struct {
|
||||
Kind string `json:",omitempty"`
|
||||
Value string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// DiscreteGenericResource represents a "user defined" resource which is defined
|
||||
// as an integer
|
||||
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
|
||||
// Value is used to count the resource (SSD=5, HDD=3, ...)
|
||||
type DiscreteGenericResource struct {
|
||||
Kind string `json:",omitempty"`
|
||||
Value int64 `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ResourceRequirements represents resources requirements.
|
||||
|
|
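A short sketch of the GenericResource variants described above, one named (SSD=sda1) and one discrete (GPU=2); it assumes the swarm import path used elsewhere in this vendor tree:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types/swarm"
)

func main() {
    resources := []swarm.GenericResource{
        // A string-valued resource, identified by its value.
        {NamedResourceSpec: &swarm.NamedGenericResource{Kind: "SSD", Value: "sda1"}},
        // An integer-valued resource, counted by its value.
        {DiscreteResourceSpec: &swarm.DiscreteGenericResource{Kind: "GPU", Value: 2}},
    }

    out, err := json.Marshal(resources)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}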
4 vendor/github.com/docker/docker/api/types/time/timestamp.go generated vendored
|
@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
|
|||
}
|
||||
|
||||
var format string
|
||||
var parseInLocation bool
|
||||
|
||||
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
|
||||
parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
|
||||
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
|
||||
|
||||
if strings.Contains(value, ".") {
|
||||
if parseInLocation {
|
||||
|
|
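The parse-versus-parse-in-location choice simplified above can be seen from the caller's side. A minimal sketch, assuming the package is imported under the alias timetypes:

package main

import (
    "fmt"
    "time"

    timetypes "github.com/docker/docker/api/types/time"
)

func main() {
    ref := time.Now()

    // A value with an explicit zone ("Z", "+") is parsed absolutely...
    abs, err := timetypes.GetTimestamp("2017-01-02T15:04:05Z", ref)
    if err != nil {
        panic(err)
    }

    // ...while a zone-less value is parsed in the local location.
    local, err := timetypes.GetTimestamp("2017-01-02T15:04:05", ref)
    if err != nil {
        panic(err)
    }

    fmt.Println(abs, local) // both are "seconds.nanoseconds" strings
}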
31 vendor/github.com/docker/docker/api/types/types.go generated vendored
|
@ -45,6 +45,12 @@ type ImageInspect struct {
|
|||
VirtualSize int64
|
||||
GraphDriver GraphDriverData
|
||||
RootFS RootFS
|
||||
Metadata ImageMetadata
|
||||
}
|
||||
|
||||
// ImageMetadata contains engine-local data about the image
|
||||
type ImageMetadata struct {
|
||||
LastTagTime time.Time `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Container contains response of Engine API:
|
||||
|
@ -162,6 +168,7 @@ type Info struct {
|
|||
RegistryConfig *registry.ServiceConfig
|
||||
NCPU int
|
||||
MemTotal int64
|
||||
GenericResources []swarm.GenericResource
|
||||
DockerRootDir string
|
||||
HTTPProxy string `json:"HttpProxy"`
|
||||
HTTPSProxy string `json:"HttpsProxy"`
|
||||
|
@ -277,7 +284,7 @@ type Health struct {
|
|||
// ContainerState stores container's running state
|
||||
// it's part of ContainerJSONBase and will return by "inspect" command
|
||||
type ContainerState struct {
|
||||
Status string
|
||||
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
|
@ -320,6 +327,7 @@ type ContainerJSONBase struct {
|
|||
Name string
|
||||
RestartCount int
|
||||
Driver string
|
||||
Platform string
|
||||
MountLabel string
|
||||
ProcessLabel string
|
||||
AppArmorProfile string
|
||||
|
@ -468,6 +476,12 @@ type NetworkDisconnect struct {
|
|||
Force bool
|
||||
}
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network
|
||||
type NetworkInspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// Checkpoint represents the details of a checkpoint
|
||||
type Checkpoint struct {
|
||||
Name string // Name is the name of the checkpoint
|
||||
|
@ -482,10 +496,11 @@ type Runtime struct {
|
|||
// DiskUsage contains response of Engine API:
|
||||
// GET "/system/df"
|
||||
type DiskUsage struct {
|
||||
LayersSize int64
|
||||
Images []*ImageSummary
|
||||
Containers []*Container
|
||||
Volumes []*Volume
|
||||
LayersSize int64
|
||||
Images []*ImageSummary
|
||||
Containers []*Container
|
||||
Volumes []*Volume
|
||||
BuilderSize int64
|
||||
}
|
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
|
||||
|
@ -509,6 +524,12 @@ type ImagesPruneReport struct {
|
|||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// BuildCachePruneReport contains the response for Engine API:
|
||||
// POST "/build/prune"
|
||||
type BuildCachePruneReport struct {
|
||||
SpaceReclaimed uint64
|
||||
}
|
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
|
||||
// POST "/networks/prune"
|
||||
type NetworksPruneReport struct {
|
||||
|
|
17 vendor/github.com/docker/docker/api/types/volume.go generated vendored
|
@ -7,6 +7,9 @@ package types
|
|||
// swagger:model Volume
|
||||
type Volume struct {
|
||||
|
||||
// Date/Time the volume was created.
|
||||
CreatedAt string `json:"CreatedAt,omitempty"`
|
||||
|
||||
// Name of the volume driver used by the volume.
|
||||
// Required: true
|
||||
Driver string `json:"Driver"`
|
||||
|
@ -44,15 +47,23 @@ type Volume struct {
|
|||
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeUsageData volume usage data
|
||||
// VolumeUsageData Usage details about the volume. This information is used by the
|
||||
// `GET /system/df` endpoint, and omitted in other endpoints.
|
||||
//
|
||||
// swagger:model VolumeUsageData
|
||||
type VolumeUsageData struct {
|
||||
|
||||
// The number of containers referencing this volume.
|
||||
// The number of containers referencing this volume. This field
|
||||
// is set to `-1` if the reference-count is not available.
|
||||
//
|
||||
// Required: true
|
||||
RefCount int64 `json:"RefCount"`
|
||||
|
||||
// The disk space used by the volume (local driver only)
|
||||
// Amount of disk space used by the volume (in bytes). This information
|
||||
// is only available for volumes created with the `"local"` volume
|
||||
// driver. For volumes created with other volume drivers, this field
|
||||
// is set to `-1` ("not available")
|
||||
//
|
||||
// Required: true
|
||||
Size int64 `json:"Size"`
|
||||
}
|
||||
|
|
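A small sketch of how the -1 sentinels documented above are typically interpreted by a caller; the values here are made up for illustration:

package main

import (
    "fmt"

    "github.com/docker/docker/api/types"
)

func main() {
    // RefCount and Size report -1 when the information is not available,
    // e.g. for volumes not created with the "local" driver.
    usage := types.VolumeUsageData{RefCount: -1, Size: 2048}

    if usage.RefCount < 0 {
        fmt.Println("reference count not available")
    } else {
        fmt.Printf("%d containers reference this volume\n", usage.RefCount)
    }
    fmt.Printf("disk usage: %d bytes\n", usage.Size)
}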
48 vendor/github.com/docker/docker/builder/builder.go generated vendored
|
@ -7,12 +7,13 @@ package builder
|
|||
import (
|
||||
"io"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -24,7 +25,7 @@ const (
|
|||
// instructions in the builder.
|
||||
type Source interface {
|
||||
// Root returns root path for accessing source
|
||||
Root() string
|
||||
Root() containerfs.ContainerFS
|
||||
// Close allows to signal that the filesystem tree won't be used anymore.
|
||||
// For Context implementations using a temporary directory, it is recommended to
|
||||
// delete the temporary directory in Close().
|
||||
|
@ -36,34 +37,37 @@ type Source interface {
|
|||
// Backend abstracts calls to a Docker Daemon.
|
||||
type Backend interface {
|
||||
ImageBackend
|
||||
ExecBackend
|
||||
|
||||
// Commit creates a new Docker image from an existing Docker container.
|
||||
Commit(string, *backend.ContainerCommitConfig) (string, error)
|
||||
// ContainerCreateWorkdir creates the workdir
|
||||
ContainerCreateWorkdir(containerID string) error
|
||||
|
||||
CreateImage(config []byte, parent string, platform string) (Image, error)
|
||||
|
||||
ImageCacheBuilder
|
||||
}
|
||||
|
||||
// ImageBackend are the interface methods required from an image component
|
||||
type ImageBackend interface {
|
||||
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
|
||||
}
|
||||
|
||||
// ExecBackend contains the interface methods required for executing containers
|
||||
type ExecBackend interface {
|
||||
// ContainerAttachRaw attaches to container.
|
||||
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
|
||||
// ContainerCreate creates a new Docker container and returns potential warnings
|
||||
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
|
||||
// ContainerRm removes a container specified by `id`.
|
||||
ContainerRm(name string, config *types.ContainerRmConfig) error
|
||||
// Commit creates a new Docker image from an existing Docker container.
|
||||
Commit(string, *backend.ContainerCommitConfig) (string, error)
|
||||
// ContainerKill stops the container execution abruptly.
|
||||
ContainerKill(containerID string, sig uint64) error
|
||||
// ContainerStart starts a new container
|
||||
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
|
||||
// ContainerWait stops processing until the given container is stopped.
|
||||
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
|
||||
// ContainerCreateWorkdir creates the workdir
|
||||
ContainerCreateWorkdir(containerID string) error
|
||||
|
||||
// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
|
||||
// specified by a container object.
|
||||
// TODO: extract in the builder instead of passing `decompress`
|
||||
// TODO: use containerd/fs.changestream instead as a source
|
||||
CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error
|
||||
}
|
||||
|
||||
// ImageBackend are the interface methods required from an image component
|
||||
type ImageBackend interface {
|
||||
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
|
||||
}
|
||||
|
||||
// Result is the output produced by a Builder
|
||||
|
@ -75,7 +79,7 @@ type Result struct {
|
|||
// ImageCacheBuilder represents a generator for stateful image cache.
|
||||
type ImageCacheBuilder interface {
|
||||
// MakeImageCache creates a stateful image cache.
|
||||
MakeImageCache(cacheFrom []string) ImageCache
|
||||
MakeImageCache(cacheFrom []string, platform string) ImageCache
|
||||
}
|
||||
|
||||
// ImageCache abstracts an image cache.
|
||||
|
@ -90,10 +94,14 @@ type ImageCache interface {
|
|||
type Image interface {
|
||||
ImageID() string
|
||||
RunConfig() *container.Config
|
||||
MarshalJSON() ([]byte, error)
|
||||
OperatingSystem() string
|
||||
}
|
||||
|
||||
// ReleaseableLayer is an image layer that can be mounted and released
|
||||
type ReleaseableLayer interface {
|
||||
Release() error
|
||||
Mount() (string, error)
|
||||
Mount() (containerfs.ContainerFS, error)
|
||||
Commit(platform string) (ReleaseableLayer, error)
|
||||
DiffID() layer.DiffID
|
||||
}
|
||||
|
|
19 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go generated vendored
|
@ -38,8 +38,23 @@ func ReadAll(reader io.Reader) ([]string, error) {
|
|||
if pattern == "" {
|
||||
continue
|
||||
}
|
||||
pattern = filepath.Clean(pattern)
|
||||
pattern = filepath.ToSlash(pattern)
|
||||
// normalize absolute paths to paths relative to the context
|
||||
// (taking care of '!' prefix)
|
||||
invert := pattern[0] == '!'
|
||||
if invert {
|
||||
pattern = strings.TrimSpace(pattern[1:])
|
||||
}
|
||||
if len(pattern) > 0 {
|
||||
pattern = filepath.Clean(pattern)
|
||||
pattern = filepath.ToSlash(pattern)
|
||||
if len(pattern) > 1 && pattern[0] == '/' {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
}
|
||||
if invert {
|
||||
pattern = "!" + pattern
|
||||
}
|
||||
|
||||
excludes = append(excludes, pattern)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
|
|
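The normalization added above (leading '!' handled before cleaning, absolute patterns made context-relative) can be checked with a quick sketch:

package main

import (
    "fmt"
    "strings"

    "github.com/docker/docker/builder/dockerignore"
)

func main() {
    // Absolute patterns lose their leading slash, and a '!' prefix survives
    // the cleanup of the pattern that follows it.
    input := strings.NewReader("/foo/bar\n!/foo/bar/baz\nvendor/\n")

    patterns, err := dockerignore.ReadAll(input)
    if err != nil {
        panic(err)
    }
    fmt.Println(patterns) // [foo/bar !foo/bar/baz vendor]
}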
129 vendor/github.com/docker/docker/builder/remotecontext/archive.go generated vendored Normal file
|
@ -0,0 +1,129 @@
|
|||
package remotecontext
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
"github.com/docker/docker/pkg/containerfs"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/tarsum"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type archiveContext struct {
|
||||
root containerfs.ContainerFS
|
||||
sums tarsum.FileInfoSums
|
||||
}
|
||||
|
||||
func (c *archiveContext) Close() error {
|
||||
return c.root.RemoveAll(c.root.Path())
|
||||
}
|
||||
|
||||
func convertPathError(err error, cleanpath string) error {
|
||||
if err, ok := err.(*os.PathError); ok {
|
||||
err.Path = cleanpath
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
type modifiableContext interface {
|
||||
builder.Source
|
||||
// Remove deletes the entry specified by `path`.
|
||||
// It is usual for directory entries to delete all its subentries.
|
||||
Remove(path string) error
|
||||
}
|
||||
|
||||
// FromArchive returns a build source from a tar stream.
|
||||
//
|
||||
// It extracts the tar stream to a temporary folder that is deleted as soon as
|
||||
// the Context is closed.
|
||||
// As the extraction happens, a tarsum is calculated for every file, and the set of
|
||||
// all those sums then becomes the source of truth for all operations on this Context.
|
||||
//
|
||||
// Closing tarStream has to be done by the caller.
|
||||
func FromArchive(tarStream io.Reader) (builder.Source, error) {
|
||||
root, err := ioutils.TempDir("", "docker-builder")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Assume local file system. Since it's coming from a tar file.
|
||||
tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}
|
||||
|
||||
// Make sure we clean-up upon error. In the happy case the caller
|
||||
// is expected to manage the clean-up
|
||||
defer func() {
|
||||
if err != nil {
|
||||
tsc.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
decompressedStream, err := archive.DecompressStream(tarStream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = chrootarchive.Untar(sum, root, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tsc.sums = sum.GetSums()
|
||||
|
||||
return tsc, nil
|
||||
}
|
||||
|
||||
func (c *archiveContext) Root() containerfs.ContainerFS {
|
||||
return c.root
|
||||
}
|
||||
|
||||
func (c *archiveContext) Remove(path string) error {
|
||||
_, fullpath, err := normalize(path, c.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.root.RemoveAll(fullpath)
|
||||
}
|
||||
|
||||
func (c *archiveContext) Hash(path string) (string, error) {
|
||||
cleanpath, fullpath, err := normalize(path, c.root)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
rel, err := c.root.Rel(c.root.Path(), fullpath)
|
||||
if err != nil {
|
||||
return "", convertPathError(err, cleanpath)
|
||||
}
|
||||
|
||||
// Use the checksum of the followed path(not the possible symlink) because
|
||||
// this is the file that is actually copied.
|
||||
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
|
||||
return tsInfo.Sum(), nil
|
||||
}
|
||||
// We set sum to path by default for the case where GetFile returns nil.
|
||||
// The usual case is if relative path is empty.
|
||||
return path, nil // backwards compat TODO: see if really needed
|
||||
}
|
||||
|
||||
func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
|
||||
cleanPath = root.Clean(string(root.Separator()) + path)[1:]
|
||||
fullPath, err = root.ResolveScopedPath(path, true)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
|
||||
}
|
||||
if _, err := root.Lstat(fullPath); err != nil {
|
||||
return "", "", errors.WithStack(convertPathError(err, path))
|
||||
}
|
||||
return
|
||||
}
|
183 vendor/github.com/docker/docker/builder/remotecontext/detect.go generated vendored Normal file
|
@ -0,0 +1,183 @@
|
|||
package remotecontext
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/continuity/driver"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/builder/dockerfile/parser"
|
||||
"github.com/docker/docker/builder/dockerignore"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/urlutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ClientSessionRemote is identifier for client-session context transport
|
||||
const ClientSessionRemote = "client-session"
|
||||
|
||||
// Detect returns a context and dockerfile from remote location or local
|
||||
// archive. progressReader is only used if remoteURL is actually a URL
|
||||
// (not empty, and not a Git endpoint).
|
||||
func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) {
|
||||
remoteURL := config.Options.RemoteContext
|
||||
dockerfilePath := config.Options.Dockerfile
|
||||
|
||||
switch {
|
||||
case remoteURL == "":
|
||||
remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)
|
||||
case remoteURL == ClientSessionRemote:
|
||||
res, err := parser.Parse(config.Source)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, res, nil
|
||||
case urlutil.IsGitURL(remoteURL):
|
||||
remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
|
||||
case urlutil.IsURL(remoteURL):
|
||||
remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc)
|
||||
default:
|
||||
err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {
|
||||
defer rc.Close()
|
||||
c, err := FromArchive(rc)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
|
||||
}
|
||||
|
||||
func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) {
|
||||
df, err := openAt(c, dockerfilePath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if dockerfilePath == builder.DefaultDockerfileName {
|
||||
lowercase := strings.ToLower(dockerfilePath)
|
||||
if _, err := StatAt(c, lowercase); err == nil {
|
||||
return withDockerfileFromContext(c, lowercase)
|
||||
}
|
||||
}
|
||||
return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error
|
||||
}
|
||||
c.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
res, err := readAndParseDockerfile(dockerfilePath, df)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
df.Close()
|
||||
|
||||
if err := removeDockerfile(c, dockerfilePath); err != nil {
|
||||
c.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return c, res, nil
|
||||
}
|
||||
|
||||
func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) {
|
||||
c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
|
||||
}
|
||||
|
||||
func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) {
|
||||
var dockerfile io.ReadCloser
|
||||
dockerfileFoundErr := errors.New("found-dockerfile")
|
||||
c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){
|
||||
mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
|
||||
dockerfile = rc
|
||||
return nil, dockerfileFoundErr
|
||||
},
|
||||
// fallback handler (tar context)
|
||||
"": func(rc io.ReadCloser) (io.ReadCloser, error) {
|
||||
return progressReader(rc), nil
|
||||
},
|
||||
})
|
||||
switch {
|
||||
case err == dockerfileFoundErr:
|
||||
res, err := parser.Parse(dockerfile)
|
||||
return nil, res, err
|
||||
case err != nil:
|
||||
return nil, nil, err
|
||||
}
|
||||
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
|
||||
}
|
||||
|
||||
func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
|
||||
f, err := openAt(c, ".dockerignore")
|
||||
// Note that a missing .dockerignore file isn't treated as an error
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
excludes, err := dockerignore.ReadAll(f)
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
|
||||
for _, fileToRemove := range filesToRemove {
|
||||
if rm, _ := fileutils.Matches(fileToRemove, excludes); rm {
|
||||
if err := c.Remove(fileToRemove); err != nil {
|
||||
logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
|
||||
br := bufio.NewReader(rc)
|
||||
if _, err := br.Peek(1); err != nil {
|
||||
if err == io.EOF {
|
||||
return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
|
||||
}
|
||||
return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
|
||||
}
|
||||
return parser.Parse(br)
|
||||
}
|
||||
|
||||
func openAt(remote builder.Source, path string) (driver.File, error) {
|
||||
fullPath, err := FullPath(remote, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return remote.Root().Open(fullPath)
|
||||
}
|
||||
|
||||
// StatAt is a helper for calling Stat on a path from a source
|
||||
func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
|
||||
fullPath, err := FullPath(remote, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return remote.Root().Stat(fullPath)
|
||||
}
|
||||
|
||||
// FullPath is a helper for getting a full path for a path from a source
|
||||
func FullPath(remote builder.Source, path string) (string, error) {
|
||||
fullPath, err := remote.Root().ResolveScopedPath(path, true)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
|
||||
}
|
||||
return fullPath, nil
|
||||
}
|
75 vendor/github.com/docker/docker/builder/remotecontext/errors.go generated vendored Normal file
|
@@ -0,0 +1,75 @@
package remotecontext

type notFoundError string

func (e notFoundError) Error() string {
	return string(e)
}

func (notFoundError) NotFound() {}

type requestError string

func (e requestError) Error() string {
	return string(e)
}

func (e requestError) InvalidParameter() {}

type unauthorizedError string

func (e unauthorizedError) Error() string {
	return string(e)
}

func (unauthorizedError) Unauthorized() {}

type forbiddenError string

func (e forbiddenError) Error() string {
	return string(e)
}

func (forbiddenError) Forbidden() {}

type dnsError struct {
	cause error
}

func (e dnsError) Error() string {
	return e.cause.Error()
}

func (e dnsError) NotFound() {}

func (e dnsError) Cause() error {
	return e.cause
}

type systemError struct {
	cause error
}

func (e systemError) Error() string {
	return e.cause.Error()
}

func (e systemError) SystemError() {}

func (e systemError) Cause() error {
	return e.cause
}

type unknownError struct {
	cause error
}

func (e unknownError) Error() string {
	return e.cause.Error()
}

func (unknownError) Unknown() {}

func (e unknownError) Cause() error {
	return e.cause
}
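These error types carry no behaviour beyond their marker methods; callers typically classify them by asserting against small interfaces rather than concrete types, the pattern Docker later consolidated in its errdefs package. A standalone sketch of that classification, independent of the vendored package; the interface and type names here are illustrative.

package main

import "fmt"

// Marker interfaces mirroring the methods defined in errors.go above; these
// are local to the example, not imported from the vendored package.
type notFound interface{ NotFound() }
type invalidParameter interface{ InvalidParameter() }

type notFoundError string

func (e notFoundError) Error() string { return string(e) }
func (notFoundError) NotFound()       {}

// classify maps an error to a category by interface, not by concrete type.
func classify(err error) string {
	switch err.(type) {
	case notFound:
		return "not found"
	case invalidParameter:
		return "invalid parameter"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(classify(notFoundError("no such file"))) // prints "not found"
}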
45
vendor/github.com/docker/docker/builder/remotecontext/filehash.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
package remotecontext

import (
	"archive/tar"
	"crypto/sha256"
	"hash"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/tarsum"
)

// NewFileHash returns new hash that is used for the builder cache keys
func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) {
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return nil, err
		}
	}
	hdr, err := archive.FileInfoHeader(name, fi, link)
	if err != nil {
		return nil, err
	}
	if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return nil, err
	}
	tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
	tsh.Reset() // initialize header
	return tsh, nil
}

type tarsumHash struct {
	hash.Hash
	hdr *tar.Header
}

// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
	// comply with hash.Hash and reset to the state hash had before any writes
	tsh.Hash.Reset()
	tarsum.WriteV1Header(tsh.hdr, tsh.Hash)
}
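As a usage illustration, the hash returned by NewFileHash covers the tar header metadata (written by Reset) and, for regular files, the file contents as well, which is how the lazy context below derives its cache keys. A hedged sketch of hashing a single file this way; the path is hypothetical and the helper name fileCacheKey is not part of the vendored code.

package main

import (
	"encoding/hex"
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/builder/remotecontext"
)

// fileCacheKey hashes one file roughly the way the lazy source does: the tar
// header goes in first (via Reset inside NewFileHash), then the contents of
// regular files are copied in.
func fileCacheKey(path, name string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	h, err := remotecontext.NewFileHash(path, name, fi)
	if err != nil {
		return "", err
	}
	if fi.Mode().IsRegular() && fi.Size() > 0 {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := fileCacheKey("app.conf", "app.conf") // example file only
	fmt.Println(sum, err)
}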
3
vendor/github.com/docker/docker/builder/remotecontext/generate.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
package remotecontext

//go:generate protoc --gogoslick_out=. tarsum.proto
29
vendor/github.com/docker/docker/builder/remotecontext/git.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
package remotecontext

import (
	"os"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext/git"
	"github.com/docker/docker/pkg/archive"
)

// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
func MakeGitContext(gitURL string) (builder.Source, error) {
	root, err := git.Clone(gitURL)
	if err != nil {
		return nil, err
	}

	c, err := archive.Tar(root, archive.Uncompressed)
	if err != nil {
		return nil, err
	}

	defer func() {
		// TODO: print errors?
		c.Close()
		os.RemoveAll(root)
	}()
	return FromArchive(c)
}
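For orientation, MakeGitContext ties the pieces together: it shells out to git via the git subpackage whose diff follows, tars the checkout, and hands the stream to FromArchive. A minimal usage sketch under the assumption that the package is importable from the caller's tree; the repository URL and the "#mybranch:subdir" fragment are examples only.

package main

import (
	"fmt"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
)

// gitSource is a sketch only; the fragment selects a ref and a subdirectory,
// as parseRemoteURL in the git package describes.
func gitSource() (builder.Source, error) {
	return remotecontext.MakeGitContext("https://github.com/example/repo.git#mybranch:subdir")
}

func main() {
	src, err := gitSource()
	if err != nil {
		fmt.Println("clone failed:", err)
		return
	}
	defer src.Close()
	fmt.Println("context root:", src.Root().Path())
}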
@@ -1,7 +1,6 @@
package gitutils
package git

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
@@ -15,18 +14,24 @@ import (
	"github.com/pkg/errors"
)

type gitRepo struct {
	remote string
	ref    string
	subdir string
}

// Clone clones a repository into a newly created directory which
// will be under "docker-build-git"
func Clone(remoteURL string) (string, error) {
	if !urlutil.IsGitTransport(remoteURL) {
		remoteURL = "https://" + remoteURL
	}
	root, err := ioutil.TempDir("", "docker-build-git")
	repo, err := parseRemoteURL(remoteURL)

	if err != nil {
		return "", err
	}

	u, err := url.Parse(remoteURL)
	fetch := fetchArgs(repo.remote, repo.ref)

	root, err := ioutil.TempDir("", "docker-build-git")
	if err != nil {
		return "", err
	}
@@ -35,22 +40,47 @@ func Clone(remoteURL string) (string, error) {
		return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out)
	}

	ref, subdir := getRefAndSubdir(u.Fragment)
	fetch := fetchArgs(u, ref)

	u.Fragment = ""

	// Add origin remote for compatibility with previous implementation that
	// used "git clone" and also to make sure local refs are created for branches
	if out, err := gitWithinDir(root, "remote", "add", "origin", u.String()); err != nil {
		return "", errors.Wrapf(err, "failed add origin repo at %s: %s", u.String(), out)
	if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil {
		return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out)
	}

	if output, err := gitWithinDir(root, fetch...); err != nil {
		return "", errors.Wrapf(err, "error fetching: %s", output)
	}

	return checkoutGit(root, ref, subdir)
	return checkoutGit(root, repo.ref, repo.subdir)
}

func parseRemoteURL(remoteURL string) (gitRepo, error) {
	repo := gitRepo{}

	if !isGitTransport(remoteURL) {
		remoteURL = "https://" + remoteURL
	}

	var fragment string
	if strings.HasPrefix(remoteURL, "git@") {
		// git@.. is not an URL, so cannot be parsed as URL
		parts := strings.SplitN(remoteURL, "#", 2)

		repo.remote = parts[0]
		if len(parts) == 2 {
			fragment = parts[1]
		}
		repo.ref, repo.subdir = getRefAndSubdir(fragment)
	} else {
		u, err := url.Parse(remoteURL)
		if err != nil {
			return repo, err
		}

		repo.ref, repo.subdir = getRefAndSubdir(u.Fragment)
		u.Fragment = ""
		repo.remote = u.String()
	}
	return repo, nil
}

func getRefAndSubdir(fragment string) (ref string, subdir string) {
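To make the fragment handling above concrete, here are a few illustrative expectations for parseRemoteURL, written as a test-style sketch inside the same package. This is not part of the vendored code, the URLs are examples, and it assumes getRefAndSubdir keeps splitting the fragment on ":" into a ref and a subdirectory as the previous gitutils implementation did.

package git

import "testing"

// TestParseRemoteURLSketch shows the expected remote/ref/subdir split for a
// couple of example URLs with a "#ref:subdir" fragment (illustrative only).
func TestParseRemoteURLSketch(t *testing.T) {
	cases := map[string]gitRepo{
		"https://github.com/example/repo.git#mybranch:docker": {
			remote: "https://github.com/example/repo.git",
			ref:    "mybranch",
			subdir: "docker",
		},
		"git@github.com:example/repo.git#mybranch": {
			remote: "git@github.com:example/repo.git",
			ref:    "mybranch",
		},
	}
	for url, want := range cases {
		got, err := parseRemoteURL(url)
		if err != nil || got != want {
			t.Errorf("parseRemoteURL(%q) = %+v, %v; want %+v", url, got, err, want)
		}
	}
}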
@@ -65,24 +95,48 @@ func getRefAndSubdir(fragment string) (ref string, subdir string) {
	return
}

func fetchArgs(remoteURL *url.URL, ref string) []string {
func fetchArgs(remoteURL string, ref string) []string {
	args := []string{"fetch", "--recurse-submodules=yes"}
	shallow := true

	if strings.HasPrefix(remoteURL.Scheme, "http") {
		res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL))
		if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
			shallow = false
		}
	}

	if shallow {
	if supportsShallowClone(remoteURL) {
		args = append(args, "--depth", "1")
	}

	return append(args, "origin", ref)
}

// Check if a given git URL supports a shallow git clone,
// i.e. it is a non-HTTP server or a smart HTTP server.
func supportsShallowClone(remoteURL string) bool {
	if urlutil.IsURL(remoteURL) {
		// Check if the HTTP server is smart

		// Smart servers must correctly respond to a query for the git-upload-pack service
		serviceURL := remoteURL + "/info/refs?service=git-upload-pack"

		// Try a HEAD request and fallback to a Get request on error
		res, err := http.Head(serviceURL)
		if err != nil || res.StatusCode != http.StatusOK {
			res, err = http.Get(serviceURL)
			if err == nil {
				res.Body.Close()
			}
			if err != nil || res.StatusCode != http.StatusOK {
				// request failed
				return false
			}
		}

		if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
			// Fallback, not a smart server
			return false
		}
		return true
	}
	// Non-HTTP protocols always support shallow clones
	return true
}

func checkoutGit(root, ref, subdir string) (string, error) {
	// Try checking out by ref name first. This will work on branches and sets
	// .git/HEAD to the current branch name
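The shallow-clone probe above is the smart-HTTP discovery request from the Git HTTP protocol: a request to <remote>/info/refs?service=git-upload-pack must answer 200 with Content-Type application/x-git-upload-pack-advertisement. A standalone version of the same check, using only the standard library; the URL in main is an example.

package main

import (
	"fmt"
	"net/http"
)

// isSmartHTTP mirrors the supportsShallowClone probe for HTTP(S) remotes:
// a smart server advertises git-upload-pack with a specific Content-Type,
// while dumb servers or plain file listings fail this check.
func isSmartHTTP(remote string) bool {
	res, err := http.Get(remote + "/info/refs?service=git-upload-pack")
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.StatusCode == http.StatusOK &&
		res.Header.Get("Content-Type") == "application/x-git-upload-pack-advertisement"
}

func main() {
	fmt.Println(isSmartHTTP("https://github.com/example/repo.git"))
}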
@@ -120,3 +174,9 @@ func gitWithinDir(dir string, args ...string) ([]byte, error) {
func git(args ...string) ([]byte, error) {
	return exec.Command("git", args...).CombinedOutput()
}

// isGitTransport returns true if the provided str is a git transport by inspecting
// the prefix of the string for known protocols used in git.
func isGitTransport(str string) bool {
	return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
}
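As a quick reference for isGitTransport, a small sketch of which remote strings are treated as git transports and which fall through to the https:// prefixing in parseRemoteURL. The logic is duplicated locally only so the example runs outside the package; the remotes are examples.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/urlutil"
)

// Same check as the vendored isGitTransport, duplicated for the example.
func isGitTransport(str string) bool {
	return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
}

func main() {
	for _, remote := range []string{
		"https://github.com/example/repo.git", // URL transport
		"git@github.com:example/repo.git",     // SSH shorthand
		"git://example.com/repo.git",          // git protocol
		"github.com/example/repo.git",         // not a transport: gets an https:// prefix
	} {
		fmt.Printf("%-45s %v\n", remote, isGitTransport(remote))
	}
}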
100
vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
package remotecontext

import (
	"encoding/hex"
	"os"
	"strings"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/pools"
	"github.com/pkg/errors"
)

// NewLazySource creates a new LazyContext. LazyContext defines a hashed build
// context based on a root directory. Individual files are hashed first time
// they are asked. It is not safe to call methods of LazyContext concurrently.
func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
	return &lazySource{
		root: root,
		sums: make(map[string]string),
	}, nil
}

type lazySource struct {
	root containerfs.ContainerFS
	sums map[string]string
}

func (c *lazySource) Root() containerfs.ContainerFS {
	return c.root
}

func (c *lazySource) Close() error {
	return nil
}

func (c *lazySource) Hash(path string) (string, error) {
	cleanPath, fullPath, err := normalize(path, c.root)
	if err != nil {
		return "", err
	}

	fi, err := c.root.Lstat(fullPath)
	if err != nil {
		return "", errors.WithStack(err)
	}

	relPath, err := Rel(c.root, fullPath)
	if err != nil {
		return "", errors.WithStack(convertPathError(err, cleanPath))
	}

	sum, ok := c.sums[relPath]
	if !ok {
		sum, err = c.prepareHash(relPath, fi)
		if err != nil {
			return "", err
		}
	}

	return sum, nil
}

func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
	p := c.root.Join(c.root.Path(), relPath)
	h, err := NewFileHash(p, relPath, fi)
	if err != nil {
		return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
	}
	if fi.Mode().IsRegular() && fi.Size() > 0 {
		f, err := c.root.Open(p)
		if err != nil {
			return "", errors.Wrapf(err, "failed to open %s", relPath)
		}
		defer f.Close()
		if _, err := pools.Copy(h, f); err != nil {
			return "", errors.Wrapf(err, "failed to copy file data for %s", relPath)
		}
	}
	sum := hex.EncodeToString(h.Sum(nil))
	c.sums[relPath] = sum
	return sum, nil
}

// Rel makes a path relative to base path. Same as `filepath.Rel` but can also
// handle UUID paths in windows.
func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
	// filepath.Rel can't handle UUID paths in windows
	if basepath.OS() == "windows" {
		pfx := basepath.Path() + `\`
		if strings.HasPrefix(targpath, pfx) {
			p := strings.TrimPrefix(targpath, pfx)
			if p == "" {
				p = "."
			}
			return p, nil
		}
	}
	return basepath.Rel(basepath.Path(), targpath)
}
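For context, the lazy source is what wraps a local (or already unpacked) build-context directory so that files are hashed only when a build step first asks for them. A hedged usage sketch, assuming a local directory and the containerfs helpers available in this tree; the path is an example.

package main

import (
	"fmt"

	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/pkg/containerfs"
)

func main() {
	// NewLocalContainerFS wraps a host path; the path here is illustrative.
	root := containerfs.NewLocalContainerFS("/tmp/build-context")

	src, err := remotecontext.NewLazySource(root)
	if err != nil {
		fmt.Println("create source:", err)
		return
	}
	defer src.Close()

	// The first call computes and caches the tar-header+content hash;
	// later calls for the same path return the cached sum.
	sum, err := src.Hash("Dockerfile")
	fmt.Println(sum, err)
}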
@@ -1,29 +1,27 @@
package httputils
package remotecontext

import (
	"mime"
	"net/http"
)

// MimeTypes stores the MIME content type.
var MimeTypes = struct {
// mimeTypes stores the MIME content type.
var mimeTypes = struct {
	TextPlain   string
	OctetStream string
}{"text/plain", "application/octet-stream"}

// DetectContentType returns a best guess representation of the MIME
// detectContentType returns a best guess representation of the MIME
// content type for the bytes at c. The value detected by
// http.DetectContentType is guaranteed not be nil, defaulting to
// application/octet-stream when a better guess cannot be made. The
// result of this detection is then run through mime.ParseMediaType()
// which separates the actual MIME string from any parameters.
func DetectContentType(c []byte) (string, map[string]string, error) {

func detectContentType(c []byte) (string, map[string]string, error) {
	ct := http.DetectContentType(c)
	contentType, args, err := mime.ParseMediaType(ct)
	if err != nil {
		return "", nil, err
	}

	return contentType, args, nil
}
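The now-unexported detectContentType is a thin wrapper over two standard-library calls, so the behaviour is easy to reproduce outside the package. A small sketch of the sniff-then-parse split; the sample bytes are arbitrary.

package main

import (
	"fmt"
	"mime"
	"net/http"
)

func main() {
	sample := []byte("{\"foo\": \"bar\"}")

	// Sniff a MIME type from the leading bytes; the result is never empty,
	// falling back to application/octet-stream when nothing better matches.
	ct := http.DetectContentType(sample)

	// Separate the bare content type from any parameters (e.g. charset).
	contentType, params, err := mime.ParseMediaType(ct)
	fmt.Println(contentType, params, err) // e.g. "text/plain" map[charset:utf-8] <nil>
}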