Switch to golang/dep.

Ludovic Fernandez 2018-01-11 17:46:04 +01:00 committed by Traefiker
parent 709d50836b
commit 2618aef008
246 changed files with 42564 additions and 17452 deletions

150
vendor/github.com/docker/cli/cli/cobra.go generated vendored Normal file

@@ -0,0 +1,150 @@
package cli
import (
"fmt"
"strings"
"github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) {
cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
rootCmd.SetUsageTemplate(usageTemplate)
rootCmd.SetHelpTemplate(helpTemplate)
rootCmd.SetFlagErrorFunc(FlagErrorFunc)
rootCmd.SetHelpCommand(helpCommand)
rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
}
// FlagErrorFunc prints an error message which matches the format of the
// docker/cli/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
if err == nil {
return nil
}
usage := ""
if cmd.HasSubCommands() {
usage = "\n\n" + cmd.UsageString()
}
return StatusError{
Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
StatusCode: 125,
}
}
var helpCommand = &cobra.Command{
Use: "help [command]",
Short: "Help about the command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {},
PersistentPostRun: func(cmd *cobra.Command, args []string) {},
RunE: func(c *cobra.Command, args []string) error {
cmd, args, e := c.Root().Find(args)
if cmd == nil || e != nil || len(args) > 0 {
return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
}
helpFunc := cmd.HelpFunc()
helpFunc(cmd, args)
return nil
},
}
func hasSubCommands(cmd *cobra.Command) bool {
return len(operationSubCommands(cmd)) > 0
}
func hasManagementSubCommands(cmd *cobra.Command) bool {
return len(managementSubCommands(cmd)) > 0
}
func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if sub.IsAvailableCommand() && !sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
func wrappedFlagUsages(cmd *cobra.Command) string {
width := 80
if ws, err := term.GetWinsize(0); err == nil {
width = int(ws.Width)
}
return cmd.Flags().FlagUsagesWrapped(width - 1)
}
func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if sub.IsAvailableCommand() && sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
var usageTemplate = `Usage:
{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}
{{ .Short | trim }}
{{- if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}
{{- end}}
{{- if .HasExample}}
Examples:
{{ .Example }}
{{- end}}
{{- if .HasFlags}}
Options:
{{ wrappedFlagUsages . | trimRightSpace}}
{{- end}}
{{- if hasManagementSubCommands . }}
Management Commands:
{{- range managementSubCommands . }}
{{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}
{{- if hasSubCommands .}}
Commands:
{{- range operationSubCommands . }}
{{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}
{{- if .HasSubCommands }}
Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
`
var helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
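
Not part of the vendored file, but as a usage sketch: wiring SetupRootCommand into a minimal cobra program. The import paths are the vendored ones above; the `mycli` name is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func main() {
	// SetupRootCommand installs the docker-style usage/help templates and
	// the FlagErrorFunc (exit status 125) defined in the vendored file above.
	rootCmd := &cobra.Command{
		Use:   "mycli", // placeholder name
		Short: "A root command using docker/cli's cobra helpers",
	}
	cli.SetupRootCommand(rootCmd)
	if err := rootCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}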

305
vendor/github.com/docker/cli/cli/command/cli.go generated vendored Normal file

@@ -0,0 +1,305 @@
package command
import (
"fmt"
"io"
"net/http"
"os"
"runtime"
"github.com/docker/cli/cli"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials"
cliflags "github.com/docker/cli/cli/flags"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/notary/passphrase"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
// Streams is an interface which exposes the standard input and output streams
type Streams interface {
In() *InStream
Out() *OutStream
Err() io.Writer
}
// Cli represents the docker command line client.
type Cli interface {
Client() client.APIClient
Out() *OutStream
Err() io.Writer
In() *InStream
SetIn(in *InStream)
ConfigFile() *configfile.ConfigFile
CredentialsStore(serverAddress string) credentials.Store
}
// DockerCli is an instance the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
in *InStream
out *OutStream
err io.Writer
client client.APIClient
defaultVersion string
server ServerInfo
}
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
return cli.defaultVersion
}
// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
return cli.client
}
// Out returns the writer used for stdout
func (cli *DockerCli) Out() *OutStream {
return cli.out
}
// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
return cli.err
}
// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *InStream) {
cli.in = in
}
// In returns the reader used for stdin
func (cli *DockerCli) In() *InStream {
return cli.in
}
// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
cmd.SetOutput(err)
cmd.HelpFunc()(cmd, args)
return nil
}
}
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
return cli.configFile
}
// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
return cli.server
}
// GetAllCredentials returns all of the credentials stored in all of the
// configured credential stores.
func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) {
auths := make(map[string]types.AuthConfig)
for registry := range cli.configFile.CredentialHelpers {
helper := cli.CredentialsStore(registry)
newAuths, err := helper.GetAll()
if err != nil {
return nil, err
}
addAll(auths, newAuths)
}
defaultStore := cli.CredentialsStore("")
newAuths, err := defaultStore.GetAll()
if err != nil {
return nil, err
}
addAll(auths, newAuths)
return auths, nil
}
func addAll(to, from map[string]types.AuthConfig) {
for reg, ac := range from {
to[reg] = ac
}
}
// CredentialsStore returns a new credentials store based
// on the settings provided in the configuration file. Empty string returns
// the default credential store.
func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store {
if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" {
return credentials.NewNativeStore(cli.configFile, helper)
}
return credentials.NewFileStore(cli.configFile)
}
// getConfiguredCredentialStore returns the credential helper configured for the
// given registry, the default credsStore, or the empty string if neither are
// configured.
func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string {
if c.CredentialHelpers != nil && serverAddress != "" {
if helper, exists := c.CredentialHelpers[serverAddress]; exists {
return helper
}
}
return c.CredentialsStore
}
// Initialize runs initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
cli.configFile = LoadDefaultConfigFile(cli.err)
var err error
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
if tlsconfig.IsErrEncryptedKey(err) {
var (
passwd string
giveup bool
)
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ {
// some code and comments borrowed from notary/trustmanager/keystore.go
passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return errors.Wrap(err, "private key is encrypted, but could not get passphrase")
}
opts.Common.TLSOptions.Passphrase = passwd
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
}
}
if err != nil {
return err
}
cli.defaultVersion = cli.client.ClientVersion()
if ping, err := cli.client.Ping(context.Background()); err == nil {
cli.server = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
}
// since the new header was added in 1.25, assume server is 1.24 if header is not present.
if ping.APIVersion == "" {
ping.APIVersion = "1.24"
}
// if server version is lower than the current cli, downgrade
if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) {
cli.client.UpdateClientVersion(ping.APIVersion)
}
}
return nil
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
HasExperimental bool
OSType string
}
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
}
// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct even when no file is found.
func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile {
configFile, e := cliconfig.Load(cliconfig.Dir())
if e != nil {
fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e)
}
if !configFile.ContainsAuth() {
credentials.DetectDefaultStore(configFile)
}
return configFile
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
customHeaders := configFile.HTTPHeaders
if customHeaders == nil {
customHeaders = map[string]string{}
}
customHeaders["User-Agent"] = UserAgent()
verStr := api.DefaultVersion
if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
verStr = tmpStr
}
httpClient, err := newHTTPClient(host, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
return client.NewClient(host, verStr, httpClient, customHeaders)
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) {
switch len(hosts) {
case 0:
host = os.Getenv("DOCKER_HOST")
case 1:
host = hosts[0]
default:
return "", errors.New("Please specify only one -H")
}
host, err = dopts.ParseHost(tlsOptions != nil, host)
return
}
func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
if tlsOptions == nil {
// let the api client configure the default transport.
return nil, nil
}
opts := *tlsOptions
opts.ExclusiveRootPools = true
config, err := tlsconfig.Client(opts)
if err != nil {
return nil, err
}
tr := &http.Transport{
TLSClientConfig: config,
}
proto, addr, _, err := client.ParseHost(host)
if err != nil {
return nil, err
}
sockets.ConfigureTransport(tr, proto, addr)
return &http.Client{
Transport: tr,
CheckRedirect: client.CheckRedirect,
}, nil
}
// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")"
}
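
A minimal sketch of constructing and initializing the client defined above. It assumes cliflags.NewClientOptions() exists in this revision of the vendored cli/flags package (that package is not shown in this diff):

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
	cliflags "github.com/docker/cli/cli/flags" // assumed helper package; not shown in this diff
)

func main() {
	dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr)
	// Initialize loads the config file, builds the API client from flags,
	// and downgrades the client version if the daemon reports an older one.
	if err := dockerCli.Initialize(cliflags.NewClientOptions()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("API version in use:", dockerCli.Client().ClientVersion())
}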


@@ -0,0 +1,47 @@
package command
import (
"sync"
"github.com/Sirupsen/logrus"
eventtypes "github.com/docker/docker/api/types/events"
)
// EventHandler is an abstract interface that lets users register their
// own handler functions for each type of event
type EventHandler interface {
Handle(action string, h func(eventtypes.Message))
Watch(c <-chan eventtypes.Message)
}
// InitEventHandler initializes and returns an EventHandler
func InitEventHandler() EventHandler {
return &eventHandler{handlers: make(map[string]func(eventtypes.Message))}
}
type eventHandler struct {
handlers map[string]func(eventtypes.Message)
mu sync.Mutex
}
func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
w.mu.Lock()
w.handlers[action] = h
w.mu.Unlock()
}
// Watch ranges over the passed in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
func (w *eventHandler) Watch(c <-chan eventtypes.Message) {
for e := range c {
w.mu.Lock()
h, exists := w.handlers[e.Action]
w.mu.Unlock()
if !exists {
continue
}
logrus.Debugf("event handler: received event: %v", e)
go h(e)
}
}
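
A short sketch of driving this handler. Watch runs each handler in its own goroutine, so the sleep below is only a crude wait for the demo:

package main

import (
	"fmt"
	"time"

	"github.com/docker/cli/cli/command"
	eventtypes "github.com/docker/docker/api/types/events"
)

func main() {
	h := command.InitEventHandler()
	h.Handle("start", func(e eventtypes.Message) {
		fmt.Println("container started:", e.Actor.ID)
	})
	c := make(chan eventtypes.Message, 1)
	c <- eventtypes.Message{Action: "start", Actor: eventtypes.Actor{ID: "abc123"}} // fabricated demo event
	close(c)
	h.Watch(c)                         // returns once the channel is closed
	time.Sleep(100 * time.Millisecond) // handlers run in goroutines; crude wait for the demo
}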

500
vendor/github.com/docker/cli/cli/command/image/build.go generated vendored Normal file

@@ -0,0 +1,500 @@
package image
import (
"archive/tar"
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"runtime"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/image/build"
"github.com/docker/cli/opts"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/urlutil"
runconfigopts "github.com/docker/docker/runconfig/opts"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type buildOptions struct {
context string
dockerfileName string
tags opts.ListOpts
labels opts.ListOpts
buildArgs opts.ListOpts
extraHosts opts.ListOpts
ulimits *opts.UlimitOpt
memory opts.MemBytes
memorySwap opts.MemSwapBytes
shmSize opts.MemBytes
cpuShares int64
cpuPeriod int64
cpuQuota int64
cpuSetCpus string
cpuSetMems string
cgroupParent string
isolation string
quiet bool
noCache bool
rm bool
forceRm bool
pull bool
cacheFrom []string
compress bool
securityOpt []string
networkMode string
squash bool
target string
imageIDFile string
}
// dockerfileFromStdin returns true when the user specified that the Dockerfile
// should be read from stdin instead of a file
func (o buildOptions) dockerfileFromStdin() bool {
return o.dockerfileName == "-"
}
// contextFromStdin returns true when the user specified that the build context
// should be read from stdin
func (o buildOptions) contextFromStdin() bool {
return o.context == "-"
}
// NewBuildCommand creates a new `docker build` command
func NewBuildCommand(dockerCli *command.DockerCli) *cobra.Command {
ulimits := make(map[string]*units.Ulimit)
options := buildOptions{
tags: opts.NewListOpts(validateTag),
buildArgs: opts.NewListOpts(opts.ValidateEnv),
ulimits: opts.NewUlimitOpt(&ulimits),
labels: opts.NewListOpts(opts.ValidateEnv),
extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
}
cmd := &cobra.Command{
Use: "build [OPTIONS] PATH | URL | -",
Short: "Build an image from a Dockerfile",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.context = args[0]
return runBuild(dockerCli, options)
},
}
flags := cmd.Flags()
flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
flags.Var(options.ulimits, "ulimit", "Ulimit options")
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
flags.VarP(&options.memory, "memory", "m", "Memory limit")
flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
flags.Var(&options.labels, "label", "Set metadata for an image")
flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.SetAnnotation("network", "version", []string{"1.25"})
flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
command.AddTrustVerificationFlags(flags)
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
flags.SetAnnotation("squash", "experimental", nil)
flags.SetAnnotation("squash", "version", []string{"1.25"})
return cmd
}
// lastProgressOutput is the same as progress.Output except
// that it only outputs the last update. It is used in
// non-terminal scenarios to suppress verbose messages
type lastProgressOutput struct {
output progress.Output
}
// WriteProgress formats progress information from a ProgressReader.
func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
if !prog.LastUpdate {
return nil
}
return out.output.WriteProgress(prog)
}
// nolint: gocyclo
func runBuild(dockerCli *command.DockerCli, options buildOptions) error {
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
err error
contextDir string
tempDir string
relDockerfile string
progBuff io.Writer
buildBuff io.Writer
)
if options.dockerfileFromStdin() {
if options.contextFromStdin() {
return errors.New("invalid argument: can't use stdin for both build context and dockerfile")
}
dockerfileCtx = dockerCli.In()
}
specifiedContext := options.context
progBuff = dockerCli.Out()
buildBuff = dockerCli.Out()
if options.quiet {
progBuff = bytes.NewBuffer(nil)
buildBuff = bytes.NewBuffer(nil)
}
if options.imageIDFile != "" {
// Avoid leaving a stale file if we eventually fail
if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Removing image ID file")
}
}
switch {
case options.contextFromStdin():
buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
case isLocalDir(specifiedContext):
contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
case urlutil.IsGitURL(specifiedContext):
tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName)
case urlutil.IsURL(specifiedContext):
buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
default:
return errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
}
if err != nil {
if options.quiet && urlutil.IsURL(specifiedContext) {
fmt.Fprintln(dockerCli.Err(), progBuff)
}
return errors.Errorf("unable to prepare context: %s", err)
}
if tempDir != "" {
defer os.RemoveAll(tempDir)
contextDir = tempDir
}
if buildCtx == nil {
excludes, err := build.ReadDockerignore(contextDir)
if err != nil {
return err
}
if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
return errors.Errorf("error checking context: '%s'.", err)
}
// And canonicalize dockerfile name to a platform-independent one
relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
if err != nil {
return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
}
excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())
compression := archive.Uncompressed
if options.compress {
compression = archive.Gzip
}
buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
Compression: compression,
ExcludePatterns: excludes,
})
if err != nil {
return err
}
}
// replace Dockerfile if added dynamically
if dockerfileCtx != nil {
buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
if err != nil {
return err
}
}
ctx := context.Background()
var resolvedTags []*resolvedTag
if command.IsTrusted() {
translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
return TrustedReference(ctx, dockerCli, ref, nil)
}
// Wrap the tar archive to replace the Dockerfile entry with the rewritten
// Dockerfile which uses trusted pulls.
buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
}
// Setup an upload progress bar
progressOutput := streamformatter.NewProgressOutput(progBuff)
if !dockerCli.Out().IsTerminal() {
progressOutput = &lastProgressOutput{output: progressOutput}
}
var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
authConfigs, _ := dockerCli.GetAllCredentials()
buildOptions := types.ImageBuildOptions{
Memory: options.memory.Value(),
MemorySwap: options.memorySwap.Value(),
Tags: options.tags.GetAll(),
SuppressOutput: options.quiet,
NoCache: options.noCache,
Remove: options.rm,
ForceRemove: options.forceRm,
PullParent: options.pull,
Isolation: container.Isolation(options.isolation),
CPUSetCPUs: options.cpuSetCpus,
CPUSetMems: options.cpuSetMems,
CPUShares: options.cpuShares,
CPUQuota: options.cpuQuota,
CPUPeriod: options.cpuPeriod,
CgroupParent: options.cgroupParent,
Dockerfile: relDockerfile,
ShmSize: options.shmSize.Value(),
Ulimits: options.ulimits.GetList(),
BuildArgs: runconfigopts.ConvertKVStringsToMapWithNil(options.buildArgs.GetAll()),
AuthConfigs: authConfigs,
Labels: runconfigopts.ConvertKVStringsToMap(options.labels.GetAll()),
CacheFrom: options.cacheFrom,
SecurityOpt: options.securityOpt,
NetworkMode: options.networkMode,
Squash: options.squash,
ExtraHosts: options.extraHosts.GetAll(),
Target: options.target,
}
response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
if err != nil {
if options.quiet {
fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
}
return err
}
defer response.Body.Close()
imageID := ""
aux := func(auxJSON *json.RawMessage) {
var result types.BuildResult
if err := json.Unmarshal(*auxJSON, &result); err != nil {
fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
} else {
imageID = result.ID
}
}
err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux)
if err != nil {
if jerr, ok := err.(*jsonmessage.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
if options.quiet {
fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
}
return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// Windows: show error message about modified file permissions if the
// daemon isn't running Windows.
if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+
"image from Windows against a non-Windows Docker host. All files and "+
"directories added to build context will have '-rwxr-xr-x' permissions. "+
"It is recommended to double check and reset permissions for sensitive "+
"files and directories.")
}
// Everything worked, so if -q was provided the output from the daemon
// should be just the image ID, and we print that to stdout.
if options.quiet {
imageID = fmt.Sprintf("%s", buildBuff)
fmt.Fprintf(dockerCli.Out(), imageID)
}
if options.imageIDFile != "" {
if imageID == "" {
return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile)
}
if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
return err
}
}
if command.IsTrusted() {
// Since the build was successful, now we must tag any of the resolved
// images from the above Dockerfile rewrite.
for _, resolved := range resolvedTags {
if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
return err
}
}
}
return nil
}
func isLocalDir(c string) bool {
_, err := os.Stat(c)
return err == nil
}
type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)
// validateTag checks if the given image name can be resolved.
func validateTag(rawRepo string) (string, error) {
_, err := reference.ParseNormalizedNamed(rawRepo)
if err != nil {
return "", err
}
return rawRepo, nil
}
var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)
// resolvedTag records the repository, tag, and resolved digest reference
// from a Dockerfile rewrite.
type resolvedTag struct {
digestRef reference.Canonical
tagRef reference.NamedTagged
}
// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
// "FROM <image>" instructions to a digest reference. `translator` is a
// function that takes a repository name and tag reference and returns a
// trusted digest reference.
func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
scanner := bufio.NewScanner(dockerfile)
buf := bytes.NewBuffer(nil)
// Scan the lines of the Dockerfile, looking for a "FROM" line.
for scanner.Scan() {
line := scanner.Text()
matches := dockerfileFromLinePattern.FindStringSubmatch(line)
if matches != nil && matches[1] != api.NoBaseImageSpecifier {
// Replace the line with a resolved "FROM repo@digest"
var ref reference.Named
ref, err = reference.ParseNormalizedNamed(matches[1])
if err != nil {
return nil, nil, err
}
ref = reference.TagNameOnly(ref)
if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() {
trustedRef, err := translator(ctx, ref)
if err != nil {
return nil, nil, err
}
line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef)))
resolvedTags = append(resolvedTags, &resolvedTag{
digestRef: trustedRef,
tagRef: ref,
})
}
}
_, err := fmt.Fprintln(buf, line)
if err != nil {
return nil, nil, err
}
}
return buf.Bytes(), resolvedTags, scanner.Err()
}
// replaceDockerfileTarWrapper wraps the given input tar archive stream and
// replaces the entry with the given Dockerfile name with the contents of the
// new Dockerfile. Returns a new tar archive stream with the replaced
// Dockerfile.
func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
pipeReader, pipeWriter := io.Pipe()
go func() {
tarReader := tar.NewReader(inputTarStream)
tarWriter := tar.NewWriter(pipeWriter)
defer inputTarStream.Close()
for {
hdr, err := tarReader.Next()
if err == io.EOF {
// Signals end of archive.
tarWriter.Close()
pipeWriter.Close()
return
}
if err != nil {
pipeWriter.CloseWithError(err)
return
}
content := io.Reader(tarReader)
if hdr.Name == dockerfileName {
// This entry is the Dockerfile. Since the tar archive was
// generated from a directory on the local filesystem, the
// Dockerfile will only appear once in the archive.
var newDockerfile []byte
newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator)
if err != nil {
pipeWriter.CloseWithError(err)
return
}
hdr.Size = int64(len(newDockerfile))
content = bytes.NewBuffer(newDockerfile)
}
if err := tarWriter.WriteHeader(hdr); err != nil {
pipeWriter.CloseWithError(err)
return
}
if _, err := io.Copy(tarWriter, content); err != nil {
pipeWriter.CloseWithError(err)
return
}
}
}()
return pipeReader
}
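
The FROM rewrite above relies on reference normalization; here is a standalone sketch of that step using the same vendored reference package (the image name is an arbitrary example):

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ParseNormalizedNamed expands "golang" to "docker.io/library/golang",
	// and TagNameOnly adds ":latest" when no tag is present -- the same
	// steps rewriteDockerfileFrom performs before asking the translator
	// for a trusted digest reference.
	ref, err := reference.ParseNormalizedNamed("golang")
	if err != nil {
		panic(err)
	}
	ref = reference.TagNameOnly(ref)
	fmt.Println(ref.String())                  // docker.io/library/golang:latest
	fmt.Println(reference.FamiliarString(ref)) // golang:latest
}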

34
vendor/github.com/docker/cli/cli/command/image/cmd.go generated vendored Normal file

@@ -0,0 +1,34 @@
package image
import (
"github.com/spf13/cobra"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
)
// NewImageCommand returns a cobra command for `image` subcommands
// nolint: interfacer
func NewImageCommand(dockerCli *command.DockerCli) *cobra.Command {
cmd := &cobra.Command{
Use: "image",
Short: "Manage images",
Args: cli.NoArgs,
RunE: command.ShowHelp(dockerCli.Err()),
}
cmd.AddCommand(
NewBuildCommand(dockerCli),
NewHistoryCommand(dockerCli),
NewImportCommand(dockerCli),
NewLoadCommand(dockerCli),
NewPullCommand(dockerCli),
NewPushCommand(dockerCli),
NewSaveCommand(dockerCli),
NewTagCommand(dockerCli),
newListCommand(dockerCli),
newRemoveCommand(dockerCli),
newInspectCommand(dockerCli),
NewPruneCommand(dockerCli),
)
return cmd
}
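
Tying the vendored pieces together, a hypothetical main package that mounts these image subcommands on a docker-style root command:

package main

import (
	"os"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/image"
	"github.com/spf13/cobra"
)

func main() {
	dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr)
	rootCmd := &cobra.Command{Use: "docker"} // placeholder root command
	cli.SetupRootCommand(rootCmd)
	// Adds build, history, import, load, pull, push, save, tag, ls, rm, inspect, prune.
	rootCmd.AddCommand(image.NewImageCommand(dockerCli))
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}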


@@ -0,0 +1,64 @@
package image
import (
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/spf13/cobra"
)
type historyOptions struct {
image string
human bool
quiet bool
noTrunc bool
format string
}
// NewHistoryCommand creates a new `docker history` command
func NewHistoryCommand(dockerCli command.Cli) *cobra.Command {
var opts historyOptions
cmd := &cobra.Command{
Use: "history [OPTIONS] IMAGE",
Short: "Show the history of an image",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.image = args[0]
return runHistory(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs")
flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")
return cmd
}
func runHistory(dockerCli command.Cli, opts historyOptions) error {
ctx := context.Background()
history, err := dockerCli.Client().ImageHistory(ctx, opts.image)
if err != nil {
return err
}
format := opts.format
if len(format) == 0 {
format = formatter.TableFormatKey
}
historyCtx := formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human),
Trunc: !opts.noTrunc,
}
return formatter.HistoryWrite(historyCtx, opts.human, history)
}


@@ -0,0 +1,87 @@
package image
import (
"io"
"os"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/urlutil"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type importOptions struct {
source string
reference string
changes dockeropts.ListOpts
message string
}
// NewImportCommand creates a new `docker import` command
func NewImportCommand(dockerCli command.Cli) *cobra.Command {
var options importOptions
cmd := &cobra.Command{
Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]",
Short: "Import the contents from a tarball to create a filesystem image",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.source = args[0]
if len(args) > 1 {
options.reference = args[1]
}
return runImport(dockerCli, options)
},
}
flags := cmd.Flags()
options.changes = dockeropts.NewListOpts(nil)
flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")
return cmd
}
func runImport(dockerCli command.Cli, options importOptions) error {
var (
in io.Reader
srcName = options.source
)
if options.source == "-" {
in = dockerCli.In()
} else if !urlutil.IsURL(options.source) {
srcName = "-"
file, err := os.Open(options.source)
if err != nil {
return err
}
defer file.Close()
in = file
}
source := types.ImageImportSource{
Source: in,
SourceName: srcName,
}
importOptions := types.ImageImportOptions{
Message: options.message,
Changes: options.changes.GetAll(),
}
clnt := dockerCli.Client()
responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions)
if err != nil {
return err
}
defer responseBody.Close()
return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
}


@@ -0,0 +1,44 @@
package image
import (
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/inspect"
"github.com/spf13/cobra"
)
type inspectOptions struct {
format string
refs []string
}
// newInspectCommand creates a new cobra.Command for `docker image inspect`
func newInspectCommand(dockerCli command.Cli) *cobra.Command {
var opts inspectOptions
cmd := &cobra.Command{
Use: "inspect [OPTIONS] IMAGE [IMAGE...]",
Short: "Display detailed information on one or more images",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.refs = args
return runInspect(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
return cmd
}
func runInspect(dockerCli command.Cli, opts inspectOptions) error {
client := dockerCli.Client()
ctx := context.Background()
getRefFunc := func(ref string) (interface{}, []byte, error) {
return client.ImageInspectWithRaw(ctx, ref)
}
return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
}
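
The same inspect flow can be sketched without the cobra wrapper, using a plain API client (client.NewEnvClient comes from the vendored docker client and is an assumption insofar as it is not shown in this diff):

package main

import (
	"os"

	"github.com/docker/cli/cli/command/inspect"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.NewEnvClient() // assumed constructor from the vendored docker client
	if err != nil {
		panic(err)
	}
	// Same shape as the getRefFunc built by runInspect above.
	getRef := func(ref string) (interface{}, []byte, error) {
		return c.ImageInspectWithRaw(context.Background(), ref)
	}
	// Print just the image ID of alpine:latest using a Go template.
	if err := inspect.Inspect(os.Stdout, []string{"alpine:latest"}, "{{.ID}}", getRef); err != nil {
		panic(err)
	}
}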

95
vendor/github.com/docker/cli/cli/command/image/list.go generated vendored Normal file

@@ -0,0 +1,95 @@
package image
import (
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type imagesOptions struct {
matchName string
quiet bool
all bool
noTrunc bool
showDigests bool
format string
filter opts.FilterOpt
}
// NewImagesCommand creates a new `docker images` command
func NewImagesCommand(dockerCli command.Cli) *cobra.Command {
options := imagesOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "images [OPTIONS] [REPOSITORY[:TAG]]",
Short: "List images",
Args: cli.RequiresMaxArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 0 {
options.matchName = args[0]
}
return runImages(dockerCli, options)
},
}
flags := cmd.Flags()
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs")
flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)")
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
flags.BoolVar(&options.showDigests, "digests", false, "Show digests")
flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template")
flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided")
return cmd
}
func newListCommand(dockerCli command.Cli) *cobra.Command {
cmd := *NewImagesCommand(dockerCli)
cmd.Aliases = []string{"images", "list"}
cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]"
return &cmd
}
func runImages(dockerCli command.Cli, options imagesOptions) error {
ctx := context.Background()
filters := options.filter.Value()
if options.matchName != "" {
filters.Add("reference", options.matchName)
}
listOptions := types.ImageListOptions{
All: options.all,
Filters: filters,
}
images, err := dockerCli.Client().ImageList(ctx, listOptions)
if err != nil {
return err
}
format := options.format
if len(format) == 0 {
if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet {
format = dockerCli.ConfigFile().ImagesFormat
} else {
format = formatter.TableFormatKey
}
}
imageCtx := formatter.ImageContext{
Context: formatter.Context{
Output: dockerCli.Out(),
Format: formatter.NewImageFormat(format, options.quiet, options.showDigests),
Trunc: !options.noTrunc,
},
Digest: options.showDigests,
}
return formatter.ImageWrite(imageCtx, images)
}
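
The reference filter that runImages builds can also be passed straight to the API; a sketch with an arbitrary example pattern:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.NewEnvClient() // assumed constructor from the vendored docker client
	if err != nil {
		panic(err)
	}
	f := filters.NewArgs()
	f.Add("reference", "alpine:*") // the same filter runImages adds for a name argument
	images, err := c.ImageList(context.Background(), types.ImageListOptions{Filters: f})
	if err != nil {
		panic(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.RepoTags)
	}
}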

77
vendor/github.com/docker/cli/cli/command/image/load.go generated vendored Normal file

@@ -0,0 +1,77 @@
package image
import (
"io"
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type loadOptions struct {
input string
quiet bool
}
// NewLoadCommand creates a new `docker load` command
func NewLoadCommand(dockerCli command.Cli) *cobra.Command {
var opts loadOptions
cmd := &cobra.Command{
Use: "load [OPTIONS]",
Short: "Load an image from a tar archive or STDIN",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return runLoad(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN")
flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output")
return cmd
}
func runLoad(dockerCli command.Cli, opts loadOptions) error {
var input io.Reader = dockerCli.In()
if opts.input != "" {
// We use system.OpenSequential to use sequential file access on Windows, avoiding
// depleting the standby list unnecessarily. On Linux, this equates to a regular os.Open.
file, err := system.OpenSequential(opts.input)
if err != nil {
return err
}
defer file.Close()
input = file
}
// To avoid getting stuck, verify that a tar file is given either via
// the input flag or through stdin; if not, display an error message and exit.
if opts.input == "" && dockerCli.In().IsTerminal() {
return errors.Errorf("requested load from stdin, but stdin is empty")
}
if !dockerCli.Out().IsTerminal() {
opts.quiet = true
}
response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet)
if err != nil {
return err
}
defer response.Body.Close()
if response.Body != nil && response.JSON {
return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil)
}
_, err = io.Copy(dockerCli.Out(), response.Body)
return err
}


@@ -0,0 +1,95 @@
package image
import (
"fmt"
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
units "github.com/docker/go-units"
"github.com/spf13/cobra"
)
type pruneOptions struct {
force bool
all bool
filter opts.FilterOpt
}
// NewPruneCommand returns a new cobra prune command for images
func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
options := pruneOptions{filter: opts.NewFilterOpt()}
cmd := &cobra.Command{
Use: "prune [OPTIONS]",
Short: "Remove unused images",
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
spaceReclaimed, output, err := runPrune(dockerCli, options)
if err != nil {
return err
}
if output != "" {
fmt.Fprintln(dockerCli.Out(), output)
}
fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
return nil
},
Tags: map[string]string{"version": "1.25"},
}
flags := cmd.Flags()
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=<timestamp>')")
return cmd
}
const (
allImageWarning = `WARNING! This will remove all images without at least one container associated to them.
Are you sure you want to continue?`
danglingWarning = `WARNING! This will remove all dangling images.
Are you sure you want to continue?`
)
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
pruneFilters := options.filter.Value()
pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
warning := danglingWarning
if options.all {
warning = allImageWarning
}
if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
return
}
report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters)
if err != nil {
return
}
if len(report.ImagesDeleted) > 0 {
output = "Deleted Images:\n"
for _, st := range report.ImagesDeleted {
if st.Untagged != "" {
output += fmt.Sprintln("untagged:", st.Untagged)
} else {
output += fmt.Sprintln("deleted:", st.Deleted)
}
}
spaceReclaimed = report.SpaceReclaimed
}
return
}
// RunPrune calls the Image Prune API.
// It returns the amount of space reclaimed and a detailed output string.
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})
}
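
runPrune boils down to a single API request; a sketch of the equivalent direct call, building the same dangling filter:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	units "github.com/docker/go-units"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.NewEnvClient() // assumed constructor from the vendored docker client
	if err != nil {
		panic(err)
	}
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("dangling", "true") // what runPrune sets when --all is not given
	report, err := c.ImagesPrune(context.Background(), pruneFilters)
	if err != nil {
		panic(err)
	}
	fmt.Println("Total reclaimed space:", units.HumanSize(float64(report.SpaceReclaimed)))
}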

85
vendor/github.com/docker/cli/cli/command/image/pull.go generated vendored Normal file

@@ -0,0 +1,85 @@
package image
import (
"fmt"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/distribution/reference"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type pullOptions struct {
remote string
all bool
}
// NewPullCommand creates a new `docker pull` command
func NewPullCommand(dockerCli command.Cli) *cobra.Command {
var opts pullOptions
cmd := &cobra.Command{
Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]",
Short: "Pull an image or a repository from a registry",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.remote = args[0]
return runPull(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository")
command.AddTrustVerificationFlags(flags)
return cmd
}
func runPull(dockerCli command.Cli, opts pullOptions) error {
distributionRef, err := reference.ParseNormalizedNamed(opts.remote)
if err != nil {
return err
}
if opts.all && !reference.IsNameOnly(distributionRef) {
return errors.New("tag can't be used with --all-tags/-a")
}
if !opts.all && reference.IsNameOnly(distributionRef) {
distributionRef = reference.TagNameOnly(distributionRef)
if tagged, ok := distributionRef.(reference.Tagged); ok {
fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", tagged.Tag())
}
}
// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
if err != nil {
return err
}
ctx := context.Background()
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "pull")
// Check if reference has a digest
_, isCanonical := distributionRef.(reference.Canonical)
if command.IsTrusted() && !isCanonical {
err = trustedPull(ctx, dockerCli, repoInfo, distributionRef, authConfig, requestPrivilege)
} else {
err = imagePullPrivileged(ctx, dockerCli, authConfig, reference.FamiliarString(distributionRef), requestPrivilege, opts.all)
}
if err != nil {
if strings.Contains(err.Error(), "when fetching 'plugin'") {
return errors.New(err.Error() + " - Use `docker plugin install`")
}
return err
}
return nil
}

61
vendor/github.com/docker/cli/cli/command/image/push.go generated vendored Normal file

@@ -0,0 +1,61 @@
package image
import (
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/registry"
"github.com/spf13/cobra"
)
// NewPushCommand creates a new `docker push` command
func NewPushCommand(dockerCli command.Cli) *cobra.Command {
cmd := &cobra.Command{
Use: "push [OPTIONS] NAME[:TAG]",
Short: "Push an image or a repository to a registry",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runPush(dockerCli, args[0])
},
}
flags := cmd.Flags()
command.AddTrustSigningFlags(flags)
return cmd
}
func runPush(dockerCli command.Cli, remote string) error {
ref, err := reference.ParseNormalizedNamed(remote)
if err != nil {
return err
}
// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return err
}
ctx := context.Background()
// Resolve the Auth config relevant for this server
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push")
if command.IsTrusted() {
return trustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege)
}
responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege)
if err != nil {
return err
}
defer responseBody.Close()
return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
}


@@ -0,0 +1,78 @@
package image
import (
"fmt"
"strings"
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
type removeOptions struct {
force bool
noPrune bool
}
// NewRemoveCommand creates a new `docker rmi` command
func NewRemoveCommand(dockerCli command.Cli) *cobra.Command {
var opts removeOptions
cmd := &cobra.Command{
Use: "rmi [OPTIONS] IMAGE [IMAGE...]",
Short: "Remove one or more images",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return runRemove(dockerCli, opts, args)
},
}
flags := cmd.Flags()
flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image")
flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents")
return cmd
}
func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
cmd := *NewRemoveCommand(dockerCli)
cmd.Aliases = []string{"rmi", "remove"}
cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]"
return &cmd
}
func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error {
client := dockerCli.Client()
ctx := context.Background()
options := types.ImageRemoveOptions{
Force: opts.force,
PruneChildren: !opts.noPrune,
}
var errs []string
for _, image := range images {
dels, err := client.ImageRemove(ctx, image, options)
if err != nil {
errs = append(errs, err.Error())
} else {
for _, del := range dels {
if del.Deleted != "" {
fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted)
} else {
fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged)
}
}
}
}
if len(errs) > 0 {
return errors.Errorf("%s", strings.Join(errs, "\n"))
}
return nil
}

56
vendor/github.com/docker/cli/cli/command/image/save.go generated vendored Normal file

@@ -0,0 +1,56 @@
package image
import (
"io"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/net/context"
)
type saveOptions struct {
images []string
output string
}
// NewSaveCommand creates a new `docker save` command
func NewSaveCommand(dockerCli command.Cli) *cobra.Command {
var opts saveOptions
cmd := &cobra.Command{
Use: "save [OPTIONS] IMAGE [IMAGE...]",
Short: "Save one or more images to a tar archive (streamed to STDOUT by default)",
Args: cli.RequiresMinArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
opts.images = args
return runSave(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT")
return cmd
}
func runSave(dockerCli command.Cli, opts saveOptions) error {
if opts.output == "" && dockerCli.Out().IsTerminal() {
return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect")
}
responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images)
if err != nil {
return err
}
defer responseBody.Close()
if opts.output == "" {
_, err := io.Copy(dockerCli.Out(), responseBody)
return err
}
return command.CopyToFile(opts.output, responseBody)
}
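
A sketch of the equivalent direct API call for `docker save -o`, streaming the tar to a file (file name is an arbitrary example):

package main

import (
	"io"
	"os"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.NewEnvClient() // assumed constructor from the vendored docker client
	if err != nil {
		panic(err)
	}
	// Equivalent of `docker save -o alpine.tar alpine:latest`.
	body, err := c.ImageSave(context.Background(), []string{"alpine:latest"})
	if err != nil {
		panic(err)
	}
	defer body.Close()
	out, err := os.Create("alpine.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, body); err != nil {
		panic(err)
	}
}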

41
vendor/github.com/docker/cli/cli/command/image/tag.go generated vendored Normal file

@@ -0,0 +1,41 @@
package image
import (
"golang.org/x/net/context"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/spf13/cobra"
)
type tagOptions struct {
image string
name string
}
// NewTagCommand creates a new `docker tag` command
func NewTagCommand(dockerCli command.Cli) *cobra.Command {
var opts tagOptions
cmd := &cobra.Command{
Use: "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]",
Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE",
Args: cli.ExactArgs(2),
RunE: func(cmd *cobra.Command, args []string) error {
opts.image = args[0]
opts.name = args[1]
return runTag(dockerCli, opts)
},
}
flags := cmd.Flags()
flags.SetInterspersed(false)
return cmd
}
func runTag(dockerCli command.Cli, opts tagOptions) error {
ctx := context.Background()
return dockerCli.Client().ImageTag(ctx, opts.image, opts.name)
}
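
And the tag command is a thin wrapper over one client call; a sketch (image names are arbitrary examples):

package main

import (
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.NewEnvClient() // assumed constructor from the vendored docker client
	if err != nil {
		panic(err)
	}
	// Equivalent of `docker tag alpine:latest myrepo/alpine:v1`.
	if err := c.ImageTag(context.Background(), "alpine:latest", "myrepo/alpine:v1"); err != nil {
		panic(err)
	}
}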

384
vendor/github.com/docker/cli/cli/command/image/trust.go generated vendored Normal file

@@ -0,0 +1,384 @@
package image
import (
"encoding/hex"
"encoding/json"
"fmt"
"io"
"path"
"sort"
"github.com/Sirupsen/logrus"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/trust"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/registry"
"github.com/docker/notary/client"
"github.com/docker/notary/tuf/data"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
type target struct {
name string
digest digest.Digest
size int64
}
// trustedPush handles content trust pushing of an image
func trustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege)
if err != nil {
return err
}
defer responseBody.Close()
return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody)
}
// PushTrustedReference pushes a canonical reference to the trust server.
// nolint: gocyclo
func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error {
// If it is a trusted push we would like to find the target entry which matches the
// tag provided in the function and then do an AddTarget later.
target := &client.Target{}
// Count how many times handleTarget is called;
// if it is called more than once, that is considered an error in a trusted push.
cnt := 0
handleTarget := func(aux *json.RawMessage) {
cnt++
if cnt > 1 {
// handleTarget should only be called once. Being called again is treated as an error.
return
}
var pushResult types.PushResult
err := json.Unmarshal(*aux, &pushResult)
if err == nil && pushResult.Tag != "" {
if dgst, err := digest.Parse(pushResult.Digest); err == nil {
h, err := hex.DecodeString(dgst.Hex())
if err != nil {
target = nil
return
}
target.Name = pushResult.Tag
target.Hashes = data.Hashes{string(dgst.Algorithm()): h}
target.Length = int64(pushResult.Size)
}
}
}
var tag string
switch x := ref.(type) {
case reference.Canonical:
return errors.New("cannot push a digest reference")
case reference.NamedTagged:
tag = x.Tag()
default:
// We want trust signatures to always take an explicit tag,
// otherwise it will act as an untrusted push.
if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil {
return err
}
fmt.Fprintln(streams.Out(), "No tag specified, skipping trust metadata push")
return nil
}
if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil {
return err
}
if cnt > 1 {
return errors.Errorf("internal error: only one call to handleTarget expected")
}
if target == nil {
fmt.Fprintln(streams.Out(), "No targets found, please provide a specific tag in order to sign it")
return nil
}
fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata")
repo, err := trust.GetNotaryRepository(streams, repoInfo, authConfig, "push", "pull")
if err != nil {
fmt.Fprintf(streams.Out(), "Error establishing connection to notary repository: %s\n", err)
return err
}
// get the latest repository metadata so we can figure out which roles to sign
err = repo.Update(false)
switch err.(type) {
case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
keys := repo.CryptoService.ListKeys(data.CanonicalRootRole)
var rootKeyID string
// always select the first root key
if len(keys) > 0 {
sort.Strings(keys)
rootKeyID = keys[0]
} else {
rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
if err != nil {
return err
}
rootKeyID = rootPublicKey.ID()
}
// Initialize the notary repository with a remotely managed snapshot key
if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil {
return trust.NotaryError(repoInfo.Name.Name(), err)
}
fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
err = repo.AddTarget(target, data.CanonicalTargetsRole)
case nil:
// already initialized and we have successfully downloaded the latest metadata
err = addTargetToAllSignableRoles(repo, target)
default:
return trust.NotaryError(repoInfo.Name.Name(), err)
}
if err == nil {
err = repo.Publish()
}
if err != nil {
fmt.Fprintf(streams.Out(), "Failed to sign %q:%s - %s\n", repoInfo.Name.Name(), tag, err.Error())
return trust.NotaryError(repoInfo.Name.Name(), err)
}
fmt.Fprintf(streams.Out(), "Successfully signed %q:%s\n", repoInfo.Name.Name(), tag)
return nil
}
// Attempt to add the image target to all the top level delegation roles we can
// (based on whether we have the signing key and whether the role's path allows
// us to).
// If there are no delegation roles, we add to the targets role.
func addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error {
var signableRoles []string
// translate the full key names, which includes the GUN, into just the key IDs
allCanonicalKeyIDs := make(map[string]struct{})
for fullKeyID := range repo.CryptoService.ListAllKeys() {
allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
}
allDelegationRoles, err := repo.GetDelegationRoles()
if err != nil {
return err
}
// if there are no delegation roles, then just try to sign it into the targets role
if len(allDelegationRoles) == 0 {
return repo.AddTarget(target, data.CanonicalTargetsRole)
}
// there are delegation roles, find every delegation role we have a key for, and
// attempt to sign into all those roles.
for _, delegationRole := range allDelegationRoles {
// We do not support signing any delegation role that isn't a direct child of the targets role.
// Also don't bother checking the keys if we can't add the target
// to this role due to path restrictions
if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) {
continue
}
for _, canonicalKeyID := range delegationRole.KeyIDs {
if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
signableRoles = append(signableRoles, delegationRole.Name)
break
}
}
}
if len(signableRoles) == 0 {
return errors.Errorf("no valid signing keys for delegation roles")
}
return repo.AddTarget(target, signableRoles...)
}
// imagePushPrivileged pushes the image
func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) {
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
if err != nil {
return nil, err
}
options := types.ImagePushOptions{
RegistryAuth: encodedAuth,
PrivilegeFunc: requestPrivilege,
}
return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options)
}
// trustedPull handles content trust pulling of an image
func trustedPull(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
var refs []target
notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
if err != nil {
fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err)
return err
}
if tagged, isTagged := ref.(reference.NamedTagged); !isTagged {
// List all targets
targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole)
if err != nil {
return trust.NotaryError(ref.Name(), err)
}
for _, tgt := range targets {
t, err := convertTarget(tgt.Target)
if err != nil {
fmt.Fprintf(cli.Out(), "Skipping target for %q\n", reference.FamiliarName(ref))
continue
}
// Only list tags in the top level targets role or the releases delegation role - ignore
// all other delegation roles
if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole {
continue
}
refs = append(refs, t)
}
if len(refs) == 0 {
return trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name()))
}
} else {
t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
if err != nil {
return trust.NotaryError(ref.Name(), err)
}
// Only get the tag if it's in the top level targets role or the releases delegation role
// ignore it if it's in any other delegation roles
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
return trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag()))
}
logrus.Debugf("retrieving target for %s role\n", t.Role)
r, err := convertTarget(t.Target)
if err != nil {
return err
}
refs = append(refs, r)
}
for i, r := range refs {
displayTag := r.name
if displayTag != "" {
displayTag = ":" + displayTag
}
fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest)
trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest)
if err != nil {
return err
}
if err := imagePullPrivileged(ctx, cli, authConfig, reference.FamiliarString(trustedRef), requestPrivilege, false); err != nil {
return err
}
tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name)
if err != nil {
return err
}
if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil {
return err
}
}
return nil
}
// imagePullPrivileged pulls the image and displays it to the output
func imagePullPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error {
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
if err != nil {
return err
}
options := types.ImagePullOptions{
RegistryAuth: encodedAuth,
PrivilegeFunc: requestPrivilege,
All: all,
}
responseBody, err := cli.Client().ImagePull(ctx, ref, options)
if err != nil {
return err
}
defer responseBody.Close()
return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil)
}
// TrustedReference returns the canonical trusted reference for an image reference
func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) {
var (
repoInfo *registry.RepositoryInfo
err error
)
if rs != nil {
repoInfo, err = rs.ResolveRepository(ref)
} else {
repoInfo, err = registry.ParseRepositoryInfo(ref)
}
if err != nil {
return nil, err
}
// Resolve the Auth config relevant for this server
authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)
notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
if err != nil {
fmt.Fprintf(cli.Out(), "Error establishing connection to trust repository: %s\n", err)
return nil, err
}
t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
if err != nil {
return nil, trust.NotaryError(repoInfo.Name.Name(), err)
}
// Only list tags in the top level targets role or the releases delegation role - ignore
// all other delegation roles
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", ref.Tag()))
}
r, err := convertTarget(t.Target)
if err != nil {
return nil, err
}
return reference.WithDigest(reference.TrimNamed(ref), r.digest)
}
func convertTarget(t client.Target) (target, error) {
h, ok := t.Hashes["sha256"]
if !ok {
return target{}, errors.New("no valid hash, expecting sha256")
}
return target{
name: t.Name,
digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)),
size: t.Length,
}, nil
}
// TagTrusted tags a trusted ref
// nolint: interfacer
func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error {
// Use familiar references when interacting with client and output
familiarRef := reference.FamiliarString(ref)
trustedFamiliarRef := reference.FamiliarString(trustedRef)
fmt.Fprintf(cli.Out(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef)
return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef)
}
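
The conversion above (convertTarget, TrustedReference) amounts to re-encoding a raw notary SHA-256 hash as a canonical digest and attaching it to a tag-stripped reference. A minimal, self-contained sketch of that step, with a made-up all-zero hash standing in for t.Hashes["sha256"]:

package main

import (
	"encoding/hex"
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	raw := make([]byte, 32) // stand-in for a notary target's sha256 hash
	dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(raw))

	named, err := reference.ParseNormalizedNamed("alpine:3.7")
	if err != nil {
		panic(err)
	}
	// Drop the tag and attach the digest, as TrustedReference does.
	canonical, err := reference.WithDigest(reference.TrimNamed(named), dgst)
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.String()) // docker.io/library/alpine@sha256:0000...
}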

56
vendor/github.com/docker/cli/cli/command/in.go generated vendored Normal file

@ -0,0 +1,56 @@
package command
import (
"errors"
"io"
"os"
"runtime"
"github.com/docker/docker/pkg/term"
)
// InStream is an input stream used by the DockerCli to read user input
type InStream struct {
CommonStream
in io.ReadCloser
}
func (i *InStream) Read(p []byte) (int, error) {
return i.in.Read(p)
}
// Close implements the Closer interface
func (i *InStream) Close() error {
return i.in.Close()
}
// SetRawTerminal sets raw mode on the input terminal
func (i *InStream) SetRawTerminal() (err error) {
if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal {
return nil
}
i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd)
return err
}
// CheckTty checks if we are trying to attach to a container tty
// from a non-tty client input stream, and if so, returns an error.
func (i *InStream) CheckTty(attachStdin, ttyMode bool) error {
// In order to attach to a container tty, input stream for the client must
// be a tty itself: redirecting or piping the client standard input is
// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
if ttyMode && attachStdin && !i.isTerminal {
eText := "the input device is not a TTY"
if runtime.GOOS == "windows" {
return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
}
return errors.New(eText)
}
return nil
}
// NewInStream returns a new InStream object from a ReadCloser
func NewInStream(in io.ReadCloser) *InStream {
fd, isTerminal := term.GetFdInfo(in)
return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in}
}
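
A usage sketch for InStream (illustrative only, not part of the vendored file): wrap stdin, switch it to raw mode, and restore the terminal on exit. RestoreTerminal comes from the embedded CommonStream defined further below.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
)

func main() {
	in := command.NewInStream(os.Stdin)
	// No-op when NORAW is set or stdin is not a terminal, per SetRawTerminal above.
	if err := in.SetRawTerminal(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer in.RestoreTerminal()

	buf := make([]byte, 1)
	if _, err := in.Read(buf); err == nil {
		fmt.Printf("\r\nread byte %q\r\n", buf[0])
	}
}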

50
vendor/github.com/docker/cli/cli/command/out.go generated vendored Normal file

@ -0,0 +1,50 @@
package command
import (
"io"
"os"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/term"
)
// OutStream is an output stream used by the DockerCli to write normal program
// output.
type OutStream struct {
CommonStream
out io.Writer
}
func (o *OutStream) Write(p []byte) (int, error) {
return o.out.Write(p)
}
// SetRawTerminal sets raw mode on the input terminal
func (o *OutStream) SetRawTerminal() (err error) {
if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal {
return nil
}
o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd)
return err
}
// GetTtySize returns the height and width in characters of the tty
func (o *OutStream) GetTtySize() (uint, uint) {
if !o.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(o.fd)
if err != nil {
logrus.Debugf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return uint(ws.Height), uint(ws.Width)
}
// NewOutStream returns a new OutStream object from a Writer
func NewOutStream(out io.Writer) *OutStream {
fd, isTerminal := term.GetFdInfo(out)
return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out}
}
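
A matching sketch for OutStream: when stdout is a pipe rather than a terminal, GetTtySize reports 0x0, exactly as the guard above implies.

package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/command"
)

func main() {
	out := command.NewOutStream(os.Stdout)
	h, w := out.GetTtySize()
	// OutStream implements io.Writer, so it can be written to directly.
	fmt.Fprintf(out, "terminal: %v, size: %dx%d\n", out.IsTerminal(), w, h)
}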

189
vendor/github.com/docker/cli/cli/command/registry.go generated vendored Normal file

@ -0,0 +1,189 @@
package command
import (
"bufio"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"runtime"
"strings"
"golang.org/x/net/context"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
)
// ElectAuthServer returns the default registry to use (by asking the daemon)
func ElectAuthServer(ctx context.Context, cli Cli) string {
// The daemon `/info` endpoint informs us of the default registry being
// used. This is essential in cross-platform environments, where for
// example a Linux client might be interacting with a Windows daemon, hence
// the default registry URL might be Windows specific.
serverAddress := registry.IndexServer
if info, err := cli.Client().Info(ctx); err != nil {
fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
} else if info.IndexServerAddress == "" {
fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
} else {
serverAddress = info.IndexServerAddress
}
return serverAddress
}
// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload
func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(buf), nil
}
// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
// for the given command.
func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
return func() (string, error) {
fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName)
indexServer := registry.GetAuthConfigKey(index)
isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry)
if err != nil {
return "", err
}
return EncodeAuthToBase64(authConfig)
}
}
// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the
// default index, it uses the default index name for the daemon's platform,
// not the client's platform.
func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig {
configKey := index.Name
if index.Official {
configKey = ElectAuthServer(ctx, cli)
}
a, _ := cli.CredentialsStore(configKey).Get(configKey)
return a
}
// ConfigureAuth returns an AuthConfig from the specified user, password and server.
func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
if runtime.GOOS == "windows" {
cli.SetIn(NewInStream(os.Stdin))
}
if !isDefaultRegistry {
serverAddress = registry.ConvertToHostname(serverAddress)
}
authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress)
if err != nil {
return authconfig, err
}
// Some links documenting this:
// - https://code.google.com/archive/p/mintty/issues/56
// - https://github.com/docker/docker/issues/15272
// - https://mintty.github.io/ (compatibility)
// Linux will hit this if you attempt `cat | docker login`, and Windows
// will hit this if you attempt docker login from mintty where stdin
// is a pipe, not a character based console.
if flPassword == "" && !cli.In().IsTerminal() {
return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
}
authconfig.Username = strings.TrimSpace(authconfig.Username)
if flUser = strings.TrimSpace(flUser); flUser == "" {
if isDefaultRegistry {
// if this is a default registry (docker hub), then display the following message.
fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.")
}
promptWithDefault(cli.Out(), "Username", authconfig.Username)
flUser = readInput(cli.In(), cli.Out())
flUser = strings.TrimSpace(flUser)
if flUser == "" {
flUser = authconfig.Username
}
}
if flUser == "" {
return authconfig, errors.Errorf("Error: Non-null Username Required")
}
if flPassword == "" {
oldState, err := term.SaveState(cli.In().FD())
if err != nil {
return authconfig, err
}
fmt.Fprintf(cli.Out(), "Password: ")
term.DisableEcho(cli.In().FD(), oldState)
flPassword = readInput(cli.In(), cli.Out())
fmt.Fprint(cli.Out(), "\n")
term.RestoreTerminal(cli.In().FD(), oldState)
if flPassword == "" {
return authconfig, errors.Errorf("Error: Password Required")
}
}
authconfig.Username = flUser
authconfig.Password = flPassword
authconfig.ServerAddress = serverAddress
authconfig.IdentityToken = ""
return authconfig, nil
}
func readInput(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
func promptWithDefault(out io.Writer, prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(out, "%s: ", prompt)
} else {
fmt.Fprintf(out, "%s (%s): ", prompt, configDefault)
}
}
// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image
func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) {
// Retrieve encoded auth token from the image reference
authConfig, err := resolveAuthConfigFromImage(ctx, cli, image)
if err != nil {
return "", err
}
encodedAuth, err := EncodeAuthToBase64(authConfig)
if err != nil {
return "", err
}
return encodedAuth, nil
}
// resolveAuthConfigFromImage retrieves the AuthConfig using the image string
func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) {
registryRef, err := reference.ParseNormalizedNamed(image)
if err != nil {
return types.AuthConfig{}, err
}
repoInfo, err := registry.ParseRepositoryInfo(registryRef)
if err != nil {
return types.AuthConfig{}, err
}
return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil
}
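
EncodeAuthToBase64 above is plain JSON marshalling followed by URL-safe base64 encoding. A sketch with placeholder credentials; the result is what earlier code passes as types.ImagePushOptions.RegistryAuth:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
)

func main() {
	auth := types.AuthConfig{
		Username:      "someuser", // placeholder
		Password:      "s3cret",   // placeholder
		ServerAddress: "https://index.docker.io/v1/",
	}
	encoded, err := command.EncodeAuthToBase64(auth)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded) // base64(JSON) auth payload
}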

34
vendor/github.com/docker/cli/cli/command/stream.go generated vendored Normal file

@ -0,0 +1,34 @@
package command
import (
"github.com/docker/docker/pkg/term"
)
// CommonStream is an input stream used by the DockerCli to read user input
type CommonStream struct {
fd uintptr
isTerminal bool
state *term.State
}
// FD returns the file descriptor number for this stream
func (s *CommonStream) FD() uintptr {
return s.fd
}
// IsTerminal returns true if this stream is connected to a terminal
func (s *CommonStream) IsTerminal() bool {
return s.isTerminal
}
// RestoreTerminal restores normal mode to the terminal
func (s *CommonStream) RestoreTerminal() {
if s.state != nil {
term.RestoreTerminal(s.fd, s.state)
}
}
// SetIsTerminal sets the boolean used for isTerminal
func (s *CommonStream) SetIsTerminal(isTerminal bool) {
s.isTerminal = isTerminal
}

43
vendor/github.com/docker/cli/cli/command/trust.go generated vendored Normal file

@ -0,0 +1,43 @@
package command
import (
"os"
"strconv"
"github.com/spf13/pflag"
)
var (
// TODO: make this not global
untrusted bool
)
// AddTrustVerificationFlags adds content trust flags to the provided flagset
func AddTrustVerificationFlags(fs *pflag.FlagSet) {
trusted := getDefaultTrustState()
fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification")
}
// AddTrustSigningFlags adds "signing" flags to the provided flagset
func AddTrustSigningFlags(fs *pflag.FlagSet) {
trusted := getDefaultTrustState()
fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing")
}
// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable.
func getDefaultTrustState() bool {
var trusted bool
if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
if t, err := strconv.ParseBool(e); t || err != nil {
// treat any other value as true
trusted = true
}
}
return trusted
}
// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable,
// or through `--disable-content-trust=false` on a command.
func IsTrusted() bool {
return !untrusted
}
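
The $DOCKER_CONTENT_TRUST rule above is easy to misread: any value that strconv.ParseBool rejects also counts as true. A small self-contained table of outcomes, reproducing the same logic:

package main

import (
	"fmt"
	"strconv"
)

// trustedFor mirrors getDefaultTrustState for a given variable value.
func trustedFor(v string) bool {
	if v == "" {
		return false
	}
	t, err := strconv.ParseBool(v)
	return t || err != nil // any unparseable value is treated as true
}

func main() {
	for _, v := range []string{"", "1", "true", "0", "false", "banana"} {
		fmt.Printf("DOCKER_CONTENT_TRUST=%q -> trusted=%v\n", v, trustedFor(v))
	}
}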

119
vendor/github.com/docker/cli/cli/command/utils.go generated vendored Normal file

@ -0,0 +1,119 @@
package command
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/system"
)
// CopyToFile writes the content of the reader to the specified file
func CopyToFile(outfile string, r io.Reader) error {
// We use sequential file access here to avoid depleting the standby list
// on Windows. On Linux, this is a call directly to ioutil.TempFile
tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_")
if err != nil {
return err
}
tmpPath := tmpFile.Name()
_, err = io.Copy(tmpFile, r)
tmpFile.Close()
if err != nil {
os.Remove(tmpPath)
return err
}
if err = os.Rename(tmpPath, outfile); err != nil {
os.Remove(tmpPath)
return err
}
return nil
}
// capitalizeFirst capitalizes the first character of the string
func capitalizeFirst(s string) string {
switch l := len(s); l {
case 0:
return s
case 1:
return strings.ToLower(s)
default:
return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
}
}
// PrettyPrint outputs arbitrary data for human-formatted output by uppercasing the first letter.
func PrettyPrint(i interface{}) string {
switch t := i.(type) {
case nil:
return "None"
case string:
return capitalizeFirst(t)
default:
return capitalizeFirst(fmt.Sprintf("%s", t))
}
}
// PromptForConfirmation requests and checks confirmation from the user.
// This will display the provided message followed by ' [y/N] '. If
// the user inputs 'y' or 'Y' it returns true, otherwise false. If no
// message is provided, "Are you sure you want to proceed? [y/N] "
// will be used instead.
func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
if message == "" {
message = "Are you sure you want to proceed?"
}
message += " [y/N] "
fmt.Fprint(outs, message)
// On Windows, force the use of the regular OS stdin stream.
if runtime.GOOS == "windows" {
ins = NewInStream(os.Stdin)
}
reader := bufio.NewReader(ins)
answer, _, _ := reader.ReadLine()
return strings.ToLower(string(answer)) == "y"
}
// PruneFilters returns consolidated prune filters obtained from config.json and cli
func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
if dockerCli.ConfigFile() == nil {
return pruneFilters
}
for _, f := range dockerCli.ConfigFile().PruneFilters {
parts := strings.SplitN(f, "=", 2)
if len(parts) != 2 {
continue
}
if parts[0] == "label" {
// The CLI label filter supersedes config.json.
// If the CLI label filter conflicts with config.json,
// skip adding the label! filter from config.json.
if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) {
continue
}
} else if parts[0] == "label!" {
// The CLI label! filter supersedes config.json.
// If the CLI label! filter conflicts with config.json,
// skip adding the label filter from config.json.
if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) {
continue
}
}
pruneFilters.Add(parts[0], parts[1])
}
return pruneFilters
}
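
The PruneFilters merge rule above, shown in isolation: a label! filter set on the CLI suppresses a conflicting label entry coming from config.json. The filter values here are made up.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("label!", "keep=true") // set on the command line

	// Simulated config.json entry "label=keep=true": skipped because it
	// conflicts with the CLI label! filter, per the rule above.
	key, value := "label", "keep=true"
	if key == "label" && pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", value) {
		fmt.Println("skipping conflicting config.json filter")
		return
	}
	pruneFilters.Add(key, value)
}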

33
vendor/github.com/docker/cli/cli/error.go generated vendored Normal file

@ -0,0 +1,33 @@
package cli
import (
"fmt"
"strings"
)
// Errors is a list of errors.
// Useful in a loop when you don't want to return an error right away, but
// instead want to collect and display all the errors after the loop.
type Errors []error
func (errList Errors) Error() string {
if len(errList) < 1 {
return ""
}
out := make([]string, len(errList))
for i := range errList {
out[i] = errList[i].Error()
}
return strings.Join(out, ", ")
}
// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
Status string
StatusCode int
}
func (e StatusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
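
A minimal sketch of the Errors type in use: accumulate failures in a loop and render them once, comma-joined, instead of returning on the first error.

package main

import (
	"errors"
	"fmt"

	"github.com/docker/cli/cli"
)

func main() {
	var errs cli.Errors
	for _, name := range []string{"one", "two"} {
		errs = append(errs, errors.New("failed to remove "+name))
	}
	if len(errs) > 0 {
		fmt.Println(errs.Error()) // failed to remove one, failed to remove two
	}
}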

96
vendor/github.com/docker/cli/cli/required.go generated vendored Normal file

@ -0,0 +1,96 @@
package cli
import (
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// NoArgs validates args and returns an error if there are any args
func NoArgs(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return nil
}
if cmd.HasSubCommands() {
return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
}
return errors.Errorf(
"\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
// RequiresMinArgs returns an error if there are not at least min args
func RequiresMinArgs(min int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) >= min {
return nil
}
return errors.Errorf(
"\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
min,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// RequiresMaxArgs returns an error if there are more than max args
func RequiresMaxArgs(max int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) <= max {
return nil
}
return errors.Errorf(
"\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
max,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// RequiresRangeArgs returns an error if there are not at least min and at most max args
func RequiresRangeArgs(min int, max int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) >= min && len(args) <= max {
return nil
}
return errors.Errorf(
"\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
min,
max,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// ExactArgs returns an error if there is not exactly the given number of args
func ExactArgs(number int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) == number {
return nil
}
return errors.Errorf(
"\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
number,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
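
These validators plug directly into cobra's Args field. A sketch with a hypothetical command; invalid arity produces the usage message built above.

package main

import (
	"fmt"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet NAME",
		Args: cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello", args[0])
			return nil
		},
	}
	cmd.SetArgs([]string{"world"})
	cmd.Execute() // prints "hello world"
}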

9
vendor/github.com/docker/cli/cli/version.go generated vendored Normal file

@ -0,0 +1,9 @@
package cli
// Default build-time variables.
// These values are overridden via ldflags.
var (
Version = "unknown-version"
GitCommit = "unknown-commit"
BuildTime = "unknown-buildtime"
)


@ -0,0 +1,2 @@
// Package registry provides the main entrypoints for running a registry.
package registry


@ -0,0 +1,356 @@
package registry
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
"rsc.io/letsencrypt"
log "github.com/Sirupsen/logrus"
logstash "github.com/bshuster-repo/logrus-logstash-hook"
"github.com/bugsnag/bugsnag-go"
"github.com/docker/distribution/configuration"
"github.com/docker/distribution/context"
"github.com/docker/distribution/health"
"github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/registry/listener"
"github.com/docker/distribution/uuid"
"github.com/docker/distribution/version"
gorhandlers "github.com/gorilla/handlers"
"github.com/spf13/cobra"
"github.com/yvasiyarov/gorelic"
)
// ServeCmd is a cobra command for running the registry.
var ServeCmd = &cobra.Command{
Use: "serve <config>",
Short: "`serve` stores and distributes Docker images",
Long: "`serve` stores and distributes Docker images.",
Run: func(cmd *cobra.Command, args []string) {
// setup context
ctx := context.WithVersion(context.Background(), version.Version)
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
if config.HTTP.Debug.Addr != "" {
go func(addr string) {
log.Infof("debug server listening on %v", addr)
if err := http.ListenAndServe(addr, nil); err != nil {
log.Fatalf("error listening on debug interface: %v", err)
}
}(config.HTTP.Debug.Addr)
}
registry, err := NewRegistry(ctx, config)
if err != nil {
log.Fatalln(err)
}
if err = registry.ListenAndServe(); err != nil {
log.Fatalln(err)
}
},
}
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {
config *configuration.Configuration
app *handlers.App
server *http.Server
}
// NewRegistry creates a new registry from a context and configuration struct.
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
var err error
ctx, err = configureLogging(ctx, config)
if err != nil {
return nil, fmt.Errorf("error configuring logger: %v", err)
}
// Inject a logger into the uuid library; it warns us if there is a problem
// with uuid generation under low entropy.
uuid.Loggerf = context.GetLogger(ctx).Warnf
app := handlers.NewApp(ctx, config)
// TODO(aaronl): The global scope of the health checks means NewRegistry
// can only be called once per process.
app.RegisterHealthChecks()
handler := configureReporting(app)
handler = alive("/", handler)
handler = health.Handler(handler)
handler = panicHandler(handler)
if !config.Log.AccessLog.Disabled {
handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
}
server := &http.Server{
Handler: handler,
}
return &Registry{
app: app,
config: config,
server: server,
}, nil
}
// ListenAndServe runs the registry's HTTP server.
func (registry *Registry) ListenAndServe() error {
config := registry.config
ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
if err != nil {
return err
}
if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
tlsConf := &tls.Config{
ClientAuth: tls.NoClientCert,
NextProtos: nextProtos(config),
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
},
}
if config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
if config.HTTP.TLS.Certificate != "" {
return fmt.Errorf("cannot specify both certificate and Let's Encrypt")
}
var m letsencrypt.Manager
if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil {
return err
}
if !m.Registered() {
if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil {
return err
}
}
tlsConf.GetCertificate = m.GetCertificate
} else {
tlsConf.Certificates = make([]tls.Certificate, 1)
tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
if err != nil {
return err
}
}
if len(config.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()
for _, ca := range config.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
return err
}
if ok := pool.AppendCertsFromPEM(caPem); !ok {
return fmt.Errorf("Could not add CA to pool")
}
}
for _, subj := range pool.Subjects() {
context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}
ln = tls.NewListener(ln, tlsConf)
context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
} else {
context.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
}
return registry.server.Serve(ln)
}
func configureReporting(app *handlers.App) http.Handler {
var handler http.Handler = app
if app.Config.Reporting.Bugsnag.APIKey != "" {
bugsnagConfig := bugsnag.Configuration{
APIKey: app.Config.Reporting.Bugsnag.APIKey,
// TODO(brianbland): provide the registry version here
// AppVersion: "2.0",
}
if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
}
if app.Config.Reporting.Bugsnag.Endpoint != "" {
bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
}
bugsnag.Configure(bugsnagConfig)
handler = bugsnag.Handler(handler)
}
if app.Config.Reporting.NewRelic.LicenseKey != "" {
agent := gorelic.NewAgent()
agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
if app.Config.Reporting.NewRelic.Name != "" {
agent.NewrelicName = app.Config.Reporting.NewRelic.Name
}
agent.CollectHTTPStat = true
agent.Verbose = app.Config.Reporting.NewRelic.Verbose
agent.Run()
handler = agent.WrapHTTPHandler(handler)
}
return handler
}
// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
if config.Log.Level == "" && config.Log.Formatter == "" {
// If no config for logging is set, fallback to deprecated "Loglevel".
log.SetLevel(logLevel(config.Loglevel))
ctx = context.WithLogger(ctx, context.GetLogger(ctx))
return ctx, nil
}
log.SetLevel(logLevel(config.Log.Level))
formatter := config.Log.Formatter
if formatter == "" {
formatter = "text" // default formatter
}
switch formatter {
case "json":
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "text":
log.SetFormatter(&log.TextFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "logstash":
log.SetFormatter(&logstash.LogstashFormatter{
TimestampFormat: time.RFC3339Nano,
})
default:
// just let the library use default on empty string.
if config.Log.Formatter != "" {
return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
}
}
if config.Log.Formatter != "" {
log.Debugf("using %q logging formatter", config.Log.Formatter)
}
if len(config.Log.Fields) > 0 {
// build up the static fields, if present.
var fields []interface{}
for k := range config.Log.Fields {
fields = append(fields, k)
}
ctx = context.WithValues(ctx, config.Log.Fields)
ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
}
return ctx, nil
}
func logLevel(level configuration.Loglevel) log.Level {
l, err := log.ParseLevel(string(level))
if err != nil {
l = log.InfoLevel
log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
}
return l
}
// panicHandler adds an HTTP handler to the web app. The handler recovers from a
// panic. logrus.Panic transmits the panic message to the pre-configured log hooks
// defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
log.Panic(fmt.Sprintf("%v", err))
}
}()
handler.ServeHTTP(w, r)
})
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater effect.
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
func resolveConfiguration(args []string) (*configuration.Configuration, error) {
var configurationPath string
if len(args) > 0 {
configurationPath = args[0]
} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
}
if configurationPath == "" {
return nil, fmt.Errorf("configuration path unspecified")
}
fp, err := os.Open(configurationPath)
if err != nil {
return nil, err
}
defer fp.Close()
config, err := configuration.Parse(fp)
if err != nil {
return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
}
return config, nil
}
func nextProtos(config *configuration.Configuration) []string {
switch config.HTTP.HTTP2.Disabled {
case true:
return []string{"http/1.1"}
default:
return []string{"h2", "http/1.1"}
}
}
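
The handler stack in NewRegistry is built by nesting plain http.Handler wrappers, each new wrapper becoming the outermost layer. A reduced sketch of the same pattern, with aliveProbe copied from the alive helper above:

package main

import (
	"fmt"
	"net/http"
)

// aliveProbe is a copy of the alive wrapper above, for illustration.
func aliveProbe(path string, handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == path {
			w.Header().Set("Cache-Control", "no-cache")
			w.WriteHeader(http.StatusOK)
			return
		}
		handler.ServeHTTP(w, r)
	})
}

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "registry app")
	})
	var handler http.Handler = app
	handler = aliveProbe("/", handler) // GET / now short-circuits with 200
	http.ListenAndServe(":5000", handler)
}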

84
vendor/github.com/docker/distribution/registry/root.go generated vendored Normal file

@ -0,0 +1,84 @@
package registry
import (
"fmt"
"os"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage"
"github.com/docker/distribution/registry/storage/driver/factory"
"github.com/docker/distribution/version"
"github.com/docker/libtrust"
"github.com/spf13/cobra"
)
var showVersion bool
func init() {
RootCmd.AddCommand(ServeCmd)
RootCmd.AddCommand(GCCmd)
GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}
// RootCmd is the main command for the 'registry' binary.
var RootCmd = &cobra.Command{
Use: "registry",
Short: "`registry`",
Long: "`registry`",
Run: func(cmd *cobra.Command, args []string) {
if showVersion {
version.PrintVersion()
return
}
cmd.Usage()
},
}
var dryRun bool
// GCCmd is the cobra command that corresponds to the garbage-collect subcommand
var GCCmd = &cobra.Command{
Use: "garbage-collect <config>",
Short: "`garbage-collect` deletes layers not referenced by any manifests",
Long: "`garbage-collect` deletes layers not referenced by any manifests",
Run: func(cmd *cobra.Command, args []string) {
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
os.Exit(1)
}
ctx := context.Background()
ctx, err = configureLogging(ctx, config)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
os.Exit(1)
}
k, err := libtrust.GenerateECP256PrivateKey()
if err != nil {
fmt.Fprint(os.Stderr, err)
os.Exit(1)
}
registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k))
if err != nil {
fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
os.Exit(1)
}
err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
os.Exit(1)
}
},
}


@ -0,0 +1,60 @@
package storage
import (
"expvar"
"sync/atomic"
"github.com/docker/distribution/registry/storage/cache"
)
type blobStatCollector struct {
metrics cache.Metrics
}
func (bsc *blobStatCollector) Hit() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Hits, 1)
}
func (bsc *blobStatCollector) Miss() {
atomic.AddUint64(&bsc.metrics.Requests, 1)
atomic.AddUint64(&bsc.metrics.Misses, 1)
}
func (bsc *blobStatCollector) Metrics() cache.Metrics {
return bsc.metrics
}
// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
// cache requests. Note this is kept globally and made available via expvar.
// For more detailed metrics, it's recommended to instrument a particular cache
// implementation.
var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
cache := registry.(*expvar.Map).Get("cache")
if cache == nil {
cache = &expvar.Map{}
cache.(*expvar.Map).Init()
registry.(*expvar.Map).Set("cache", cache)
}
storage := cache.(*expvar.Map).Get("storage")
if storage == nil {
storage = &expvar.Map{}
storage.(*expvar.Map).Init()
cache.(*expvar.Map).Set("storage", storage)
}
storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
// no need for synchronous access: the increments are atomic and
// during reading, we don't care if the data is up to date. The
// numbers will always *eventually* be reported correctly.
return blobStatterCacheMetrics
}))
}


@ -0,0 +1,78 @@
package storage
import (
"fmt"
"net/http"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
// TODO(stevvooe): This should be configurable in the future.
const blobCacheControlMaxAge = 365 * 24 * time.Hour
// blobServer simply serves blobs from a driver instance using a path function
// to identify paths and a descriptor service to fill in metadata.
type blobServer struct {
driver driver.StorageDriver
statter distribution.BlobStatter
pathFn func(dgst digest.Digest) (string, error)
redirect bool // allows disabling URLFor redirects
}
func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return err
}
path, err := bs.pathFn(desc.Digest)
if err != nil {
return err
}
if bs.redirect {
redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
switch err.(type) {
case nil:
// Redirect to storage URL.
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
return err
case driver.ErrUnsupportedMethod:
// Fallback to serving the content directly.
default:
// Some unexpected error.
return err
}
}
br, err := newFileReader(ctx, bs.driver, path, desc.Size)
if err != nil {
return err
}
defer br.Close()
w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
if w.Header().Get("Docker-Content-Digest") == "" {
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
}
if w.Header().Get("Content-Type") == "" {
// Set the content type if not already set.
w.Header().Set("Content-Type", desc.MediaType)
}
if w.Header().Get("Content-Length") == "" {
// Set the content length if not already set.
w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
}
http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
return nil
}
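
For reference, the Cache-Control header written above is fixed at one year; a one-liner confirms the arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	const blobCacheControlMaxAge = 365 * 24 * time.Hour
	fmt.Printf("Cache-Control: max-age=%.f\n", blobCacheControlMaxAge.Seconds()) // max-age=31536000
}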


@ -0,0 +1,223 @@
package storage
import (
"path"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
// blobStore implements the read side of the blob store interface over a
// driver without enforcing per-repository membership. This object is
// intentionally a leaky abstraction, providing utility methods that support
// creating and traversing backend links.
type blobStore struct {
driver driver.StorageDriver
statter distribution.BlobStatter
}
var _ distribution.BlobProvider = &blobStore{}
// Get implements the BlobReadService.Get call.
func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
bp, err := bs.path(dgst)
if err != nil {
return nil, err
}
p, err := bs.driver.GetContent(ctx, bp)
if err != nil {
switch err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUnknown
}
return nil, err
}
return p, err
}
func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return nil, err
}
path, err := bs.path(desc.Digest)
if err != nil {
return nil, err
}
return newFileReader(ctx, bs.driver, path, desc.Size)
}
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. This is implemented as a convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
dgst := digest.FromBytes(p)
desc, err := bs.statter.Stat(ctx, dgst)
if err == nil {
// content already present
return desc, nil
} else if err != distribution.ErrBlobUnknown {
context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err)
// real error, return it
return distribution.Descriptor{}, err
}
bp, err := bs.path(dgst)
if err != nil {
return distribution.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype here, as well.
return distribution.Descriptor{
Size: int64(len(p)),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, bs.driver.PutContent(ctx, bp, p)
}
func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
specPath, err := pathFor(blobsPathSpec{})
if err != nil {
return err
}
err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error {
// skip directories
if fileInfo.IsDir() {
return nil
}
currentPath := fileInfo.Path()
// we only want to parse paths that end with /data
_, fileName := path.Split(currentPath)
if fileName != "data" {
return nil
}
digest, err := digestFromPath(currentPath)
if err != nil {
return err
}
return ingester(digest)
})
return err
}
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return "", err
}
return bp, nil
}
// link links the path to the provided digest by writing the digest into the
// target file. Caller must ensure that the blob actually exists.
func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
// The contents of the "link" file are the exact string contents of the
// digest, which is specified in that package.
return bs.driver.PutContent(ctx, path, []byte(dgst))
}
// readlink returns the linked digest at path.
func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
content, err := bs.driver.GetContent(ctx, path)
if err != nil {
return "", err
}
linked, err := digest.Parse(string(content))
if err != nil {
return "", err
}
return linked, nil
}
// resolve reads the digest link at path and returns the blob store path.
func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
dgst, err := bs.readlink(ctx, path)
if err != nil {
return "", err
}
return bs.path(dgst)
}
type blobStatter struct {
driver driver.StorageDriver
}
var _ distribution.BlobDescriptorService = &blobStatter{}
// Stat implements BlobStatter.Stat by returning the descriptor for the blob
// in the main blob store. If this method returns successfully, there is
// a strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return distribution.Descriptor{}, err
}
fi, err := bs.driver.Stat(ctx, path)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return distribution.Descriptor{}, distribution.ErrBlobUnknown
default:
return distribution.Descriptor{}, err
}
}
if fi.IsDir() {
// NOTE(stevvooe): This represents a corruption situation. Somehow, we
// calculated a blob path and then detected a directory. We log the
// error and then error on the side of not knowing about the blob.
context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// TODO(stevvooe): Add method to resolve the mediatype. We can store and
// cache a "global" media type for the blob, even if a specific repo has a
// mediatype that overrides the main one.
return distribution.Descriptor{
Size: fi.Size(),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, nil
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
return distribution.ErrUnsupported
}
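
The link/readlink contract above stores the digest string verbatim as the link file's body, so parsing the bytes back recovers the identical digest. A self-contained round-trip sketch:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	dgst := digest.FromBytes([]byte("hello"))
	linkContents := []byte(dgst) // exactly what blobStore.link writes

	parsed, err := digest.Parse(string(linkContents))
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == dgst) // true
}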


@ -0,0 +1,400 @@
package storage
import (
"errors"
"fmt"
"io"
"path"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
var (
errResumableDigestNotAvailable = errors.New("resumable digest not available")
)
const (
// digestSha256Empty is the canonical sha256 digest of empty data
digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
// blobWriter is used to control the various aspects of resumable
// blob upload.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the contiguous write
fileWriter storagedriver.FileWriter
driver storagedriver.StorageDriver
path string
resumableDigestEnabled bool
committed bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
context.GetLogger(ctx).Debug("(*blobWriter).Commit")
if err := bw.fileWriter.Commit(); err != nil {
return distribution.Descriptor{}, err
}
bw.Close()
desc.Size = bw.Size()
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
}
if err := bw.moveBlob(ctx, canonical); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
return distribution.Descriptor{}, err
}
if err := bw.removeResources(ctx); err != nil {
return distribution.Descriptor{}, err
}
err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
if err != nil {
return distribution.Descriptor{}, err
}
bw.committed = true
return canonical, nil
}
// Cancel the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
if err := bw.fileWriter.Cancel(); err != nil {
return err
}
if err := bw.Close(); err != nil {
context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err)
}
if err := bw.removeResources(ctx); err != nil {
return err
}
return nil
}
func (bw *blobWriter) Size() int64 {
return bw.fileWriter.Size()
}
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p)
bw.written += int64(n)
return n, err
}
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r)
bw.written += nn
return nn, err
}
func (bw *blobWriter) Close() error {
if bw.committed {
return errors.New("blobwriter close after commit")
}
if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return err
}
return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no descriptors are provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
var size int64
// Stat the on disk file
if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return distribution.Descriptor{}, err
}
} else {
if fi.IsDir() {
return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
size = fi.Size()
}
if desc.Size > 0 {
if desc.Size != size {
return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigest(ctx); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return distribution.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.Digester()
verifier := desc.Digest.Verifier()
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
if err != nil {
return distribution.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(verifier, tr); err != nil {
return distribution.Descriptor{}, err
}
canonical = digester.Digest()
verified = verifier.Verified()
}
}
if !verified {
context.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
// Check for existence
if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // ensure that it doesn't exist.
default:
return err
}
} else {
// If the path exists, we can assume that the content has already
// been uploaded, since the blob storage is content-addressable.
// While it may be corrupted, detection of such corruption belongs
// elsewhere.
return nil
}
// If no data was received, we may not actually have a file on disk. Check
// the size here and write a zero-length file to blobPath if this is the
// case. For the most part, this should only ever happen with zero-length
// blobs.
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// HACK(stevvooe): This is slightly dangerous: if we verify above,
// get a hash, then the underlying file is deleted, we risk moving
// a zero-length blob into a nonzero-length blob location. To
// prevent this horrid thing, we employ the hack of only allowing
// this to happen for the digest of an empty blob.
if desc.Digest == digestSha256Empty {
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
}
// We let this fail during the move below.
logrus.
WithField("upload.id", bw.ID()).
WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
default:
return err // unrelated error
}
}
// TODO(stevvooe): We should also write the mediatype when executing this move.
return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Named().Name(),
id: bw.id,
})
if err != nil {
return err
}
// Resolve and delete the containing directory, which should include any
// upload related files.
dirPath := path.Dir(dataPath)
if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // already gone!
default:
// This should be uncommon enough such that returning an error
// should be okay. At this point, the upload should be mostly
// complete, but perhaps the backend became inaccessible.
context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
return err
}
}
return nil
}
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
try := 1
for try <= 5 {
_, err := bw.driver.Stat(bw.ctx, bw.path)
if err == nil {
break
}
switch err.(type) {
case storagedriver.PathNotFoundError:
context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
time.Sleep(1 * time.Second)
try++
default:
return nil, err
}
}
readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
if err != nil {
return nil, err
}
return readCloser, nil
}
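
The Reader retry loop above sleeps a flat second per attempt; the inline TODO asks for exponential backoff with i=0.5, e=2, n=4. A sketch of that schedule only, as an assumption about the TODO's intent rather than vendored behavior:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 500 * time.Millisecond // i = 0.5s
	for try := 1; try <= 4; try++ { // n = 4
		fmt.Printf("try %d: sleeping %v\n", try, delay)
		time.Sleep(delay)
		delay *= 2 // e = 2
	}
}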


@ -0,0 +1,17 @@
// +build noresumabledigest
package storage
import (
"github.com/docker/distribution/context"
)
// resumeHashAt is a noop when resumable digest support is disabled.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
return errResumableDigestNotAvailable
}
// storeHashState is a noop when resumable digest support is disabled.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
return errResumableDigestNotAvailable
}


@ -0,0 +1,145 @@
// +build !noresumabledigest
package storage
import (
"fmt"
"path"
"strconv"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/stevvooe/resumable"
// register resumable hashes with import
_ "github.com/stevvooe/resumable/sha256"
_ "github.com/stevvooe/resumable/sha512"
)
// resumeDigest attempts to restore the state of the internal hash function
// by loading the most recent saved hash state equal to the current size of the blob.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
offset := bw.fileWriter.Size()
if offset == int64(h.Len()) {
// State of digester is already at the requested offset.
return nil
}
// List hash states from storage backend.
var hashStateMatch hashStateEntry
hashStates, err := bw.getStoredHashStates(ctx)
if err != nil {
return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
}
// Find a stored hashState with an offset equal to
// the requested offset.
for _, hashState := range hashStates {
if hashState.offset == offset {
hashStateMatch = hashState
break // Found an exact offset match.
}
}
if hashStateMatch.offset == 0 {
// No need to load any state, just reset the hasher.
h.Reset()
} else {
storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
if err != nil {
return err
}
if err = h.Restore(storedState); err != nil {
return err
}
}
// Mind the gap.
if gapLen := offset - int64(h.Len()); gapLen > 0 {
return errResumableDigestNotAvailable
}
return nil
}
type hashStateEntry struct {
offset int64
path string
}
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
if err != nil {
if _, ok := err.(storagedriver.PathNotFoundError); !ok {
return nil, err
}
// Treat PathNotFoundError as no entries.
paths = nil
}
hashStateEntries := make([]hashStateEntry, 0, len(paths))
for _, p := range paths {
pathSuffix := path.Base(p)
// The suffix should be the offset.
offset, err := strconv.ParseInt(pathSuffix, 0, 64)
if err != nil {
logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
}
hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
}
return hashStateEntries, nil
}
func (bw *blobWriter) storeHashState(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(resumable.Hash)
if !ok {
return errResumableDigestNotAvailable
}
uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: int64(h.Len()),
})
if err != nil {
return err
}
hashState, err := h.State()
if err != nil {
return err
}
return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
}
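// Example (illustrative sketch, not from the upstream source): the
// save/restore round trip that storeHashState and resumeDigest implement,
// reduced to the resumable.Hash API alone. Both hashes are assumed to use
// the same algorithm; the function is hypothetical and for illustration only.
func exampleHashStateRoundTrip(h, fresh resumable.Hash, part1, part2 []byte) ([]byte, error) {
	h.Write(part1)
	state, err := h.State() // what storeHashState persists, keyed by h.Len()
	if err != nil {
		return nil, err
	}
	// Later, possibly in another process, restore into a fresh hasher.
	if err := fresh.Restore(state); err != nil { // what resumeDigest loads
		return nil, err
	}
	fresh.Write(part2) // continue hashing from the saved offset
	return fresh.Sum(nil), nil
}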

View file

@ -0,0 +1,153 @@
package storage
import (
"errors"
"io"
"path"
"strings"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
)
// errFinishedWalk signals an early exit to the walk when the current query
// is satisfied.
var errFinishedWalk = errors.New("finished walk")
// Repositories returns a list, or partial list, of repositories in the registry.
// Because it's quite an expensive operation, it should only be used when building up
// an initial set of repositories.
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
var foundRepos []string
if len(repos) == 0 {
return 0, errors.New("no space in slice")
}
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return 0, err
}
err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
err := handleRepository(fileInfo, root, last, func(repoPath string) error {
foundRepos = append(foundRepos, repoPath)
return nil
})
if err != nil {
return err
}
// if we've filled our array, no need to walk any further
if len(foundRepos) == len(repos) {
return errFinishedWalk
}
return nil
})
n = copy(repos, foundRepos)
switch err {
case nil:
// nil means that we completed walk and didn't fill buffer. No more
// records are available.
err = io.EOF
case errFinishedWalk:
// more records are available.
err = nil
}
return n, err
}
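// Example (illustrative sketch, not from the upstream source): paging through
// the catalog using the n/io.EOF contract above. The page size of 100 and the
// helper name are arbitrary; *registry is the unexported type implemented in
// this file, reachable here because the sketch sits in the same package.
func allRepositories(ctx context.Context, reg *registry) ([]string, error) {
	var all []string
	last := ""
	for {
		buf := make([]string, 100) // arbitrary page size
		n, err := reg.Repositories(ctx, buf, last)
		all = append(all, buf[:n]...)
		switch err {
		case io.EOF:
			return all, nil // walk completed; no more records
		case nil:
			last = buf[n-1] // buffer filled; resume after the last entry
		default:
			return all, err
		}
	}
}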
// Enumerate applies ingester to each repository
func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
return handleRepository(fileInfo, root, "", ingester)
})
return err
}
// lessPath returns true if path a is less than path b.
//
// A component-wise comparison is done, rather than the lexical comparison of
// strings.
func lessPath(a, b string) bool {
// we provide this behavior by making separator always sort first.
return compareReplaceInline(a, b, '/', '\x00') < 0
}
// compareReplaceInline modifies runtime.cmpstring to replace old with new
// during a byte-wise comparison.
func compareReplaceInline(s1, s2 string, old, new byte) int {
// TODO(stevvooe): We are missing an optimization when the s1 and s2 have
// the exact same slice header. It will make the code unsafe but can
// provide some extra performance.
l := len(s1)
if len(s2) < l {
l = len(s2)
}
for i := 0; i < l; i++ {
c1, c2 := s1[i], s2[i]
if c1 == old {
c1 = new
}
if c2 == old {
c2 = new
}
if c1 < c2 {
return -1
}
if c1 > c2 {
return +1
}
}
if len(s1) < len(s2) {
return -1
}
if len(s1) > len(s2) {
return +1
}
return 0
}
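// Example (illustrative sketch, not from the upstream source): why the
// separator must sort first. Component-wise, "a/b" precedes "a-b"; a plain
// byte comparison says the opposite, because '/' (0x2f) is greater
// than '-' (0x2d).
func exampleLessPath() (componentwise, lexical bool) {
	componentwise = lessPath("a/b", "a-b") // true
	lexical = "a/b" < "a-b"                // false
	return
}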
// handleRepository calls function fn with a repository path if fileInfo
// has a path of a repository under root and that path is lexicographically
// after last. Otherwise, it will return ErrSkipDir. This should be used
// with Walk to handle the repositories in a storage backend.
func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error {
filePath := fileInfo.Path()
// lop the base path off
repo := filePath[len(root)+1:]
_, file := path.Split(repo)
if file == "_layers" {
repo = strings.TrimSuffix(repo, "/_layers")
if lessPath(last, repo) {
if err := fn(repo); err != nil {
return err
}
}
return ErrSkipDir
} else if strings.HasPrefix(file, "_") {
return ErrSkipDir
}
return nil
}

View file

@ -0,0 +1,3 @@
// Package storage contains storage services for use in the registry
// application. It should be considered an internal package, as of Go 1.4.
package storage

View file

@ -0,0 +1,177 @@
package storage
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
)
// TODO(stevvooe): Set an optimal buffer size here. We'll have to
// understand the latency characteristics of the underlying network to
// set this correctly, so we may want to leave it to the driver. For
// out of process drivers, we'll have to optimize this buffer size for
// local communication.
const fileReaderBufferSize = 4 << 20
// remoteFileReader provides a read seeker interface to files stored in
// storagedriver. Used to implement part of layer interface and will be used
// to implement read side of LayerUpload.
type fileReader struct {
driver storagedriver.StorageDriver
ctx context.Context
// identifying fields
path string
size int64 // size is the total size, must be set.
// mutable fields
rc io.ReadCloser // remote read closer
brd *bufio.Reader // internal buffered io
offset int64 // offset is the current read offset
err error // terminal error, if set, reader is closed
}
// newFileReader initializes a file reader for the remote file. The reader
// takes on the size and path that must be determined externally with a stat
// call. The reader operates optimistically, assuming that the file is already
// there.
func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
return &fileReader{
ctx: ctx,
driver: driver,
path: path,
size: size,
}, nil
}
func (fr *fileReader) Read(p []byte) (n int, err error) {
if fr.err != nil {
return 0, fr.err
}
rd, err := fr.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
fr.offset += int64(n)
// Simulate io.EOF if we reach the file size.
if err == nil && fr.offset >= fr.size {
err = io.EOF
}
return n, err
}
func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
if fr.err != nil {
return 0, fr.err
}
var err error
newOffset := fr.offset
switch whence {
case os.SEEK_CUR:
newOffset += int64(offset)
case os.SEEK_END:
newOffset = fr.size + int64(offset)
case os.SEEK_SET:
newOffset = int64(offset)
}
if newOffset < 0 {
err = fmt.Errorf("cannot seek to negative position")
} else {
if fr.offset != newOffset {
fr.reset()
}
// No problems, set the offset.
fr.offset = newOffset
}
return fr.offset, err
}
func (fr *fileReader) Close() error {
return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
}
// reader prepares the current reader at the current offset, ensuring it's
// buffered and ready to go.
func (fr *fileReader) reader() (io.Reader, error) {
if fr.err != nil {
return nil, fr.err
}
if fr.rc != nil {
return fr.brd, nil
}
// If we don't have a reader, open one up.
rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): If the path is not found, we simply return a
// reader that returns io.EOF. However, we do not set fr.rc,
// allowing future attempts at getting a reader to possibly
// succeed if the file turns up later.
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
default:
return nil, err
}
}
fr.rc = rc
if fr.brd == nil {
fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
} else {
fr.brd.Reset(fr.rc)
}
return fr.brd, nil
}
// reset resets the reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *fileReader) reset() {
if fr.err != nil {
return
}
if fr.rc != nil {
fr.rc.Close()
fr.rc = nil
}
}
func (fr *fileReader) closeWithErr(err error) error {
if fr.err != nil {
return fr.err
}
fr.err = err
// close and release reader chain
if fr.rc != nil {
fr.rc.Close()
}
fr.rc = nil
fr.brd = nil
return fr.err
}
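// Example (illustrative sketch, not from the upstream source): reading the
// last `tail` bytes of a stored file through fileReader. The size must come
// from a Stat call, as the newFileReader comment requires; the helper name
// is made up for illustration.
func readTail(ctx context.Context, d storagedriver.StorageDriver, filePath string, tail int64) ([]byte, error) {
	fi, err := d.Stat(ctx, filePath)
	if err != nil {
		return nil, err
	}
	fr, err := newFileReader(ctx, d, filePath, fi.Size())
	if err != nil {
		return nil, err
	}
	defer fr.Close()
	// Seek relative to the end; a too-large tail yields the negative-seek error.
	if _, err := fr.Seek(-tail, os.SEEK_END); err != nil {
		return nil, err
	}
	return ioutil.ReadAll(fr) // Read reports io.EOF once offset reaches size
}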

View file

@ -0,0 +1,114 @@
package storage
import (
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
func emit(format string, a ...interface{}) {
fmt.Printf(format+"\n", a...)
}
// MarkAndSweep performs a mark and sweep of registry data
func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error {
repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
if !ok {
return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator")
}
// mark
markSet := make(map[digest.Digest]struct{})
err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
emit("%s", repoName)
var err error
named, err := reference.WithName(repoName)
if err != nil {
return fmt.Errorf("failed to parse repo name %s: %v", repoName, err)
}
repository, err := registry.Repository(ctx, named)
if err != nil {
return fmt.Errorf("failed to construct repository: %v", err)
}
manifestService, err := repository.Manifests(ctx)
if err != nil {
return fmt.Errorf("failed to construct manifest service: %v", err)
}
manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator)
if !ok {
return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator")
}
err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
// Mark the manifest's blob
emit("%s: marking manifest %s ", repoName, dgst)
markSet[dgst] = struct{}{}
manifest, err := manifestService.Get(ctx, dgst)
if err != nil {
return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err)
}
descriptors := manifest.References()
for _, descriptor := range descriptors {
markSet[descriptor.Digest] = struct{}{}
emit("%s: marking blob %s", repoName, descriptor.Digest)
}
return nil
})
if err != nil {
// In certain situations such as unfinished uploads, deleting all
// tags in S3 or removing the _manifests folder manually, this
// error may be of type PathNotFound.
//
// In these cases we can continue marking other manifests safely.
if _, ok := err.(driver.PathNotFoundError); ok {
return nil
}
}
return err
})
if err != nil {
return fmt.Errorf("failed to mark: %v", err)
}
// sweep
blobService := registry.Blobs()
deleteSet := make(map[digest.Digest]struct{})
err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
// check if digest is in markSet. If not, delete it!
if _, ok := markSet[dgst]; !ok {
deleteSet[dgst] = struct{}{}
}
return nil
})
if err != nil {
return fmt.Errorf("error enumerating blobs: %v", err)
}
emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet))
// Construct vacuum
vacuum := NewVacuum(ctx, storageDriver)
for dgst := range deleteSet {
emit("blob eligible for deletion: %s", dgst)
if dryRun {
continue
}
err = vacuum.RemoveBlob(string(dgst))
if err != nil {
return fmt.Errorf("failed to delete blob %s: %v", dgst, err)
}
}
return err
}
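// Example (illustrative sketch, not from the upstream source): a two-phase
// collection using MarkAndSweep above. The dry run only reports eligible
// blobs via emit; the second pass deletes them. The wrapper is hypothetical.
func exampleTwoPhaseGC(ctx context.Context, d driver.StorageDriver, ns distribution.Namespace) error {
	if err := MarkAndSweep(ctx, d, ns, true); err != nil { // dry run: report only
		return err
	}
	return MarkAndSweep(ctx, d, ns, false) // delete unreferenced blobs
}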

View file

@ -0,0 +1,470 @@
package storage
import (
"fmt"
"net/http"
"path"
"time"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
"github.com/opencontainers/go-digest"
)
// linkPathFunc describes a function that can resolve a link based on the
// repository name and digest.
type linkPathFunc func(name string, dgst digest.Digest) (string, error)
// linkedBlobStore provides a full BlobService that namespaces the blobs to a
// given repository. Effectively, it manages the links in a given repository
// that grant access to the global blob store.
type linkedBlobStore struct {
*blobStore
registry *registry
blobServer distribution.BlobServer
blobAccessController distribution.BlobDescriptorService
repository distribution.Repository
ctx context.Context // only to be used where context can't come through method args
deleteEnabled bool
resumableDigestEnabled bool
// linkPathFns specifies one or more path functions allowing one to
// control the repository blob link set to which the blob store
// dispatches. This is required because manifest and layer blobs have not
// yet been fully merged. At some point, this functionality should be
// removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
// linkDirectoryPathSpec locates the root directories in which one might find links
linkDirectoryPathSpec pathSpec
}
var _ distribution.BlobStore = &linkedBlobStore{}
func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
return lbs.blobAccessController.Stat(ctx, dgst)
}
func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Get(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Open(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return err
}
if canonical.MediaType != "" {
// Set the repository local content type.
w.Header().Set("Content-Type", canonical.MediaType)
}
return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
}
func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
dgst := digest.FromBytes(p)
// Place the data in the blob store first.
desc, err := lbs.blobStore.Put(ctx, mediaType, p)
if err != nil {
context.GetLogger(ctx).Errorf("error putting into main store: %v", err)
return distribution.Descriptor{}, err
}
if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil {
return distribution.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype if incoming differs from what is
// returned by Put above. Note that we should allow updates for a given
// repository.
return desc, lbs.linkBlob(ctx, desc)
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*distribution.CreateOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
// Create begins a blob write session, returning a handle.
func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer")
var opts distribution.CreateOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
if opts.Mount.ShouldMount {
desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat)
if err == nil {
// Mount successful, no need to initiate an upload session
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
}
}
uuid := uuid.Generate().String()
startedAt := time.Now().UTC()
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
// Write a startedat file for this upload
if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, uuid, path, startedAt, false)
}
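// Example (illustrative sketch, not from the upstream source): attempting a
// cross-repository mount before falling back to a regular upload session,
// following the Create/ErrBlobMounted contract above. The helper is
// hypothetical; callers would normally proceed to write through bw.
func createOrMount(ctx context.Context, blobs distribution.BlobStore, src reference.Canonical) (distribution.BlobWriter, error) {
	bw, err := blobs.Create(ctx, WithMountFrom(src))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		// The blob is already linked from ebm.From; no upload is needed.
		_ = ebm.Descriptor
		return nil, nil
	}
	return bw, err
}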
func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}
startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUploadUnknown
default:
return nil, err
}
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return nil, err
}
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, id, path, startedAt, true)
}
func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
if !lbs.deleteEnabled {
return distribution.ErrUnsupported
}
// Ensure the blob is available for deletion
_, err := lbs.blobAccessController.Stat(ctx, dgst)
if err != nil {
return err
}
err = lbs.blobAccessController.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error {
rootPath, err := pathFor(lbs.linkDirectoryPathSpec)
if err != nil {
return err
}
err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error {
// exit early if directory...
if fileInfo.IsDir() {
return nil
}
filePath := fileInfo.Path()
// check if it's a link
_, fileName := path.Split(filePath)
if fileName != "link" {
return nil
}
// read the digest found in link
digest, err := lbs.blobStore.readlink(ctx, filePath)
if err != nil {
return err
}
// ensure this conforms to the linkPathFns
_, err = lbs.Stat(ctx, digest)
if err != nil {
// we expect this error to occur so we move on
if err == distribution.ErrBlobUnknown {
return nil
}
return err
}
err = ingestor(digest)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) {
var stat distribution.Descriptor
if sourceStat == nil {
// look up the blob info from the sourceRepo if not already provided
repo, err := lbs.registry.Repository(ctx, sourceRepo)
if err != nil {
return distribution.Descriptor{}, err
}
stat, err = repo.Blobs(ctx).Stat(ctx, dgst)
if err != nil {
return distribution.Descriptor{}, err
}
} else {
// use the provided blob info
stat = *sourceStat
}
desc := distribution.Descriptor{
Size: stat.Size,
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}
return desc, lbs.linkBlob(ctx, desc)
}
// newBlobUpload allocates a new upload controller with the given state.
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) {
fw, err := lbs.driver.Writer(ctx, path, append)
if err != nil {
return nil, err
}
bw := &blobWriter{
ctx: ctx,
blobStore: lbs,
id: uuid,
startedAt: startedAt,
digester: digest.Canonical.Digester(),
fileWriter: fw,
driver: lbs.driver,
path: path,
resumableDigestEnabled: lbs.resumableDigestEnabled,
}
return bw, nil
}
// linkBlob links a valid, written blob into the registry under the named
// repository for the upload controller.
func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error {
dgsts := append([]digest.Digest{canonical.Digest}, aliases...)
// TODO(stevvooe): Need to write out mediatype for only canonical hash
// since we don't care about the aliases. They are generally unused except
// for tarsum but those versions don't care about mediatype.
// Don't make duplicate links.
seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
// only use the first link
linkPathFn := lbs.linkPathFns[0]
for _, dgst := range dgsts {
if _, seen := seenDigests[dgst]; seen {
continue
}
seenDigests[dgst] = struct{}{}
blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
if err != nil {
return err
}
if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil {
return err
}
}
return nil
}
type linkedBlobStatter struct {
*blobStore
repository distribution.Repository
// linkPathFns specifies one or more path functions allowing one to
// control the repository blob link set to which the blob store
// dispatches. This is required because manifest and layer blobs have not
// yet been fully merged. At some point, this functionality should be
// removed and the blob links folder should be merged. The first entry is
// treated as the "canonical" link location and will be used for writes.
linkPathFns []linkPathFunc
}
var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
var (
found bool
target digest.Digest
)
// try the many link path functions until we get success or an error that
// is not PathNotFoundError.
for _, linkPathFn := range lbs.linkPathFns {
var err error
target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)
if err == nil {
found = true
break // success!
}
switch err := err.(type) {
case driver.PathNotFoundError:
// do nothing, just move to the next linkPathFn
default:
return distribution.Descriptor{}, err
}
}
if !found {
return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
if target != dgst {
// Track when we are doing cross-digest domain lookups. i.e., sha512 to sha256.
context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
}
// TODO(stevvooe): Look up repository local mediatype and replace that on
// the returned descriptor.
return lbs.blobStore.statter.Stat(ctx, target)
}
func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
// clear any possible existence of a link described in linkPathFns
for _, linkPathFn := range lbs.linkPathFns {
blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
if err != nil {
return err
}
err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
continue // just ignore this error and continue
default:
return err
}
}
}
return nil
}
// resolveWithLinkFunc allows us to read a link to a resource with different
// linkPathFuncs to let us try a few different paths before returning not
// found.
func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
if err != nil {
return "", err
}
return lbs.blobStore.readlink(ctx, blobLinkPath)
}
func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
// The canonical descriptor for a blob is set at the commit phase of upload
return nil
}
// blobLinkPath provides the path to the blob link, also known as layers.
func blobLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(layerLinkPathSpec{name: name, digest: dgst})
}
// manifestRevisionLinkPath provides the path to the manifest revision link.
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
}

View file

@ -0,0 +1,92 @@
package storage
import (
"fmt"
"encoding/json"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/opencontainers/go-digest"
)
// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
type manifestListHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore
ctx context.Context
}
var _ ManifestHandler = &manifestListHandler{}
func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal")
var m manifestlist.DeserializedManifestList
if err := json.Unmarshal(content, &m); err != nil {
return nil, err
}
return &m, nil
}
func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put")
m, ok := manifestList.(*manifestlist.DeserializedManifestList)
if !ok {
return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to
// store valid content, leaving trust policies of that content up to
// consumers.
func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if !skipDependencyVerification {
// This manifest service is different from the blob service
// returned by Blob. It uses a linked blob store to ensure that
// only manifests are accessible.
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
for _, manifestDescriptor := range mnfst.References() {
exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest)
if err != nil && err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
if err != nil || !exists {
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest})
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}

View file

@ -0,0 +1,141 @@
package storage
import (
"fmt"
"encoding/json"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/opencontainers/go-digest"
)
// A ManifestHandler gets and puts manifests of a particular type.
type ManifestHandler interface {
// Unmarshal unmarshals the manifest from a byte slice.
Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
// Put creates or updates the given manifest returning the manifest digest.
Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
}
// SkipLayerVerification allows a manifest to be Put before its
// layers are on the filesystem
func SkipLayerVerification() distribution.ManifestServiceOption {
return skipLayerOption{}
}
type skipLayerOption struct{}
func (o skipLayerOption) Apply(m distribution.ManifestService) error {
if ms, ok := m.(*manifestStore); ok {
ms.skipDependencyVerification = true
return nil
}
return fmt.Errorf("skip layer verification only valid for manifestStore")
}
type manifestStore struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
skipDependencyVerification bool
schema1Handler ManifestHandler
schema2Handler ManifestHandler
manifestListHandler ManifestHandler
}
var _ distribution.ManifestService = &manifestStore{}
func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
_, err := ms.blobStore.Stat(ms.ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return false, nil
}
return false, err
}
return true, nil
}
func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
// TODO(stevvooe): Need to check descriptor from above to ensure that the
// mediatype is as we expect for the manifest store.
content, err := ms.blobStore.Get(ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return nil, distribution.ErrManifestUnknownRevision{
Name: ms.repository.Named().Name(),
Revision: dgst,
}
}
return nil, err
}
var versioned manifest.Versioned
if err = json.Unmarshal(content, &versioned); err != nil {
return nil, err
}
switch versioned.SchemaVersion {
case 1:
return ms.schema1Handler.Unmarshal(ctx, dgst, content)
case 2:
// This can be an image manifest or a manifest list
switch versioned.MediaType {
case schema2.MediaTypeManifest:
return ms.schema2Handler.Unmarshal(ctx, dgst, content)
case manifestlist.MediaTypeManifestList:
return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
default:
return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
}
}
return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
}
func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
switch manifest.(type) {
case *schema1.SignedManifest:
return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification)
case *schema2.DeserializedManifest:
return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
case *manifestlist.DeserializedManifestList:
return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
}
return "", fmt.Errorf("unrecognized manifest type %T", manifest)
}
// Delete removes the revision of the specified manifest.
func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
return ms.blobStore.Delete(ctx, dgst)
}
func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error {
err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error {
err := ingester(dgst)
if err != nil {
return err
}
return nil
})
return err
}

View file

@ -0,0 +1,490 @@
package storage
import (
"fmt"
"path"
"strings"
"github.com/opencontainers/go-digest"
)
const (
storagePathVersion = "v2" // fixed storage layout version
storagePathRoot = "/docker/registry/" // all driver paths have a prefix
// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
// the storage path root would be configurable for all drivers through this
// package. In reality, we've found it simpler to do this on a per driver
// basis.
)
// pathFor maps paths based on "object names" and their ids. The "object
// names" mapped by are internal to the storage system.
//
// The path layout in the storage backend is roughly as follows:
//
// <root>/v2
// -> repositories/
// -><name>/
// -> _manifests/
// revisions
// -> <manifest digest path>
// -> link
// tags/<tag>
// -> current/link
// -> index
// -> <algorithm>/<hex digest>/link
// -> _layers/
// <layer links to blob store>
// -> _uploads/<id>
// data
// startedat
// hashstates/<algorithm>/<offset>
// -> blob/<algorithm>
// <split directory content addressable storage>
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
// throughout the backend, keyed by algorithm and digests of the underlying
// content. Access to the blob store is controlled through links from the
// repository to blobstore.
//
// A repository is made up of layers, manifests and tags. The layers component
// is just a directory of layers which are "linked" into a repository. A layer
// can only be accessed through a qualified repository name if it is linked in
// the repository. Uploads of layers are managed in the uploads directory,
// which is keyed by upload id. When all data for an upload is received, the
// data is moved into the blob store and the upload directory is deleted.
// Abandoned uploads can be garbage collected by reading the startedat file
// and removing uploads that have been active for longer than a certain time.
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
// the blob store and linked into the revision store.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name and tag lookups of manifests, using "current/link" under a
// named tag directory. An index is maintained to support deletions of all
// revisions of a given manifest tag.
//
// We cover the path formats implemented by this path mapper below.
//
// Manifests:
//
// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
//
// Tags:
//
// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Blobs:
//
// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
//
// Uploads:
//
// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
//
// Blob Store:
//
// blobsPathSpec: <root>/v2/blobs/
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
// blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to
// accomplish this. By keeping the formatting separate from the pathSpec, we
// keep the path generation componentized. These specs could be
// passed to a completely different mapper implementation and generate a
// different set of paths.
//
// For example, imagine migrating from one backend to the other: one could
// build a filesystem walker that converts a string path in one version,
// to an intermediate path object, that can be consumed and mapped by the
// other version.
rootPrefix := []string{storagePathRoot, storagePathVersion}
repoPrefix := append(rootPrefix, "repositories")
switch v := spec.(type) {
case manifestRevisionsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil
case manifestRevisionPathSpec:
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec{
name: v.name,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
case manifestTagPathSpec:
root, err := pathFor(manifestTagsPathSpec{
name: v.name,
})
if err != nil {
return "", err
}
return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec{
name: v.name,
tag: v.tag,
revision: v.revision,
})
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagIndexEntryPathSpec:
root, err := pathFor(manifestTagIndexPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(root, path.Join(components...)), nil
case layerLinkPathSpec:
components, err := digestPathComponents(v.digest, false)
if err != nil {
return "", err
}
// TODO(stevvooe): Right now, all blobs are linked under "_layers". If
// we have future migrations, we may want to rename this to "_blobs".
// A migration strategy would simply leave existing items in place and
// write the new paths, commit a file then delete the old files.
blobLinkPathComponents := append(repoPrefix, v.name, "_layers")
return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
case blobsPathSpec:
blobsPathPrefix := append(rootPrefix, "blobs")
return path.Join(blobsPathPrefix...), nil
case blobPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case blobDataPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
components = append(components, "data")
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case uploadDataPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil
case uploadStartedAtPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil
case uploadHashStatePathSpec:
offset := fmt.Sprintf("%d", v.offset)
if v.list {
offset = "" // Limit to the prefix for listing offsets.
}
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
case repositoriesRootPathSpec:
return path.Join(repoPrefix...), nil
default:
// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
return "", fmt.Errorf("unknown path spec: %#v", v)
}
}
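// Example (illustrative sketch, not from the upstream source): mapping two
// specs through pathFor. The repository name and upload id are made-up
// values; the digest is the well-known sha256 of the empty blob.
func examplePaths() error {
	p, err := pathFor(uploadDataPathSpec{name: "library/redis", id: "0001-0002"})
	if err != nil {
		return err
	}
	_ = p // /docker/registry/v2/repositories/library/redis/_uploads/0001-0002/data

	p, err = pathFor(blobDataPathSpec{
		digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	})
	if err != nil {
		return err
	}
	_ = p // /docker/registry/v2/blobs/sha256/e3/e3b0c4.../data (multilevel)
	return nil
}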
// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
pathSpec()
}
// manifestRevisionsPathSpec describes the directory path containing
// a repository's manifest revisions.
type manifestRevisionsPathSpec struct {
name string
}
func (manifestRevisionsPathSpec) pathSpec() {}
// manifestRevisionPathSpec describes the components of the directory path for
// a manifest revision.
type manifestRevisionPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionPathSpec) pathSpec() {}
// manifestRevisionLinkPathSpec describes the path components required to look
// up the data link for a revision of a manifest. If this file is not present,
// the manifest blob is not available in the given repo. The contents of this
// file should just be the digest.
type manifestRevisionLinkPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionLinkPathSpec) pathSpec() {}
// manifestTagsPathSpec describes the path elements required to point to the
// manifest tags directory.
type manifestTagsPathSpec struct {
name string
}
func (manifestTagsPathSpec) pathSpec() {}
// manifestTagPathSpec describes the path elements required to point to the
// manifest tag links files under a repository. These contain a blob id that
// can be used to look up the data and signatures.
type manifestTagPathSpec struct {
name string
tag string
}
func (manifestTagPathSpec) pathSpec() {}
// manifestTagCurrentPathSpec describes the link to the current revision for a
// given tag.
type manifestTagCurrentPathSpec struct {
name string
tag string
}
func (manifestTagCurrentPathSpec) pathSpec() {}
// manifestTagIndexPathSpec describes the link to the index of revisions
// with the given tag.
type manifestTagIndexPathSpec struct {
name string
tag string
}
func (manifestTagIndexPathSpec) pathSpec() {}
// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
type manifestTagIndexEntryPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryPathSpec) pathSpec() {}
// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
// manifest with the given tag within the index.
type manifestTagIndexEntryLinkPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
// layerLinkPathSpec specifies a path for a blob link, which is a file with a
// blob id. The blob link will contain a content addressable blob id reference
// into the blob store. The format of the contents is as follows:
//
// <algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256, that can be fetched from the blob store.
type layerLinkPathSpec struct {
name string
digest digest.Digest
}
func (layerLinkPathSpec) pathSpec() {}
// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Paths should be "safe" before getting this far due to strict digest
// requirements but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
"+", "/",
".", "/",
";", "/",
)
// blobsPathSpec contains the path for the blobs directory
type blobsPathSpec struct{}
func (blobsPathSpec) pathSpec() {}
// blobPathSpec contains the path for the registry global blob store.
type blobPathSpec struct {
digest digest.Digest
}
func (blobPathSpec) pathSpec() {}
// blobDataPathSpec contains the path for the registry global blob store. For
// now, this contains layer data, exclusively.
type blobDataPathSpec struct {
digest digest.Digest
}
func (blobDataPathSpec) pathSpec() {}
// uploadDataPathSpec defines the path parameters of the data file for
// uploads.
type uploadDataPathSpec struct {
name string
id string
}
func (uploadDataPathSpec) pathSpec() {}
// uploadStartedAtPathSpec defines the path parameters for the file that
// stores the start time of an upload. If it is missing, the upload is
// considered unknown. Admittedly, the presence of this file is an ugly hack
// to make sure we have a way to cleanup old or stalled uploads that doesn't
// rely on driver FileInfo behavior. If we come up with a more clever way to
// do this, we should remove this file immediately and rely on the startedAt field from
// the client to enforce time out policies.
type uploadStartedAtPathSpec struct {
name string
id string
}
func (uploadStartedAtPathSpec) pathSpec() {}
// uploadHashStatePathSpec defines the path parameters for the file that stores
// the hash function state of an upload at a specific byte offset. If `list` is
// set, then the path mapper will generate a list prefix for all hash state
// offsets for the upload identified by the name, id, and alg.
type uploadHashStatePathSpec struct {
name string
id string
alg digest.Algorithm
offset int64
list bool
}
func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct {
}
func (repositoriesRootPathSpec) pathSpec() {}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// <algorithm>/<hex digest>
//
// If multilevel is true, the first two hex characters of the digest separate
// groups of digest folders. It will be as follows:
//
// <algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil {
return nil, err
}
algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
hex := dgst.Hex()
prefix := []string{algorithm}
var suffix []string
if multilevel {
suffix = append(suffix, hex[:2])
}
suffix = append(suffix, hex)
return append(prefix, suffix...), nil
}
// digestFromPath reconstructs a digest from a path.
func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath)
dir = path.Dir(dir)
dir, next := path.Split(dir)
// next is either the algorithm OR the first two characters in the hex string
var algo string
if next == hex[:2] {
algo = path.Base(dir)
} else {
algo = next
}
dgst := digest.NewDigestFromHex(algo, hex)
return dgst, dgst.Validate()
}
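// Example (illustrative sketch, not from the upstream source): digestFromPath
// inverts the multilevel blobDataPathSpec layout above. The hex string is the
// well-known sha256 of the empty blob.
func exampleDigestFromPath() error {
	const hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	dgst, err := digestFromPath("/docker/registry/v2/blobs/sha256/e3/" + hex + "/data")
	if err != nil {
		return err
	}
	_ = dgst // sha256:e3b0c4... (the "e3" directory is recognized as hex[:2])
	return nil
}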

View file

@ -0,0 +1,139 @@
package storage
import (
"path"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/context"
storageDriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/distribution/uuid"
)
// uploadData stores the location of temporary files created during a layer upload
// along with the date the upload was started
type uploadData struct {
containingDir string
startedAt time.Time
}
func newUploadData() uploadData {
return uploadData{
containingDir: "",
// default to far in future to protect against missing startedat
startedAt: time.Now().Add(time.Duration(10000 * time.Hour)),
}
}
// PurgeUploads deletes files from the upload directory
// created before olderThan. The list of files deleted and errors
// encountered are returned
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
uploadData, errors := getOutstandingUploads(ctx, driver)
var deleted []string
for _, uploadData := range uploadData {
if uploadData.startedAt.Before(olderThan) {
var err error
log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.",
uploadData.containingDir, uploadData.startedAt, olderThan)
if actuallyDelete {
err = driver.Delete(ctx, uploadData.containingDir)
}
if err == nil {
deleted = append(deleted, uploadData.containingDir)
} else {
errors = append(errors, err)
}
}
}
log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors))
return deleted, errors
}
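// Example (illustrative sketch, not from the upstream source): purging
// uploads older than one week, first as a dry run (actuallyDelete=false) and
// then for real. The cutoff and helper name are arbitrary choices.
func purgeWeekOldUploads(ctx context.Context, d storageDriver.StorageDriver) []error {
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	would, _ := PurgeUploads(ctx, d, cutoff, false) // report what would be deleted
	_ = would
	deleted, errs := PurgeUploads(ctx, d, cutoff, true) // actually delete
	_ = deleted
	return errs
}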
// getOutstandingUploads walks the upload directory, collecting files
// which could be eligible for deletion. The only reliable way to
// classify the age of a file is with the date stored in the startedAt
// file, so gather files by UUID with a date from startedAt.
func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
var errors []error
uploads := make(map[string]uploadData, 0)
inUploadDir := false
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return uploads, append(errors, err)
}
err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error {
filePath := fileInfo.Path()
_, file := path.Split(filePath)
if file[0] == '_' {
// Reserved directory
inUploadDir = (file == "_uploads")
if fileInfo.IsDir() && !inUploadDir {
return ErrSkipDir
}
}
uuid, isContainingDir := uuidFromPath(filePath)
if uuid == "" {
// Cannot reliably delete
return nil
}
ud, ok := uploads[uuid]
if !ok {
ud = newUploadData()
}
if isContainingDir {
ud.containingDir = filePath
}
if file == "startedat" {
if t, err := readStartedAtFile(driver, filePath); err == nil {
ud.startedAt = t
} else {
errors = pushError(errors, filePath, err)
}
}
uploads[uuid] = ud
return nil
})
if err != nil {
errors = pushError(errors, root, err)
}
return uploads, errors
}
// uuidFromPath extracts the upload UUID from a given path
// If the UUID is the last path component, this is the containing
// directory for all upload files
func uuidFromPath(path string) (string, bool) {
components := strings.Split(path, "/")
for i := len(components) - 1; i >= 0; i-- {
if u, err := uuid.Parse(components[i]); err == nil {
return u.String(), i == len(components)-1
}
}
return "", false
}
// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
// todo:(richardscothern) - pass in a context
startedAtBytes, err := driver.GetContent(context.Background(), path)
if err != nil {
return time.Now(), err
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return time.Now(), err
}
return startedAt, nil
}

View file

@ -0,0 +1,306 @@
package storage
import (
"regexp"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/reference"
"github.com/docker/distribution/registry/storage/cache"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/docker/libtrust"
)
// registry is the top-level implementation of Registry for use in the storage
// package. All instances should descend from this object.
type registry struct {
blobStore *blobStore
blobServer *blobServer
statter *blobStatter // global statter service.
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
deleteEnabled bool
resumableDigestEnabled bool
schema1SigningKey libtrust.PrivateKey
blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
manifestURLs manifestURLs
}
// manifestURLs holds regular expressions for controlling manifest URL whitelisting
type manifestURLs struct {
allow *regexp.Regexp
deny *regexp.Regexp
}
// RegistryOption is the type used for functional options for NewRegistry.
type RegistryOption func(*registry) error
// EnableRedirect is a functional option for NewRegistry. It causes the backend
// blob server to attempt using (StorageDriver).URLFor to serve all blobs.
func EnableRedirect(registry *registry) error {
registry.blobServer.redirect = true
return nil
}
// EnableDelete is a functional option for NewRegistry. It enables deletion on
// the registry.
func EnableDelete(registry *registry) error {
registry.deleteEnabled = true
return nil
}
// DisableDigestResumption is a functional option for NewRegistry. It should be
// used if the registry is acting as a caching proxy.
func DisableDigestResumption(registry *registry) error {
registry.resumableDigestEnabled = false
return nil
}
// ManifestURLsAllowRegexp is a functional option for NewRegistry.
func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption {
return func(registry *registry) error {
registry.manifestURLs.allow = r
return nil
}
}
// ManifestURLsDenyRegexp is a functional option for NewRegistry.
func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption {
return func(registry *registry) error {
registry.manifestURLs.deny = r
return nil
}
}
// Schema1SigningKey returns a functional option for NewRegistry. It sets the
// key for signing all schema1 manifests.
func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption {
return func(registry *registry) error {
registry.schema1SigningKey = key
return nil
}
}
// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the
// factory to create BlobDescriptorServiceFactory middleware.
func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption {
return func(registry *registry) error {
registry.blobDescriptorServiceFactory = factory
return nil
}
}
// BlobDescriptorCacheProvider returns a functional option for
// NewRegistry. It creates a cached blob statter for use by the
// registry.
func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption {
// TODO(aaronl): The duplication of statter across several objects is
// ugly, and prevents us from using interface types in the registry
// struct. Ideally, blobStore and blobServer should be lazily
// initialized, and use the current value of
// blobDescriptorCacheProvider.
return func(registry *registry) error {
if blobDescriptorCacheProvider != nil {
statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter)
registry.blobStore.statter = statter
registry.blobServer.statter = statter
registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider
}
return nil
}
}
// NewRegistry creates a new registry instance from the provided driver. The
// resulting registry may be shared by multiple goroutines but is cheap to
// allocate. If the Redirect option is specified, the backend blob server will
// attempt to use (StorageDriver).URLFor to serve all blobs.
func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
// create global statter
statter := &blobStatter{
driver: driver,
}
bs := &blobStore{
driver: driver,
statter: statter,
}
registry := &registry{
blobStore: bs,
blobServer: &blobServer{
driver: driver,
statter: statter,
pathFn: bs.path,
},
statter: statter,
resumableDigestEnabled: true,
}
for _, option := range options {
if err := option(registry); err != nil {
return nil, err
}
}
return registry, nil
}
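// Example (illustrative sketch, not from the upstream source): composing the
// functional options defined above into a NewRegistry call. The regular
// expression is a made-up whitelist policy for illustration only.
func exampleNewFilteredRegistry(ctx context.Context, d storagedriver.StorageDriver) (distribution.Namespace, error) {
	allow := regexp.MustCompile(`^https://registry\.example\.com/`) // hypothetical policy
	return NewRegistry(ctx, d,
		EnableRedirect,
		EnableDelete,
		ManifestURLsAllowRegexp(allow),
	)
}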
// Scope returns the namespace scope for a registry. The registry
// will only serve repositories contained within this scope.
func (reg *registry) Scope() distribution.Scope {
return distribution.GlobalScope
}
// Repository returns an instance of the repository tied to the registry.
// Instances should not be shared between goroutines but are cheap to
// allocate. In general, they should be request scoped.
func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) {
var descriptorCache distribution.BlobDescriptorService
if reg.blobDescriptorCacheProvider != nil {
var err error
descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name())
if err != nil {
return nil, err
}
}
return &repository{
ctx: ctx,
registry: reg,
name: canonicalName,
descriptorCache: descriptorCache,
}, nil
}
func (reg *registry) Blobs() distribution.BlobEnumerator {
return reg.blobStore
}
func (reg *registry) BlobStatter() distribution.BlobStatter {
return reg.statter
}
// repository provides name-scoped access to various services.
type repository struct {
*registry
ctx context.Context
name reference.Named
descriptorCache distribution.BlobDescriptorService
}
// Name returns the name of the repository.
func (repo *repository) Named() reference.Named {
return repo.name
}
func (repo *repository) Tags(ctx context.Context) distribution.TagService {
tags := &tagStore{
repository: repo,
blobStore: repo.registry.blobStore,
}
return tags
}
// Manifests returns an instance of ManifestService. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
manifestLinkPathFns := []linkPathFunc{
// NOTE(stevvooe): Need to search through multiple locations since
// 2.1.0 unintentionally linked into _layers.
manifestRevisionLinkPath,
blobLinkPath,
}
manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}
var statter distribution.BlobDescriptorService = &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPathFns: manifestLinkPathFns,
}
if repo.registry.blobDescriptorServiceFactory != nil {
statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
}
blobStore := &linkedBlobStore{
ctx: ctx,
blobStore: repo.blobStore,
repository: repo,
deleteEnabled: repo.registry.deleteEnabled,
blobAccessController: statter,
// TODO(stevvooe): linkPath limits this blob store to only
// manifests. This instance cannot be used for blob checks.
linkPathFns: manifestLinkPathFns,
linkDirectoryPathSpec: manifestDirectoryPathSpec,
}
ms := &manifestStore{
ctx: ctx,
repository: repo,
blobStore: blobStore,
schema1Handler: &signedManifestHandler{
ctx: ctx,
schema1SigningKey: repo.schema1SigningKey,
repository: repo,
blobStore: blobStore,
},
schema2Handler: &schema2ManifestHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
manifestURLs: repo.registry.manifestURLs,
},
manifestListHandler: &manifestListHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
},
}
// Apply options
for _, option := range options {
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
return ms, nil
}
// Blobs returns an instance of the BlobStore. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
var statter distribution.BlobDescriptorService = &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPathFns: []linkPathFunc{blobLinkPath},
}
if repo.descriptorCache != nil {
statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
}
if repo.registry.blobDescriptorServiceFactory != nil {
statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
}
return &linkedBlobStore{
registry: repo.registry,
blobStore: repo.blobStore,
blobServer: repo.blobServer,
blobAccessController: statter,
repository: repo,
ctx: ctx,
// TODO(stevvooe): linkPath limits this blob store to only layers.
// This instance cannot be used for manifest checks.
linkPathFns: []linkPathFunc{blobLinkPath},
deleteEnabled: repo.registry.deleteEnabled,
resumableDigestEnabled: repo.resumableDigestEnabled,
}
}
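The functional options above compose directly with NewRegistry. The following is a minimal sketch, assuming the in-memory driver from registry/storage/driver/inmemory is available; the mirror regexp and repository name are invented for illustration.
package main
import (
    "fmt"
    "regexp"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/storage"
    "github.com/docker/distribution/registry/storage/driver/inmemory"
)
func main() {
    ctx := context.Background()
    // Options are applied in order by NewRegistry; the first error aborts construction.
    ns, err := storage.NewRegistry(ctx, inmemory.New(),
        storage.EnableDelete,
        storage.ManifestURLsAllowRegexp(regexp.MustCompile(`^https://mirror\.example\.com/`)), // hypothetical mirror
    )
    if err != nil {
        panic(err)
    }
    // Repositories are request scoped and cheap to allocate.
    named, err := reference.WithName("library/busybox")
    if err != nil {
        panic(err)
    }
    repo, err := ns.Repository(ctx, named)
    if err != nil {
        panic(err)
    }
    fmt.Println("repository:", repo.Named().Name())
}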


@ -0,0 +1,136 @@
package storage
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/opencontainers/go-digest"
)
var (
errUnexpectedURL = errors.New("unexpected URL on layer")
errMissingURL = errors.New("missing URL on layer")
errInvalidURL = errors.New("invalid URL on layer")
)
// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore
ctx context.Context
manifestURLs manifestURLs
}
var _ ManifestHandler = &schema2ManifestHandler{}
func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal")
var m schema2.DeserializedManifest
if err := json.Unmarshal(content, &m); err != nil {
return nil, err
}
return &m, nil
}
func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")
m, ok := manifest.(*schema2.DeserializedManifest)
if !ok {
return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to store
// valid content, leaving trust policies of that content up to consumers.
func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if skipDependencyVerification {
return nil
}
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
blobsService := ms.repository.Blobs(ctx)
for _, descriptor := range mnfst.References() {
var err error
switch descriptor.MediaType {
case schema2.MediaTypeForeignLayer:
// Clients download this layer from an external URL, so do not check for
// its presence.
if len(descriptor.URLs) == 0 {
err = errMissingURL
}
allow := ms.manifestURLs.allow
deny := ms.manifestURLs.deny
for _, u := range descriptor.URLs {
var pu *url.URL
pu, err = url.Parse(u)
if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {
err = errInvalidURL
break
}
}
case schema2.MediaTypeManifest, schema1.MediaTypeManifest:
var exists bool
exists, err = manifestService.Exists(ctx, descriptor.Digest)
if err != nil || !exists {
err = distribution.ErrBlobUnknown // just coerce to unknown.
}
fallthrough // double check the blob store.
default:
// forward all else to blob storage
if len(descriptor.URLs) == 0 {
_, err = blobsService.Stat(ctx, descriptor.Digest)
}
}
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
}
}
if len(errs) != 0 {
return errs
}
return nil
}
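For illustration, the foreign-layer URL policy enforced above (http/https only, no fragment, optional allow/deny regexps) can be sketched as a stand-alone check; the function name and sample URLs here are invented.
package main
import (
    "fmt"
    "net/url"
    "regexp"
)
// validLayerURL mirrors the checks applied to descriptor.URLs above: the URL
// must parse, use http or https, carry no fragment, match the allow regexp
// (if set), and not match the deny regexp (if set).
func validLayerURL(raw string, allow, deny *regexp.Regexp) bool {
    pu, err := url.Parse(raw)
    if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
        return false
    }
    if allow != nil && !allow.MatchString(raw) {
        return false
    }
    if deny != nil && deny.MatchString(raw) {
        return false
    }
    return true
}
func main() {
    allow := regexp.MustCompile(`^https://layers\.example\.com/`)
    fmt.Println(validLayerURL("https://layers.example.com/blob", allow, nil)) // true
    fmt.Println(validLayerURL("ftp://layers.example.com/blob", allow, nil))   // false: bad scheme
    fmt.Println(validLayerURL("https://elsewhere.example/blob", allow, nil))  // false: not allowed
}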


@ -0,0 +1,141 @@
package storage
import (
"encoding/json"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/reference"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
)
// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It
// can unmarshal and put schema1 manifests that have been signed by libtrust.
type signedManifestHandler struct {
repository distribution.Repository
schema1SigningKey libtrust.PrivateKey
blobStore distribution.BlobStore
ctx context.Context
}
var _ ManifestHandler = &signedManifestHandler{}
func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal")
var (
signatures [][]byte
err error
)
jsig, err := libtrust.NewJSONSignature(content, signatures...)
if err != nil {
return nil, err
}
if ms.schema1SigningKey != nil {
if err := jsig.Sign(ms.schema1SigningKey); err != nil {
return nil, err
}
}
// Extract the pretty JWS
raw, err := jsig.PrettySignature("signatures")
if err != nil {
return nil, err
}
var sm schema1.SignedManifest
if err := json.Unmarshal(raw, &sm); err != nil {
return nil, err
}
return &sm, nil
}
func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put")
sm, ok := manifest.(*schema1.SignedManifest)
if !ok {
return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil {
return "", err
}
mt := schema1.MediaTypeManifest
payload := sm.Canonical
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. It ensures that the signature is valid for the
// enclosed payload. As a policy, the registry only tries to store valid
// content, leaving trust policies of that content up to consumers.
func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if len(mnfst.Name) > reference.NameTotalLengthMax {
errs = append(errs,
distribution.ErrManifestNameInvalid{
Name: mnfst.Name,
Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax),
})
}
if !reference.NameRegexp.MatchString(mnfst.Name) {
errs = append(errs,
distribution.ErrManifestNameInvalid{
Name: mnfst.Name,
Reason: fmt.Errorf("invalid manifest name format"),
})
}
if len(mnfst.History) != len(mnfst.FSLayers) {
errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d",
len(mnfst.History), len(mnfst.FSLayers)))
}
if _, err := schema1.Verify(&mnfst); err != nil {
switch err {
case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
errs = append(errs, distribution.ErrManifestUnverified{})
default:
if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
errs = append(errs, distribution.ErrManifestUnverified{})
} else {
errs = append(errs, err)
}
}
}
if !skipDependencyVerification {
for _, fsLayer := range mnfst.References() {
_, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}
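The name checks performed by verifyManifest above can be sketched in isolation using the reference package; checkManifestName is an invented helper for illustration.
package main
import (
    "fmt"
    "github.com/docker/distribution/reference"
)
// checkManifestName is a stand-alone sketch of the two name checks above:
// a maximum total length, and the repository name grammar from the
// reference package.
func checkManifestName(name string) error {
    if len(name) > reference.NameTotalLengthMax {
        return fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax)
    }
    if !reference.NameRegexp.MatchString(name) {
        return fmt.Errorf("invalid manifest name format")
    }
    return nil
}
func main() {
    fmt.Println(checkManifestName("library/busybox")) // <nil>
    fmt.Println(checkManifestName("UPPERCASE/Repo"))  // error: invalid format
}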


@ -0,0 +1,191 @@
package storage
import (
"path"
"github.com/docker/distribution"
"github.com/docker/distribution/context"
storagedriver "github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
var _ distribution.TagService = &tagStore{}
// tagStore provides methods to manage manifest tags in a backend storage driver.
// This implementation uses the same on-disk layout as the (now deleted) tag
// store, which provides backward compatibility with current registry deployments
// that only make use of the Digest field of the returned distribution.Descriptor,
// but it does not enable full roundtripping of Descriptor objects.
type tagStore struct {
repository *repository
blobStore *blobStore
}
// All returns all tags
func (ts *tagStore) All(ctx context.Context) ([]string, error) {
var tags []string
pathSpec, err := pathFor(manifestTagPathSpec{
name: ts.repository.Named().Name(),
})
if err != nil {
return tags, err
}
entries, err := ts.blobStore.driver.List(ctx, pathSpec)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()}
default:
return tags, err
}
}
for _, entry := range entries {
_, filename := path.Split(entry)
tags = append(tags, filename)
}
return tags, nil
}
// exists returns true if the specified manifest tag exists in the repository.
func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) {
tagPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return false, err
}
exists, err := exists(ctx, ts.blobStore.driver, tagPath)
if err != nil {
return false, err
}
return exists, nil
}
// Tag tags the digest with the given tag, updating the store to point at
// the current tag. The digest must point to a manifest.
func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return err
}
lbs := ts.linkedBlobStore(ctx, tag)
// Link into the index
if err := lbs.linkBlob(ctx, desc); err != nil {
return err
}
// Overwrite the current link
return ts.blobStore.link(ctx, currentPath, desc.Digest)
}
// Get resolves the current revision for name and tag.
func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return distribution.Descriptor{}, err
}
revision, err := ts.blobStore.readlink(ctx, currentPath)
if err != nil {
switch err.(type) {
case storagedriver.PathNotFoundError:
return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
}
return distribution.Descriptor{}, err
}
return distribution.Descriptor{Digest: revision}, nil
}
// Untag removes the tag association
func (ts *tagStore) Untag(ctx context.Context, tag string) error {
tagPath, err := pathFor(manifestTagPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
switch err.(type) {
case storagedriver.PathNotFoundError:
return distribution.ErrTagUnknown{Tag: tag}
case nil:
break
default:
return err
}
return ts.blobStore.driver.Delete(ctx, tagPath)
}
// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
// to index manifest blobs by tag name. While the tag store doesn't map
// precisely to the linked blob store, using this ensures the links are
// managed via the same code path.
func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
return &linkedBlobStore{
blobStore: ts.blobStore,
repository: ts.repository,
ctx: ctx,
linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name,
tag: tag,
revision: dgst,
})
}},
}
}
// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by
// digest, tag entries which point to it need to be recovered to avoid dangling tags.
func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
allTags, err := ts.All(ctx)
switch err.(type) {
case distribution.ErrRepositoryUnknown:
// This tag store has been initialized but not yet populated
break
case nil:
break
default:
return nil, err
}
var tags []string
for _, tag := range allTags {
tagLinkPathSpec := manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
}
tagLinkPath, err := pathFor(tagLinkPathSpec)
if err != nil {
return nil, err
}
tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
if err != nil {
return nil, err
}
if tagDigest == desc.Digest {
tags = append(tags, tag)
}
}
return tags, nil
}
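A sketch of driving the TagService returned by Repository.Tags, assuming the same in-memory driver as in the earlier sketch; the repository name and manifest digest are made up.
package main
import (
    "fmt"
    "github.com/docker/distribution"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/storage"
    "github.com/docker/distribution/registry/storage/driver/inmemory"
    "github.com/opencontainers/go-digest"
)
func main() {
    ctx := context.Background()
    ns, err := storage.NewRegistry(ctx, inmemory.New())
    if err != nil {
        panic(err)
    }
    named, err := reference.WithName("library/busybox")
    if err != nil {
        panic(err)
    }
    repo, err := ns.Repository(ctx, named)
    if err != nil {
        panic(err)
    }
    tags := repo.Tags(ctx)
    // Tag a (made-up) manifest digest, read it back, then list all tags.
    desc := distribution.Descriptor{Digest: digest.FromString("fake manifest payload")}
    if err := tags.Tag(ctx, "latest", desc); err != nil {
        panic(err)
    }
    got, err := tags.Get(ctx, "latest")
    fmt.Println(got.Digest, err)
    all, err := tags.All(ctx)
    fmt.Println(all, err)
}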


@ -0,0 +1,21 @@
package storage
import (
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
)
// Exists provides a utility method to test whether or not a path exists in
// the given driver.
func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
if _, err := drv.Stat(ctx, path); err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return false, nil
default:
return false, err
}
}
return true, nil
}


@ -0,0 +1,67 @@
package storage
import (
"path"
"github.com/docker/distribution/context"
"github.com/docker/distribution/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
// vacuum contains functions for cleaning up repositories and blobs
// These functions will only reliably work on strongly consistent
// storage systems.
// https://en.wikipedia.org/wiki/Consistency_model
// NewVacuum creates a new Vacuum
func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
return Vacuum{
ctx: ctx,
driver: driver,
}
}
// Vacuum removes content from the filesystem
type Vacuum struct {
driver driver.StorageDriver
ctx context.Context
}
// RemoveBlob removes a blob from the filesystem
func (v Vacuum) RemoveBlob(dgst string) error {
d, err := digest.Parse(dgst)
if err != nil {
return err
}
blobPath, err := pathFor(blobPathSpec{digest: d})
if err != nil {
return err
}
context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
err = v.driver.Delete(v.ctx, blobPath)
if err != nil {
return err
}
return nil
}
// RemoveRepository removes a repository directory from the
// filesystem
func (v Vacuum) RemoveRepository(repoName string) error {
rootForRepository, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
repoDir := path.Join(rootForRepository, repoName)
context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
err = v.driver.Delete(v.ctx, repoDir)
if err != nil {
return err
}
return nil
}
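A minimal sketch of Vacuum usage, assuming the in-memory driver; the digest and repository name are invented, and on an empty store the deletes simply surface the driver's path-not-found errors.
package main
import (
    "fmt"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/registry/storage"
    "github.com/docker/distribution/registry/storage/driver/inmemory"
    "github.com/opencontainers/go-digest"
)
func main() {
    ctx := context.Background()
    v := storage.NewVacuum(ctx, inmemory.New())
    // Deleting a blob that was never written surfaces the driver error; a
    // real caller would have enumerated blobs first.
    dgst := digest.FromString("orphaned blob")
    if err := v.RemoveBlob(dgst.String()); err != nil {
        fmt.Println("remove blob:", err)
    }
    if err := v.RemoveRepository("library/defunct"); err != nil {
        fmt.Println("remove repo:", err)
    }
}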


@ -0,0 +1,59 @@
package storage
import (
"errors"
"fmt"
"sort"
"github.com/docker/distribution/context"
storageDriver "github.com/docker/distribution/registry/storage/driver"
)
// ErrSkipDir is used as a return value from onFileFunc to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
var ErrSkipDir = errors.New("skip this directory")
// WalkFn is called once per file by Walk.
// If the returned error is ErrSkipDir and fileInfo refers
// to a directory, the directory will not be entered and Walk
// will continue the traversal. Otherwise Walk will return the error.
type WalkFn func(fileInfo storageDriver.FileInfo) error
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error {
children, err := driver.List(ctx, from)
if err != nil {
return err
}
sort.Stable(sort.StringSlice(children))
for _, child := range children {
// TODO(stevvooe): Calling driver.Stat for every entry is quite
// expensive when running against backends with a slow Stat
// implementation, such as s3. This is very likely a serious
// performance bottleneck.
fileInfo, err := driver.Stat(ctx, child)
if err != nil {
return err
}
err = f(fileInfo)
skipDir := (err == ErrSkipDir)
if err != nil && !skipDir {
return err
}
if fileInfo.IsDir() && !skipDir {
if err := Walk(ctx, driver, child, f); err != nil {
return err
}
}
}
return nil
}
// pushError formats an error type given a path and an error
// and pushes it to a slice of errors
func pushError(errors []error, path string, err error) []error {
return append(errors, fmt.Errorf("%s: %s", path, err))
}
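A sketch of Walk with ErrSkipDir, assuming the in-memory driver; the seeded paths are invented and loosely mimic the registry's on-disk layout.
package main
import (
    "fmt"
    "path"
    "github.com/docker/distribution/context"
    "github.com/docker/distribution/registry/storage"
    storagedriver "github.com/docker/distribution/registry/storage/driver"
    "github.com/docker/distribution/registry/storage/driver/inmemory"
)
func main() {
    ctx := context.Background()
    drv := inmemory.New()
    // Seed a couple of files so the walk has something to visit.
    _ = drv.PutContent(ctx, "/docker/registry/v2/repositories/foo/file", []byte("x"))
    _ = drv.PutContent(ctx, "/docker/registry/v2/repositories/foo/_uploads/tmp", []byte("y"))
    err := storage.Walk(ctx, drv, "/docker", func(fi storagedriver.FileInfo) error {
        // Skip upload directories, as a GC or deletion pass might.
        if fi.IsDir() && path.Base(fi.Path()) == "_uploads" {
            return storage.ErrSkipDir
        }
        fmt.Println(fi.Path())
        return nil
    })
    if err != nil {
        fmt.Println("walk:", err)
    }
}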

99
vendor/github.com/docker/docker/builder/builder.go generated vendored Normal file

@ -0,0 +1,99 @@
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder
import (
"io"
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
containerpkg "github.com/docker/docker/container"
)
const (
// DefaultDockerfileName is the default filename with Docker commands, read by docker build.
DefaultDockerfileName string = "Dockerfile"
)
// Source defines a location that can be used as a source for the ADD/COPY
// instructions in the builder.
type Source interface {
// Root returns the root path for accessing the source.
Root() string
// Close signals that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().
Close() error
// Hash returns a checksum for a file
Hash(path string) (string, error)
}
// Backend abstracts calls to a Docker Daemon.
type Backend interface {
ImageBackend
// ContainerAttachRaw attaches to container.
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
// ContainerCreate creates a new Docker container and returns potential warnings
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
// ContainerRm removes a container specified by `id`.
ContainerRm(name string, config *types.ContainerRmConfig) error
// Commit creates a new Docker image from an existing Docker container.
Commit(string, *backend.ContainerCommitConfig) (string, error)
// ContainerKill stops the container execution abruptly.
ContainerKill(containerID string, sig uint64) error
// ContainerStart starts a new container
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
// ContainerWait stops processing until the given container is stopped.
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
// ContainerCreateWorkdir creates the workdir
ContainerCreateWorkdir(containerID string) error
// ContainerCopy copies/extracts a source FileInfo to a destination path inside a container
// specified by a container object.
// TODO: extract in the builder instead of passing `decompress`
// TODO: use containerd/fs.changestream instead as a source
CopyOnBuild(containerID string, destPath string, srcRoot string, srcPath string, decompress bool) error
}
// ImageBackend are the interface methods required from an image component
type ImageBackend interface {
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
}
// Result is the output produced by a Builder
type Result struct {
ImageID string
FromImage Image
}
// ImageCacheBuilder represents a generator for stateful image cache.
type ImageCacheBuilder interface {
// MakeImageCache creates a stateful image cache.
MakeImageCache(cacheFrom []string) ImageCache
}
// ImageCache abstracts an image cache.
// (parent image, child runconfig) -> child image
type ImageCache interface {
// GetCache returns a reference to a cached image whose parent equals `parent`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
}
// Image represents a Docker image used by the builder.
type Image interface {
ImageID() string
RunConfig() *container.Config
}
// ReleaseableLayer is an image layer that can be mounted and released
type ReleaseableLayer interface {
Release() error
Mount() (string, error)
}
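The ImageCache contract above (a miss is an empty ID with a nil error) can be illustrated with an invented no-op implementation:
package main
import (
    "fmt"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/builder"
)
// nopImageCache illustrates the ImageCache contract: a cache miss must be
// reported as an empty image ID with a nil error, so the builder can fall
// back to executing the step.
type nopImageCache struct{}
func (nopImageCache) GetCache(parentID string, cfg *container.Config) (string, error) {
    return "", nil // always a miss
}
var _ builder.ImageCache = nopImageCache{}
func main() {
    var cache nopImageCache
    id, err := cache.GetCache("sha256:parent", &container.Config{Image: "busybox"})
    fmt.Printf("id=%q err=%v\n", id, err)
}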

25
vendor/github.com/docker/docker/cli/cli.go generated vendored Normal file

@ -0,0 +1,25 @@
package cli
import (
"os"
"path/filepath"
"github.com/docker/docker/pkg/homedir"
)
var (
configDir = os.Getenv("DOCKER_CONFIG")
configFileDir = ".docker"
)
// ConfigurationDir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable.
// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves
func ConfigurationDir() string {
return configDir
}
func init() {
if configDir == "" {
configDir = filepath.Join(homedir.Get(), configFileDir)
}
}

150
vendor/github.com/docker/docker/cli/cobra.go generated vendored Normal file

@ -0,0 +1,150 @@
package cli
import (
"fmt"
"strings"
"github.com/docker/docker/pkg/term"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) {
cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
rootCmd.SetUsageTemplate(usageTemplate)
rootCmd.SetHelpTemplate(helpTemplate)
rootCmd.SetFlagErrorFunc(FlagErrorFunc)
rootCmd.SetHelpCommand(helpCommand)
rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
}
// FlagErrorFunc returns a StatusError which matches the format of the
// docker/docker/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
if err == nil {
return nil
}
usage := ""
if cmd.HasSubCommands() {
usage = "\n\n" + cmd.UsageString()
}
return StatusError{
Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
StatusCode: 125,
}
}
var helpCommand = &cobra.Command{
Use: "help [command]",
Short: "Help about the command",
PersistentPreRun: func(cmd *cobra.Command, args []string) {},
PersistentPostRun: func(cmd *cobra.Command, args []string) {},
RunE: func(c *cobra.Command, args []string) error {
cmd, args, e := c.Root().Find(args)
if cmd == nil || e != nil || len(args) > 0 {
return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
}
helpFunc := cmd.HelpFunc()
helpFunc(cmd, args)
return nil
},
}
func hasSubCommands(cmd *cobra.Command) bool {
return len(operationSubCommands(cmd)) > 0
}
func hasManagementSubCommands(cmd *cobra.Command) bool {
return len(managementSubCommands(cmd)) > 0
}
func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if sub.IsAvailableCommand() && !sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
func wrappedFlagUsages(cmd *cobra.Command) string {
width := 80
if ws, err := term.GetWinsize(0); err == nil {
width = int(ws.Width)
}
return cmd.Flags().FlagUsagesWrapped(width - 1)
}
func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
cmds := []*cobra.Command{}
for _, sub := range cmd.Commands() {
if sub.IsAvailableCommand() && sub.HasSubCommands() {
cmds = append(cmds, sub)
}
}
return cmds
}
var usageTemplate = `Usage:
{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}
{{ .Short | trim }}
{{- if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}
{{- end}}
{{- if .HasExample}}
Examples:
{{ .Example }}
{{- end}}
{{- if .HasFlags}}
Options:
{{ wrappedFlagUsages . | trimRightSpace}}
{{- end}}
{{- if hasManagementSubCommands . }}
Management Commands:
{{- range managementSubCommands . }}
{{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}
{{- if hasSubCommands .}}
Commands:
{{- range operationSubCommands . }}
{{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}
{{- if .HasSubCommands }}
Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
`
var helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
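A minimal sketch of wiring SetupRootCommand into an invented root command:
package main
import (
    "fmt"
    "github.com/docker/docker/cli"
    "github.com/spf13/cobra"
)
func main() {
    rootCmd := &cobra.Command{
        Use:   "myctl",
        Short: "A made-up CLI used to exercise SetupRootCommand",
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("hello")
            return nil
        },
    }
    // Installs the usage/help templates, help command, and flag error handling above.
    cli.SetupRootCommand(rootCmd)
    if err := rootCmd.Execute(); err != nil {
        fmt.Println(err)
    }
}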

33
vendor/github.com/docker/docker/cli/error.go generated vendored Normal file

@ -0,0 +1,33 @@
package cli
import (
"fmt"
"strings"
)
// Errors is a list of errors.
// Useful in a loop when you don't want to return an error right away, but
// instead want to display all the errors that occurred during the loop once it finishes.
type Errors []error
func (errList Errors) Error() string {
if len(errList) < 1 {
return ""
}
out := make([]string, len(errList))
for i := range errList {
out[i] = errList[i].Error()
}
return strings.Join(out, ", ")
}
// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
Status string
StatusCode int
}
func (e StatusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
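A sketch of the loop pattern Errors is intended for; removeAll and its failure condition are invented:
package main
import (
    "fmt"
    "github.com/docker/docker/cli"
)
// removeAll collects a failure for every item, then reports them together
// at the end instead of stopping at the first one.
func removeAll(names []string) error {
    var errs cli.Errors
    for _, name := range names {
        if name == "locked" { // stand-in for a real failure
            errs = append(errs, fmt.Errorf("cannot remove %q", name))
        }
    }
    if len(errs) > 0 {
        return errs
    }
    return nil
}
func main() {
    fmt.Println(removeAll([]string{"a", "locked", "b"}))
}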

96
vendor/github.com/docker/docker/cli/required.go generated vendored Normal file

@ -0,0 +1,96 @@
package cli
import (
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// NoArgs validates args and returns an error if there are any args
func NoArgs(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return nil
}
if cmd.HasSubCommands() {
return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
}
return errors.Errorf(
"\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
// RequiresMinArgs returns an error if there are not at least min args
func RequiresMinArgs(min int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) >= min {
return nil
}
return errors.Errorf(
"\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
min,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// RequiresMaxArgs returns an error if there are more than max args
func RequiresMaxArgs(max int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) <= max {
return nil
}
return errors.Errorf(
"\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
max,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// RequiresRangeArgs returns an error if there are not at least min args and at most max args
func RequiresRangeArgs(min int, max int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) >= min && len(args) <= max {
return nil
}
return errors.Errorf(
"\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
min,
max,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
// ExactArgs returns an error if the number of args is not exactly the given number
func ExactArgs(number int) cobra.PositionalArgs {
return func(cmd *cobra.Command, args []string) error {
if len(args) == number {
return nil
}
return errors.Errorf(
"\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
cmd.CommandPath(),
number,
cmd.CommandPath(),
cmd.UseLine(),
cmd.Short,
)
}
}
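A sketch of attaching one of these validators to an invented cobra command via its Args field:
package main
import (
    "fmt"
    "github.com/docker/docker/cli"
    "github.com/spf13/cobra"
)
func main() {
    cmd := &cobra.Command{
        Use:   "greet NAME...",
        Short: "Greet one or more people",
        Args:  cli.RequiresMinArgs(1), // rejects empty arg lists with the error format above
        RunE: func(cmd *cobra.Command, args []string) error {
            for _, name := range args {
                fmt.Println("hello,", name)
            }
            return nil
        },
    }
    cmd.SetArgs([]string{}) // no args: triggers the RequiresMinArgs error
    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
    }
}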

108
vendor/github.com/docker/docker/runconfig/config.go generated vendored Normal file

@ -0,0 +1,108 @@
package runconfig
import (
"encoding/json"
"fmt"
"io"
"github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/volume"
)
// ContainerDecoder implements httputils.ContainerDecoder
// by calling DecodeContainerConfig.
type ContainerDecoder struct{}
// DecodeConfig makes ContainerDecoder implement httputils.ContainerDecoder.
func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
return DecodeContainerConfig(src)
}
// DecodeHostConfig makes ContainerDecoder implement httputils.ContainerDecoder.
func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) {
return DecodeHostConfig(src)
}
// DecodeContainerConfig decodes a JSON-encoded config into a ContainerConfigWrapper
// struct and returns a Config, a HostConfig, and a NetworkingConfig struct.
// Be aware that this function does not check whether the resulting structs
// are nil; that is the caller's responsibility.
func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
var w ContainerConfigWrapper
decoder := json.NewDecoder(src)
if err := decoder.Decode(&w); err != nil {
return nil, nil, nil, err
}
hc := w.getHostConfig()
// Perform platform-specific processing of Volumes and Binds.
if w.Config != nil && hc != nil {
// Initialize the volumes map if currently nil
if w.Config.Volumes == nil {
w.Config.Volumes = make(map[string]struct{})
}
// Now validate all the volumes and binds
if err := validateMountSettings(w.Config, hc); err != nil {
return nil, nil, nil, err
}
}
// Certain parameters need daemon-side validation that cannot be done
// on the client, as only the daemon knows what is valid for the platform.
if err := validateNetMode(w.Config, hc); err != nil {
return nil, nil, nil, err
}
// Validate isolation
if err := validateIsolation(hc); err != nil {
return nil, nil, nil, err
}
// Validate QoS
if err := validateQoS(hc); err != nil {
return nil, nil, nil, err
}
// Validate Resources
if err := validateResources(hc, sysinfo.New(true)); err != nil {
return nil, nil, nil, err
}
// Validate Privileged
if err := validatePrivileged(hc); err != nil {
return nil, nil, nil, err
}
// Validate ReadonlyRootfs
if err := validateReadonlyRootfs(hc); err != nil {
return nil, nil, nil, err
}
return w.Config, hc, w.NetworkingConfig, nil
}
// validateMountSettings validates each of the volumes and bind settings
// passed by the caller to ensure they are valid.
func validateMountSettings(c *container.Config, hc *container.HostConfig) error {
// It is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len(c.Volumes) > 0 || len(hc.Tmpfs) > 0).
// Ensure all volumes and binds are valid.
for spec := range c.Volumes {
if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
return fmt.Errorf("invalid volume spec %q: %v", spec, err)
}
}
for _, spec := range hc.Binds {
if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
return fmt.Errorf("invalid bind mount spec %q: %v", spec, err)
}
}
return nil
}
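A minimal sketch of DecodeContainerConfig on a hand-written create body; this assumes a non-Windows build, where the platform validators shown later all pass for a config like this one.
package main
import (
    "fmt"
    "strings"
    "github.com/docker/docker/runconfig"
)
func main() {
    // A minimal create-container body; HostConfig may arrive nested (current
    // API) or at the top level (legacy clients), and the wrapper handles both.
    body := `{"Image": "busybox", "Cmd": ["echo", "hi"], "HostConfig": {"NetworkMode": "default"}}`
    config, hostConfig, networkingConfig, err := runconfig.DecodeContainerConfig(strings.NewReader(body))
    if err != nil {
        panic(err)
    }
    fmt.Println(config.Image, hostConfig.NetworkMode, networkingConfig == nil)
}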


@ -0,0 +1,59 @@
// +build !windows
package runconfig
import (
"github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
)
// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable)
// and the corresponding HostConfig (non-portable).
type ContainerConfigWrapper struct {
*container.Config
InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"`
Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility.
NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"`
*container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure.
}
// getHostConfig gets the HostConfig of the Config.
// It's mostly there to handle deprecated fields of the ContainerConfigWrapper.
func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig {
hc := w.HostConfig
if hc == nil && w.InnerHostConfig != nil {
hc = w.InnerHostConfig
} else if w.InnerHostConfig != nil {
if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 {
w.InnerHostConfig.Memory = hc.Memory
}
if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 {
w.InnerHostConfig.MemorySwap = hc.MemorySwap
}
if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 {
w.InnerHostConfig.CPUShares = hc.CPUShares
}
if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" {
w.InnerHostConfig.CpusetCpus = hc.CpusetCpus
}
if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" {
w.InnerHostConfig.VolumeDriver = hc.VolumeDriver
}
hc = w.InnerHostConfig
}
if hc != nil {
if w.Cpuset != "" && hc.CpusetCpus == "" {
hc.CpusetCpus = w.Cpuset
}
}
// Make sure NetworkMode has an acceptable value. We do this to ensure
// backwards compatible API behavior.
SetDefaultNetModeIfBlank(hc)
return hc
}


@ -0,0 +1,19 @@
package runconfig
import (
"github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
)
// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable)
// and the corresponding HostConfig (non-portable).
type ContainerConfigWrapper struct {
*container.Config
HostConfig *container.HostConfig `json:"HostConfig,omitempty"`
NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"`
}
// getHostConfig gets the HostConfig of the Config.
func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig {
return w.HostConfig
}

38
vendor/github.com/docker/docker/runconfig/errors.go generated vendored Normal file

@ -0,0 +1,38 @@
package runconfig
import (
"fmt"
)
var (
// ErrConflictContainerNetworkAndLinks conflict between --net=container and links
ErrConflictContainerNetworkAndLinks = fmt.Errorf("conflicting options: container type network can't be used with links. This would result in undefined behavior")
// ErrConflictSharedNetwork conflict between private and other networks
ErrConflictSharedNetwork = fmt.Errorf("container sharing network namespace with another container or host cannot be connected to any other network")
// ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network.
ErrConflictHostNetwork = fmt.Errorf("container cannot be disconnected from host network or connected to host network")
// ErrConflictNoNetwork conflict between private and other networks
ErrConflictNoNetwork = fmt.Errorf("container cannot be connected to multiple networks with one of the networks in private (none) mode")
// ErrConflictNetworkAndDNS conflict between --dns and the network mode
ErrConflictNetworkAndDNS = fmt.Errorf("conflicting options: dns and the network mode")
// ErrConflictNetworkHostname conflict between the hostname and the network mode
ErrConflictNetworkHostname = fmt.Errorf("conflicting options: hostname and the network mode")
// ErrConflictHostNetworkAndLinks conflict between --net=host and links
ErrConflictHostNetworkAndLinks = fmt.Errorf("conflicting options: host type networking can't be used with links. This would result in undefined behavior")
// ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode
ErrConflictContainerNetworkAndMac = fmt.Errorf("conflicting options: mac-address and the network mode")
// ErrConflictNetworkHosts conflict between add-host and the network mode
ErrConflictNetworkHosts = fmt.Errorf("conflicting options: custom host-to-IP mapping and the network mode")
// ErrConflictNetworkPublishPorts conflict between the publish options and the network mode
ErrConflictNetworkPublishPorts = fmt.Errorf("conflicting options: port publishing and the container type network mode")
// ErrConflictNetworkExposePorts conflict between the expose option and the network mode
ErrConflictNetworkExposePorts = fmt.Errorf("conflicting options: port exposing and the container type network mode")
// ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address
ErrUnsupportedNetworkAndIP = fmt.Errorf("user specified IP address is supported on user defined networks only")
// ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address
ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("user specified IP address is supported only when connecting to networks with user configured subnets")
// ErrUnsupportedNetworkAndAlias conflict between network mode and alias
ErrUnsupportedNetworkAndAlias = fmt.Errorf("network-scoped alias is supported only for containers in user defined networks")
// ErrConflictUTSHostname conflict between the hostname and the UTS mode
ErrConflictUTSHostname = fmt.Errorf("conflicting options: hostname and the UTS mode")
)


@ -0,0 +1,80 @@
package runconfig
import (
"encoding/json"
"fmt"
"io"
"strings"
"github.com/docker/docker/api/types/container"
)
// DecodeHostConfig creates a HostConfig based on the specified Reader.
// It assumes the content of the reader will be JSON, and decodes it.
func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) {
decoder := json.NewDecoder(src)
var w ContainerConfigWrapper
if err := decoder.Decode(&w); err != nil {
return nil, err
}
hc := w.getHostConfig()
return hc, nil
}
// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure
// to default if it is not populated. This ensures backwards compatibility after
// the validation of the network mode was moved from the docker CLI to the
// docker daemon.
func SetDefaultNetModeIfBlank(hc *container.HostConfig) {
if hc != nil {
if hc.NetworkMode == container.NetworkMode("") {
hc.NetworkMode = container.NetworkMode("default")
}
}
}
// validateNetContainerMode ensures that the various combinations of requested
// network settings with respect to container mode are valid.
func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
parts := strings.Split(string(hc.NetworkMode), ":")
if parts[0] == "container" {
if len(parts) < 2 || parts[1] == "" {
return fmt.Errorf("--net: invalid net mode: invalid container format container:<name|id>")
}
}
if hc.NetworkMode.IsContainer() && c.Hostname != "" {
return ErrConflictNetworkHostname
}
if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 {
return ErrConflictContainerNetworkAndLinks
}
if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 {
return ErrConflictNetworkAndDNS
}
if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 {
return ErrConflictNetworkHosts
}
if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" {
return ErrConflictContainerNetworkAndMac
}
if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) {
return ErrConflictNetworkPublishPorts
}
if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 {
return ErrConflictNetworkExposePorts
}
return nil
}
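A sketch of DecodeHostConfig plus SetDefaultNetModeIfBlank; on non-Windows builds the wrapper already applies the default on the way out, so the explicit call shown here is idempotent.
package main
import (
    "fmt"
    "strings"
    "github.com/docker/docker/runconfig"
)
func main() {
    // An empty nested HostConfig decodes to a struct with a blank NetworkMode.
    hc, err := runconfig.DecodeHostConfig(strings.NewReader(`{"HostConfig": {}}`))
    if err != nil {
        panic(err)
    }
    // Normalize a blank NetworkMode to "default" (a no-op if already set).
    runconfig.SetDefaultNetModeIfBlank(hc)
    fmt.Println(hc.NetworkMode) // default
}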


@ -0,0 +1,46 @@
package runconfig
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
)
// DefaultDaemonNetworkMode returns the default network stack the daemon should
// use.
func DefaultDaemonNetworkMode() container.NetworkMode {
return container.NetworkMode("bridge")
}
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
return false
}
// validateNetMode ensures that the various combinations of requested
// network settings are valid.
func validateNetMode(c *container.Config, hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
return nil
}
// validateIsolation performs platform specific validation of the
// isolation level in the hostconfig structure.
// This setting is currently discarded for Solaris so this is a no-op.
func validateIsolation(hc *container.HostConfig) error {
return nil
}
// validateQoS performs platform specific validation of the QoS settings
func validateQoS(hc *container.HostConfig) error {
return nil
}
// validateResources performs platform specific validation of the resource settings
func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error {
return nil
}
// validatePrivileged performs platform specific validation of the Privileged setting
func validatePrivileged(hc *container.HostConfig) error {
return nil
}


@ -0,0 +1,110 @@
// +build !windows,!solaris
package runconfig
import (
"fmt"
"runtime"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
)
// DefaultDaemonNetworkMode returns the default network stack the daemon should
// use.
func DefaultDaemonNetworkMode() container.NetworkMode {
return container.NetworkMode("bridge")
}
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
n := container.NetworkMode(network)
return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault()
}
// validateNetMode ensures that the various combinations of requested
// network settings are valid.
func validateNetMode(c *container.Config, hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
err := validateNetContainerMode(c, hc)
if err != nil {
return err
}
if hc.UTSMode.IsHost() && c.Hostname != "" {
return ErrConflictUTSHostname
}
if hc.NetworkMode.IsHost() && len(hc.Links) > 0 {
return ErrConflictHostNetworkAndLinks
}
return nil
}
// validateIsolation performs platform specific validation of
// isolation in the hostconfig structure. Linux only supports "default"
// which is LXC container isolation
func validateIsolation(hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if !hc.Isolation.IsValid() {
return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS)
}
return nil
}
// validateQoS performs platform specific validation of the QoS settings
func validateQoS(hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if hc.IOMaximumBandwidth != 0 {
return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS)
}
if hc.IOMaximumIOps != 0 {
return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS)
}
return nil
}
// validateResources performs platform specific validation of the resource settings
// cpu-rt-runtime and cpu-rt-period cannot be greater than their parent; cpu-rt-runtime requires sys_nice.
func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod {
return fmt.Errorf("invalid --cpu-rt-period: Your kernel does not support cgroup rt period")
}
if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime {
return fmt.Errorf("invalid --cpu-rt-runtime: Your kernel does not support cgroup rt runtime")
}
if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod {
return fmt.Errorf("invalid --cpu-rt-runtime: rt runtime cannot be higher than rt period")
}
return nil
}
// validatePrivileged performs platform specific validation of the Privileged setting
func validatePrivileged(hc *container.HostConfig) error {
return nil
}
// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting
func validateReadonlyRootfs(hc *container.HostConfig) error {
return nil
}


@ -0,0 +1,96 @@
package runconfig
import (
"fmt"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
)
// DefaultDaemonNetworkMode returns the default network stack the daemon should
// use.
func DefaultDaemonNetworkMode() container.NetworkMode {
return container.NetworkMode("nat")
}
// IsPreDefinedNetwork indicates if a network is predefined by the daemon
func IsPreDefinedNetwork(network string) bool {
return !container.NetworkMode(network).IsUserDefined()
}
// validateNetMode ensures that the various combinations of requested
// network settings are valid.
func validateNetMode(c *container.Config, hc *container.HostConfig) error {
if hc == nil {
return nil
}
err := validateNetContainerMode(c, hc)
if err != nil {
return err
}
if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() {
return fmt.Errorf("net mode --net=container:<NameOrId> unsupported for hyperv isolation")
}
return nil
}
// validateIsolation performs platform specific validation of the
// isolation in the hostconfig structure. Windows supports 'default' (or
// blank), 'process', or 'hyperv'.
func validateIsolation(hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if !hc.Isolation.IsValid() {
return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation)
}
return nil
}
// validateQoS performs platform specific validation of the QoS settings
func validateQoS(hc *container.HostConfig) error {
return nil
}
// validateResources performs platform specific validation of the resource settings
func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if hc.Resources.CPURealtimePeriod != 0 {
return fmt.Errorf("invalid --cpu-rt-period: Windows does not support this feature")
}
if hc.Resources.CPURealtimeRuntime != 0 {
return fmt.Errorf("invalid --cpu-rt-runtime: Windows does not support this feature")
}
return nil
}
// validatePrivileged performs platform specific validation of the Privileged setting
func validatePrivileged(hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if hc.Privileged {
return fmt.Errorf("invalid --privileged: Windows does not support this feature")
}
return nil
}
// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting
func validateReadonlyRootfs(hc *container.HostConfig) error {
// We may not be passed a host config, such as in the case of docker commit
if hc == nil {
return nil
}
if hc.ReadonlyRootfs {
return fmt.Errorf("invalid --read-only: Windows does not support this feature")
}
return nil
}