Merge 'v1.5.2' into master
commit 794c0206f3
338 changed files with 4158 additions and 48549 deletions
162  vendor/github.com/docker/cli/cli/cobra.go  (generated, vendored)
@@ -1,162 +0,0 @@
package cli

import (
    "fmt"
    "strings"

    "github.com/docker/docker/pkg/term"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
)

// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) {
    cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
    cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
    cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
    cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
    cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)
    cobra.AddTemplateFunc("useLine", UseLine)

    rootCmd.SetUsageTemplate(usageTemplate)
    rootCmd.SetHelpTemplate(helpTemplate)
    rootCmd.SetFlagErrorFunc(FlagErrorFunc)
    rootCmd.SetHelpCommand(helpCommand)

    rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
    rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
    rootCmd.PersistentFlags().Lookup("help").Hidden = true
}

// FlagErrorFunc prints an error message which matches the format of the
// docker/cli/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
    if err == nil {
        return nil
    }

    usage := ""
    if cmd.HasSubCommands() {
        usage = "\n\n" + cmd.UsageString()
    }
    return StatusError{
        Status:     fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
        StatusCode: 125,
    }
}

var helpCommand = &cobra.Command{
    Use:               "help [command]",
    Short:             "Help about the command",
    PersistentPreRun:  func(cmd *cobra.Command, args []string) {},
    PersistentPostRun: func(cmd *cobra.Command, args []string) {},
    RunE: func(c *cobra.Command, args []string) error {
        cmd, args, e := c.Root().Find(args)
        if cmd == nil || e != nil || len(args) > 0 {
            return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
        }

        helpFunc := cmd.HelpFunc()
        helpFunc(cmd, args)
        return nil
    },
}

func hasSubCommands(cmd *cobra.Command) bool {
    return len(operationSubCommands(cmd)) > 0
}

func hasManagementSubCommands(cmd *cobra.Command) bool {
    return len(managementSubCommands(cmd)) > 0
}

func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
    cmds := []*cobra.Command{}
    for _, sub := range cmd.Commands() {
        if sub.IsAvailableCommand() && !sub.HasSubCommands() {
            cmds = append(cmds, sub)
        }
    }
    return cmds
}

func wrappedFlagUsages(cmd *cobra.Command) string {
    width := 80
    if ws, err := term.GetWinsize(0); err == nil {
        width = int(ws.Width)
    }
    return cmd.Flags().FlagUsagesWrapped(width - 1)
}

func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
    cmds := []*cobra.Command{}
    for _, sub := range cmd.Commands() {
        if sub.IsAvailableCommand() && sub.HasSubCommands() {
            cmds = append(cmds, sub)
        }
    }
    return cmds
}

// UseLine returns the usage line for a command. This implementation is different
// from the default Command.UseLine in that it does not add a `[flags]` to the
// end of the line.
func UseLine(cmd *cobra.Command) string {
    if cmd.HasParent() {
        return cmd.Parent().CommandPath() + " " + cmd.Use
    }
    return cmd.Use
}

var usageTemplate = `Usage:

{{- if not .HasSubCommands}} {{ useLine . }}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}

{{ .Short | trim }}

{{- if gt .Aliases 0}}

Aliases:
  {{.NameAndAliases}}

{{- end}}
{{- if .HasExample}}

Examples:
{{ .Example }}

{{- end}}
{{- if .HasFlags}}

Options:
{{ wrappedFlagUsages . | trimRightSpace}}

{{- end}}
{{- if hasManagementSubCommands . }}

Management Commands:

{{- range managementSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}

{{- end}}
{{- if hasSubCommands .}}

Commands:

{{- range operationSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}

{{- if .HasSubCommands }}

Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
`

var helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
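
A usage sketch (not part of this commit): how a consumer would typically wire these helpers into its own root command, assuming the vendored package is importable as github.com/docker/cli/cli. The `mycli` command name is hypothetical:

package main

import (
    "fmt"
    "os"

    "github.com/docker/cli/cli"
    "github.com/spf13/cobra"
)

func main() {
    rootCmd := &cobra.Command{
        Use:   "mycli",
        Short: "An example CLI using docker/cli's cobra helpers",
    }
    // Installs the usage/help templates, the hidden `help` command, and
    // the flag error handler (which returns a StatusError with code 125).
    cli.SetupRootCommand(rootCmd)

    if err := rootCmd.Execute(); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}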
258  vendor/github.com/docker/cli/cli/command/cli.go  (generated, vendored)
@@ -1,258 +0,0 @@
package command

import (
    "io"
    "net"
    "net/http"
    "os"
    "runtime"
    "time"

    "github.com/docker/cli/cli"
    cliconfig "github.com/docker/cli/cli/config"
    "github.com/docker/cli/cli/config/configfile"
    cliflags "github.com/docker/cli/cli/flags"
    "github.com/docker/cli/cli/trust"
    dopts "github.com/docker/cli/opts"
    "github.com/docker/docker/api"
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/sockets"
    "github.com/docker/go-connections/tlsconfig"
    "github.com/pkg/errors"
    "github.com/spf13/cobra"
    "github.com/theupdateframework/notary"
    notaryclient "github.com/theupdateframework/notary/client"
    "github.com/theupdateframework/notary/passphrase"
    "golang.org/x/net/context"
)

// Streams is an interface which exposes the standard input and output streams
type Streams interface {
    In() *InStream
    Out() *OutStream
    Err() io.Writer
}

// Cli represents the docker command line client.
type Cli interface {
    Client() client.APIClient
    Out() *OutStream
    Err() io.Writer
    In() *InStream
    SetIn(in *InStream)
    ConfigFile() *configfile.ConfigFile
    ServerInfo() ServerInfo
    NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
}

// DockerCli is an instance of the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
    configFile     *configfile.ConfigFile
    in             *InStream
    out            *OutStream
    err            io.Writer
    client         client.APIClient
    defaultVersion string
    server         ServerInfo
}

// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
    return cli.defaultVersion
}

// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
    return cli.client
}

// Out returns the writer used for stdout
func (cli *DockerCli) Out() *OutStream {
    return cli.out
}

// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
    return cli.err
}

// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *InStream) {
    cli.in = in
}

// In returns the reader used for stdin
func (cli *DockerCli) In() *InStream {
    return cli.in
}

// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
    return func(cmd *cobra.Command, args []string) error {
        cmd.SetOutput(err)
        cmd.HelpFunc()(cmd, args)
        return nil
    }
}

// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
    return cli.configFile
}

// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
    return cli.server
}

// Initialize runs initialization on dockerCli that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
    cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)

    var err error
    cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
    if tlsconfig.IsErrEncryptedKey(err) {
        passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
        newClient := func(password string) (client.APIClient, error) {
            opts.Common.TLSOptions.Passphrase = password
            return NewAPIClientFromFlags(opts.Common, cli.configFile)
        }
        cli.client, err = getClientWithPassword(passRetriever, newClient)
    }
    if err != nil {
        return err
    }
    cli.initializeFromClient()
    return nil
}

func (cli *DockerCli) initializeFromClient() {
    cli.defaultVersion = cli.client.ClientVersion()

    ping, err := cli.client.Ping(context.Background())
    if err != nil {
        // Default to true if we fail to connect to daemon
        cli.server = ServerInfo{HasExperimental: true}

        if ping.APIVersion != "" {
            cli.client.NegotiateAPIVersionPing(ping)
        }
        return
    }

    cli.server = ServerInfo{
        HasExperimental: ping.Experimental,
        OSType:          ping.OSType,
    }
    cli.client.NegotiateAPIVersionPing(ping)
}

func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
    for attempts := 0; ; attempts++ {
        passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
        if giveup || err != nil {
            return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
        }

        apiclient, err := newClient(passwd)
        if !tlsconfig.IsErrEncryptedKey(err) {
            return apiclient, err
        }
    }
}

// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
    return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}

// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
    HasExperimental bool
    OSType          string
}

// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli {
    return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err}
}

// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
    host, err := getServerHost(opts.Hosts, opts.TLSOptions)
    if err != nil {
        return &client.Client{}, err
    }

    customHeaders := configFile.HTTPHeaders
    if customHeaders == nil {
        customHeaders = map[string]string{}
    }
    customHeaders["User-Agent"] = UserAgent()

    verStr := api.DefaultVersion
    if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
        verStr = tmpStr
    }

    httpClient, err := newHTTPClient(host, opts.TLSOptions)
    if err != nil {
        return &client.Client{}, err
    }

    return client.NewClient(host, verStr, httpClient, customHeaders)
}

func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
    var host string
    switch len(hosts) {
    case 0:
        host = os.Getenv("DOCKER_HOST")
    case 1:
        host = hosts[0]
    default:
        return "", errors.New("Please specify only one -H")
    }

    return dopts.ParseHost(tlsOptions != nil, host)
}

func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) {
    if tlsOptions == nil {
        // let the api client configure the default transport.
        return nil, nil
    }
    opts := *tlsOptions
    opts.ExclusiveRootPools = true
    config, err := tlsconfig.Client(opts)
    if err != nil {
        return nil, err
    }
    tr := &http.Transport{
        TLSClientConfig: config,
        DialContext: (&net.Dialer{
            KeepAlive: 30 * time.Second,
            Timeout:   30 * time.Second,
        }).DialContext,
    }
    proto, addr, _, err := client.ParseHost(host)
    if err != nil {
        return nil, err
    }

    sockets.ConfigureTransport(tr, proto, addr)

    return &http.Client{
        Transport:     tr,
        CheckRedirect: client.CheckRedirect,
    }, nil
}

// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
    return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")"
}
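
A minimal client-construction sketch (not part of this commit), assuming the companion helpers cliflags.NewCommonOptions and configfile.New exist in the same vendored tree as they did in docker/cli of this era. DOCKER_HOST and DOCKER_API_VERSION from the environment are honored, as in NewAPIClientFromFlags above:

package main

import (
    "fmt"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/config/configfile"
    cliflags "github.com/docker/cli/cli/flags"
    "golang.org/x/net/context"
)

func main() {
    // Default flag values plus environment overrides; an empty config file
    // stands in for ~/.docker/config.json here.
    apiClient, err := command.NewAPIClientFromFlags(cliflags.NewCommonOptions(), configfile.New(""))
    if err != nil {
        panic(err)
    }
    ping, err := apiClient.Ping(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Println("daemon API version:", ping.APIVersion)
}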
47  vendor/github.com/docker/cli/cli/command/events_utils.go  (generated, vendored)
@@ -1,47 +0,0 @@
package command

import (
    "sync"

    eventtypes "github.com/docker/docker/api/types/events"
    "github.com/sirupsen/logrus"
)

// EventHandler is an abstract interface that lets the user customize
// handler functions for each type of event
type EventHandler interface {
    Handle(action string, h func(eventtypes.Message))
    Watch(c <-chan eventtypes.Message)
}

// InitEventHandler initializes and returns an EventHandler
func InitEventHandler() EventHandler {
    return &eventHandler{handlers: make(map[string]func(eventtypes.Message))}
}

type eventHandler struct {
    handlers map[string]func(eventtypes.Message)
    mu       sync.Mutex
}

func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) {
    w.mu.Lock()
    w.handlers[action] = h
    w.mu.Unlock()
}

// Watch ranges over the passed in event chan and processes the events based on the
// handlers created for a given action.
// To stop watching, close the event chan.
func (w *eventHandler) Watch(c <-chan eventtypes.Message) {
    for e := range c {
        w.mu.Lock()
        h, exists := w.handlers[e.Action]
        w.mu.Unlock()
        if !exists {
            continue
        }
        logrus.Debugf("event handler: received event: %v", e)
        go h(e)
    }
}
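
A usage sketch (not part of this commit) for the handler above; note that Watch dispatches each handler on its own goroutine, so the example waits on a channel before exiting:

package main

import (
    "fmt"

    "github.com/docker/cli/cli/command"
    eventtypes "github.com/docker/docker/api/types/events"
)

func main() {
    h := command.InitEventHandler()
    done := make(chan struct{})
    h.Handle("start", func(e eventtypes.Message) {
        fmt.Println("container started:", e.Actor.ID)
        close(done)
    })

    c := make(chan eventtypes.Message)
    go func() {
        c <- eventtypes.Message{Action: "start", Actor: eventtypes.Actor{ID: "abc123"}}
        close(c) // closing the channel makes Watch return
    }()
    h.Watch(c)
    <-done // handlers run in goroutines; wait for ours to finish
}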
589  vendor/github.com/docker/cli/cli/command/image/build.go  (generated, vendored)
@@ -1,589 +0,0 @@
package image

import (
    "archive/tar"
    "bufio"
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "regexp"
    "runtime"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/image/build"
    "github.com/docker/cli/opts"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/docker/docker/pkg/progress"
    "github.com/docker/docker/pkg/streamformatter"
    "github.com/docker/docker/pkg/urlutil"
    units "github.com/docker/go-units"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "golang.org/x/net/context"
)

type buildOptions struct {
    context        string
    dockerfileName string
    tags           opts.ListOpts
    labels         opts.ListOpts
    buildArgs      opts.ListOpts
    extraHosts     opts.ListOpts
    ulimits        *opts.UlimitOpt
    memory         opts.MemBytes
    memorySwap     opts.MemSwapBytes
    shmSize        opts.MemBytes
    cpuShares      int64
    cpuPeriod      int64
    cpuQuota       int64
    cpuSetCpus     string
    cpuSetMems     string
    cgroupParent   string
    isolation      string
    quiet          bool
    noCache        bool
    rm             bool
    forceRm        bool
    pull           bool
    cacheFrom      []string
    compress       bool
    securityOpt    []string
    networkMode    string
    squash         bool
    target         string
    imageIDFile    string
    stream         bool
    platform       string
}

// dockerfileFromStdin returns true when the user specified that the Dockerfile
// should be read from stdin instead of a file
func (o buildOptions) dockerfileFromStdin() bool {
    return o.dockerfileName == "-"
}

// contextFromStdin returns true when the user specified that the build context
// should be read from stdin
func (o buildOptions) contextFromStdin() bool {
    return o.context == "-"
}

func newBuildOptions() buildOptions {
    ulimits := make(map[string]*units.Ulimit)
    return buildOptions{
        tags:       opts.NewListOpts(validateTag),
        buildArgs:  opts.NewListOpts(opts.ValidateEnv),
        ulimits:    opts.NewUlimitOpt(&ulimits),
        labels:     opts.NewListOpts(opts.ValidateEnv),
        extraHosts: opts.NewListOpts(opts.ValidateExtraHost),
    }
}

// NewBuildCommand creates a new `docker build` command
func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
    options := newBuildOptions()

    cmd := &cobra.Command{
        Use:   "build [OPTIONS] PATH | URL | -",
        Short: "Build an image from a Dockerfile",
        Args:  cli.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            options.context = args[0]
            return runBuild(dockerCli, options)
        },
    }

    flags := cmd.Flags()

    flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
    flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
    flags.Var(options.ulimits, "ulimit", "Ulimit options")
    flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
    flags.VarP(&options.memory, "memory", "m", "Memory limit")
    flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
    flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
    flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
    flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
    flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
    flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
    flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
    flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
    flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
    flags.Var(&options.labels, "label", "Set metadata for an image")
    flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
    flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
    flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
    flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
    flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
    flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
    flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
    flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
    flags.SetAnnotation("network", "version", []string{"1.25"})
    flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
    flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
    flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")

    command.AddTrustVerificationFlags(flags)
    command.AddPlatformFlag(flags, &options.platform)

    flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
    flags.SetAnnotation("squash", "experimental", nil)
    flags.SetAnnotation("squash", "version", []string{"1.25"})

    flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
    flags.SetAnnotation("stream", "experimental", nil)
    flags.SetAnnotation("stream", "version", []string{"1.31"})

    return cmd
}

// lastProgressOutput is the same as progress.Output except
// that it only outputs with the last update. It is used in
// non-terminal scenarios to suppress verbose messages
type lastProgressOutput struct {
    output progress.Output
}

// WriteProgress formats progress information from a ProgressReader.
func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
    if !prog.LastUpdate {
        return nil
    }

    return out.output.WriteProgress(prog)
}

// nolint: gocyclo
func runBuild(dockerCli command.Cli, options buildOptions) error {
    var (
        buildCtx      io.ReadCloser
        dockerfileCtx io.ReadCloser
        err           error
        contextDir    string
        tempDir       string
        relDockerfile string
        progBuff      io.Writer
        buildBuff     io.Writer
        remote        string
    )

    if options.dockerfileFromStdin() {
        if options.contextFromStdin() {
            return errors.New("invalid argument: can't use stdin for both build context and dockerfile")
        }
        dockerfileCtx = dockerCli.In()
    }

    specifiedContext := options.context
    progBuff = dockerCli.Out()
    buildBuff = dockerCli.Out()
    if options.quiet {
        progBuff = bytes.NewBuffer(nil)
        buildBuff = bytes.NewBuffer(nil)
    }
    if options.imageIDFile != "" {
        // Avoid leaving a stale file if we eventually fail
        if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
            return errors.Wrap(err, "Removing image ID file")
        }
    }

    switch {
    case options.contextFromStdin():
        // buildCtx is tar archive. if stdin was dockerfile then it is wrapped
        buildCtx, relDockerfile, err = build.GetContextFromReader(dockerCli.In(), options.dockerfileName)
    case isLocalDir(specifiedContext):
        contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, options.dockerfileName)
    case urlutil.IsGitURL(specifiedContext):
        tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, options.dockerfileName)
    case urlutil.IsURL(specifiedContext):
        buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName)
    default:
        return errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
    }

    if err != nil {
        if options.quiet && urlutil.IsURL(specifiedContext) {
            fmt.Fprintln(dockerCli.Err(), progBuff)
        }
        return errors.Errorf("unable to prepare context: %s", err)
    }

    if tempDir != "" {
        defer os.RemoveAll(tempDir)
        contextDir = tempDir
    }

    // read from a directory into tar archive
    if buildCtx == nil && !options.stream {
        excludes, err := build.ReadDockerignore(contextDir)
        if err != nil {
            return err
        }

        if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
            return errors.Errorf("error checking context: '%s'.", err)
        }

        // And canonicalize dockerfile name to a platform-independent one
        relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile)
        if err != nil {
            return errors.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err)
        }

        excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, options.dockerfileFromStdin())
        buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
            ExcludePatterns: excludes,
            ChownOpts:       &idtools.IDPair{UID: 0, GID: 0},
        })
        if err != nil {
            return err
        }
    }

    // replace Dockerfile if it was added from stdin and there is archive context
    if dockerfileCtx != nil && buildCtx != nil {
        buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
        if err != nil {
            return err
        }
    }

    // if streaming and dockerfile was not from stdin then read from file
    // to the same reader that is usually stdin
    if options.stream && dockerfileCtx == nil {
        dockerfileCtx, err = os.Open(relDockerfile)
        if err != nil {
            return errors.Wrapf(err, "failed to open %s", relDockerfile)
        }
        defer dockerfileCtx.Close()
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    var resolvedTags []*resolvedTag
    if command.IsTrusted() {
        translator := func(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) {
            return TrustedReference(ctx, dockerCli, ref, nil)
        }
        // if there is a tar wrapper, the dockerfile needs to be replaced inside it
        if buildCtx != nil {
            // Wrap the tar archive to replace the Dockerfile entry with the rewritten
            // Dockerfile which uses trusted pulls.
            buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, translator, &resolvedTags)
        } else if dockerfileCtx != nil {
            // if there was no archive context, still do the possible replacements in the Dockerfile
            newDockerfile, _, err := rewriteDockerfileFrom(ctx, dockerfileCtx, translator)
            if err != nil {
                return err
            }
            dockerfileCtx = ioutil.NopCloser(bytes.NewBuffer(newDockerfile))
        }
    }

    if options.compress {
        buildCtx, err = build.Compress(buildCtx)
        if err != nil {
            return err
        }
    }

    // Setup an upload progress bar
    progressOutput := streamformatter.NewProgressOutput(progBuff)
    if !dockerCli.Out().IsTerminal() {
        progressOutput = &lastProgressOutput{output: progressOutput}
    }

    // if up to this point nothing has set the context then we must have another
    // way for sending it (streaming) and set the context to the Dockerfile
    if dockerfileCtx != nil && buildCtx == nil {
        buildCtx = dockerfileCtx
    }

    s, err := trySession(dockerCli, contextDir)
    if err != nil {
        return err
    }

    var body io.Reader
    if buildCtx != nil && !options.stream {
        body = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
    }

    // add context stream to the session
    if options.stream && s != nil {
        syncDone := make(chan error) // used to signal first progress reporting completed.
        // progress would also send errors but don't need it here as errors
        // are handled by session.Run() and ImageBuild()
        if err := addDirToSession(s, contextDir, progressOutput, syncDone); err != nil {
            return err
        }

        buf := newBufferedWriter(syncDone, buildBuff)
        defer func() {
            select {
            case <-buf.flushed:
            case <-ctx.Done():
            }
        }()
        buildBuff = buf

        remote = clientSessionRemote
        body = buildCtx
    }

    configFile := dockerCli.ConfigFile()
    authConfigs, _ := configFile.GetAllCredentials()
    buildOptions := types.ImageBuildOptions{
        Memory:         options.memory.Value(),
        MemorySwap:     options.memorySwap.Value(),
        Tags:           options.tags.GetAll(),
        SuppressOutput: options.quiet,
        NoCache:        options.noCache,
        Remove:         options.rm,
        ForceRemove:    options.forceRm,
        PullParent:     options.pull,
        Isolation:      container.Isolation(options.isolation),
        CPUSetCPUs:     options.cpuSetCpus,
        CPUSetMems:     options.cpuSetMems,
        CPUShares:      options.cpuShares,
        CPUQuota:       options.cpuQuota,
        CPUPeriod:      options.cpuPeriod,
        CgroupParent:   options.cgroupParent,
        Dockerfile:     relDockerfile,
        ShmSize:        options.shmSize.Value(),
        Ulimits:        options.ulimits.GetList(),
        BuildArgs:      configFile.ParseProxyConfig(dockerCli.Client().DaemonHost(), options.buildArgs.GetAll()),
        AuthConfigs:    authConfigs,
        Labels:         opts.ConvertKVStringsToMap(options.labels.GetAll()),
        CacheFrom:      options.cacheFrom,
        SecurityOpt:    options.securityOpt,
        NetworkMode:    options.networkMode,
        Squash:         options.squash,
        ExtraHosts:     options.extraHosts.GetAll(),
        Target:         options.target,
        RemoteContext:  remote,
        Platform:       options.platform,
    }

    if s != nil {
        go func() {
            logrus.Debugf("running session: %v", s.ID())
            if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil {
                logrus.Error(err)
                cancel() // cancel progress context
            }
        }()
        buildOptions.SessionID = s.ID()
    }

    response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
    if err != nil {
        if options.quiet {
            fmt.Fprintf(dockerCli.Err(), "%s", progBuff)
        }
        cancel()
        return err
    }
    defer response.Body.Close()

    imageID := ""
    aux := func(auxJSON *json.RawMessage) {
        var result types.BuildResult
        if err := json.Unmarshal(*auxJSON, &result); err != nil {
            fmt.Fprintf(dockerCli.Err(), "Failed to parse aux message: %s", err)
        } else {
            imageID = result.ID
        }
    }

    err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), aux)
    if err != nil {
        if jerr, ok := err.(*jsonmessage.JSONError); ok {
            // If no error code is set, default to 1
            if jerr.Code == 0 {
                jerr.Code = 1
            }
            if options.quiet {
                fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff)
            }
            return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
        }
        return err
    }

    // Windows: show error message about modified file permissions if the
    // daemon isn't running Windows.
    if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
        fmt.Fprintln(dockerCli.Out(), "SECURITY WARNING: You are building a Docker "+
            "image from Windows against a non-Windows Docker host. All files and "+
            "directories added to build context will have '-rwxr-xr-x' permissions. "+
            "It is recommended to double check and reset permissions for sensitive "+
            "files and directories.")
    }

    // Everything worked so if -q was provided the output from the daemon
    // should be just the image ID and we'll print that to stdout.
    if options.quiet {
        imageID = fmt.Sprintf("%s", buildBuff)
        fmt.Fprintf(dockerCli.Out(), imageID)
    }

    if options.imageIDFile != "" {
        if imageID == "" {
            return errors.Errorf("Server did not provide an image ID. Cannot write %s", options.imageIDFile)
        }
        if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
            return err
        }
    }
    if command.IsTrusted() {
        // Since the build was successful, now we must tag any of the resolved
        // images from the above Dockerfile rewrite.
        for _, resolved := range resolvedTags {
            if err := TagTrusted(ctx, dockerCli, resolved.digestRef, resolved.tagRef); err != nil {
                return err
            }
        }
    }

    return nil
}

func isLocalDir(c string) bool {
    _, err := os.Stat(c)
    return err == nil
}

type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error)

// validateTag checks if the given image name can be resolved.
func validateTag(rawRepo string) (string, error) {
    _, err := reference.ParseNormalizedNamed(rawRepo)
    if err != nil {
        return "", err
    }

    return rawRepo, nil
}

var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)

// resolvedTag records the repository, tag, and resolved digest reference
// from a Dockerfile rewrite.
type resolvedTag struct {
    digestRef reference.Canonical
    tagRef    reference.NamedTagged
}

// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in
// "FROM <image>" instructions to a digest reference. `translator` is a
// function that takes a repository name and tag reference and returns a
// trusted digest reference.
func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) {
    scanner := bufio.NewScanner(dockerfile)
    buf := bytes.NewBuffer(nil)

    // Scan the lines of the Dockerfile, looking for a "FROM" line.
    for scanner.Scan() {
        line := scanner.Text()

        matches := dockerfileFromLinePattern.FindStringSubmatch(line)
        if matches != nil && matches[1] != api.NoBaseImageSpecifier {
            // Replace the line with a resolved "FROM repo@digest"
            var ref reference.Named
            ref, err = reference.ParseNormalizedNamed(matches[1])
            if err != nil {
                return nil, nil, err
            }
            ref = reference.TagNameOnly(ref)
            if ref, ok := ref.(reference.NamedTagged); ok && command.IsTrusted() {
                trustedRef, err := translator(ctx, ref)
                if err != nil {
                    return nil, nil, err
                }

                line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", reference.FamiliarString(trustedRef)))
                resolvedTags = append(resolvedTags, &resolvedTag{
                    digestRef: trustedRef,
                    tagRef:    ref,
                })
            }
        }

        _, err := fmt.Fprintln(buf, line)
        if err != nil {
            return nil, nil, err
        }
    }

    return buf.Bytes(), resolvedTags, scanner.Err()
}

// replaceDockerfileTarWrapper wraps the given input tar archive stream and
// replaces the entry with the given Dockerfile name with the contents of the
// new Dockerfile. Returns a new tar archive stream with the replaced
// Dockerfile.
func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser {
    pipeReader, pipeWriter := io.Pipe()
    go func() {
        tarReader := tar.NewReader(inputTarStream)
        tarWriter := tar.NewWriter(pipeWriter)

        defer inputTarStream.Close()

        for {
            hdr, err := tarReader.Next()
            if err == io.EOF {
                // Signals end of archive.
                tarWriter.Close()
                pipeWriter.Close()
                return
            }
            if err != nil {
                pipeWriter.CloseWithError(err)
                return
            }

            content := io.Reader(tarReader)
            if hdr.Name == dockerfileName {
                // This entry is the Dockerfile. Since the tar archive was
                // generated from a directory on the local filesystem, the
                // Dockerfile will only appear once in the archive.
                var newDockerfile []byte
                newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator)
                if err != nil {
                    pipeWriter.CloseWithError(err)
                    return
                }
                hdr.Size = int64(len(newDockerfile))
                content = bytes.NewBuffer(newDockerfile)
            }

            if err := tarWriter.WriteHeader(hdr); err != nil {
                pipeWriter.CloseWithError(err)
                return
            }

            if _, err := io.Copy(tarWriter, content); err != nil {
                pipeWriter.CloseWithError(err)
                return
            }
        }
    }()

    return pipeReader
}
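
A self-contained demo (not part of this commit) of the FROM-line pattern used by the trust rewrite above; the regex is copied verbatim from dockerfileFromLinePattern:

package main

import (
    "fmt"
    "regexp"
)

// Same pattern as dockerfileFromLinePattern in build.go above.
var fromLine = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P<image>[^ \f\r\t\v\n#]+)`)

func main() {
    for _, line := range []string{
        "FROM golang:1.10 AS builder",
        "  from alpine # comment",
        "RUN make",
    } {
        // Group 1 captures the base image name; non-FROM lines don't match.
        if m := fromLine.FindStringSubmatch(line); m != nil {
            fmt.Printf("base image: %s\n", m[1])
        }
    }
}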
153  vendor/github.com/docker/cli/cli/command/image/build_session.go  (generated, vendored)
@@ -1,153 +0,0 @@
package image

import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/image/build"
    cliconfig "github.com/docker/cli/cli/config"
    "github.com/docker/docker/api/types/versions"
    "github.com/docker/docker/pkg/progress"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/session/filesync"
    "github.com/pkg/errors"
    "golang.org/x/time/rate"
)

const clientSessionRemote = "client-session"

func isSessionSupported(dockerCli command.Cli) bool {
    return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
}

func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) {
    var s *session.Session
    if isSessionSupported(dockerCli) {
        sharedKey, err := getBuildSharedKey(contextDir)
        if err != nil {
            return nil, errors.Wrap(err, "failed to get build shared key")
        }
        s, err = session.NewSession(filepath.Base(contextDir), sharedKey)
        if err != nil {
            return nil, errors.Wrap(err, "failed to create session")
        }
    }
    return s, nil
}

func addDirToSession(session *session.Session, contextDir string, progressOutput progress.Output, done chan error) error {
    excludes, err := build.ReadDockerignore(contextDir)
    if err != nil {
        return err
    }

    p := &sizeProgress{out: progressOutput, action: "Streaming build context to Docker daemon"}

    workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
        {Dir: contextDir, Excludes: excludes},
    })
    session.Allow(workdirProvider)

    // this will be replaced on parallel build jobs. keep the current
    // progressbar for now
    if snpc, ok := workdirProvider.(interface {
        SetNextProgressCallback(func(int, bool), chan error)
    }); ok {
        snpc.SetNextProgressCallback(p.update, done)
    }

    return nil
}

type sizeProgress struct {
    out     progress.Output
    action  string
    limiter *rate.Limiter
}

func (sp *sizeProgress) update(size int, last bool) {
    if sp.limiter == nil {
        sp.limiter = rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
    }
    if last || sp.limiter.Allow() {
        sp.out.WriteProgress(progress.Progress{Action: sp.action, Current: int64(size), LastUpdate: last})
    }
}

type bufferedWriter struct {
    done chan error
    io.Writer
    buf     *bytes.Buffer
    flushed chan struct{}
    mu      sync.Mutex
}

func newBufferedWriter(done chan error, w io.Writer) *bufferedWriter {
    bw := &bufferedWriter{done: done, Writer: w, buf: new(bytes.Buffer), flushed: make(chan struct{})}
    go func() {
        <-done
        bw.flushBuffer()
    }()
    return bw
}

func (bw *bufferedWriter) Write(dt []byte) (int, error) {
    select {
    case <-bw.done:
        bw.flushBuffer()
        return bw.Writer.Write(dt)
    default:
        return bw.buf.Write(dt)
    }
}

func (bw *bufferedWriter) flushBuffer() {
    bw.mu.Lock()
    select {
    case <-bw.flushed:
    default:
        bw.Writer.Write(bw.buf.Bytes())
        close(bw.flushed)
    }
    bw.mu.Unlock()
}

func getBuildSharedKey(dir string) (string, error) {
    // build session is hash of build dir with node based randomness
    s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir)))
    return hex.EncodeToString(s[:]), nil
}

func tryNodeIdentifier() string {
    out := cliconfig.Dir() // return config dir as default on permission error
    if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil {
        sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID")
        if _, err := os.Lstat(sessionFile); err != nil {
            if os.IsNotExist(err) { // create a new file with stored randomness
                b := make([]byte, 32)
                if _, err := rand.Read(b); err != nil {
                    return out
                }
                if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
                    return out
                }
            }
        }

        dt, err := ioutil.ReadFile(sessionFile)
        if err == nil {
            return string(dt)
        }
    }
    return out
}
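
A standalone sketch (not part of this commit) of the shared-key derivation mirrored from getBuildSharedKey above: the session key is a hash of the context directory mixed with per-node randomness, so repeated builds of the same directory on the same machine reuse a stable key:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// sharedKey mirrors getBuildSharedKey: sha256 over "nodeID:dir".
func sharedKey(nodeID, dir string) string {
    s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", nodeID, dir)))
    return hex.EncodeToString(s[:])
}

func main() {
    // The node ID is normally random hex persisted in ~/.docker/.buildNodeID.
    fmt.Println(sharedKey("0a1b2c3d", "/home/me/project"))
}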
33  vendor/github.com/docker/cli/cli/command/image/cmd.go  (generated, vendored)
@@ -1,33 +0,0 @@
package image

import (
    "github.com/spf13/cobra"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
)

// NewImageCommand returns a cobra command for `image` subcommands
func NewImageCommand(dockerCli command.Cli) *cobra.Command {
    cmd := &cobra.Command{
        Use:   "image",
        Short: "Manage images",
        Args:  cli.NoArgs,
        RunE:  command.ShowHelp(dockerCli.Err()),
    }
    cmd.AddCommand(
        NewBuildCommand(dockerCli),
        NewHistoryCommand(dockerCli),
        NewImportCommand(dockerCli),
        NewLoadCommand(dockerCli),
        NewPullCommand(dockerCli),
        NewPushCommand(dockerCli),
        NewSaveCommand(dockerCli),
        NewTagCommand(dockerCli),
        newListCommand(dockerCli),
        newRemoveCommand(dockerCli),
        newInspectCommand(dockerCli),
        NewPruneCommand(dockerCli),
    )
    return cmd
}
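
A wiring sketch (not part of this commit) attaching this command tree to a root command, using NewDockerCli from cli.go above:

package main

import (
    "fmt"
    "os"

    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/image"
    "github.com/spf13/cobra"
)

func main() {
    dockerCli := command.NewDockerCli(os.Stdin, os.Stdout, os.Stderr)
    rootCmd := &cobra.Command{Use: "docker"}
    // In the real CLI, dockerCli.Initialize(...) runs in a PersistentPreRunE
    // hook before any subcommand touches the API client.
    rootCmd.AddCommand(image.NewImageCommand(dockerCli))
    if err := rootCmd.Execute(); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}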
64  vendor/github.com/docker/cli/cli/command/image/history.go  (generated, vendored)
@@ -1,64 +0,0 @@
package image

import (
    "golang.org/x/net/context"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/spf13/cobra"
)

type historyOptions struct {
    image string

    human   bool
    quiet   bool
    noTrunc bool
    format  string
}

// NewHistoryCommand creates a new `docker history` command
func NewHistoryCommand(dockerCli command.Cli) *cobra.Command {
    var opts historyOptions

    cmd := &cobra.Command{
        Use:   "history [OPTIONS] IMAGE",
        Short: "Show the history of an image",
        Args:  cli.ExactArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.image = args[0]
            return runHistory(dockerCli, opts)
        },
    }

    flags := cmd.Flags()

    flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format")
    flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs")
    flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
    flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")

    return cmd
}

func runHistory(dockerCli command.Cli, opts historyOptions) error {
    ctx := context.Background()

    history, err := dockerCli.Client().ImageHistory(ctx, opts.image)
    if err != nil {
        return err
    }

    format := opts.format
    if len(format) == 0 {
        format = formatter.TableFormatKey
    }

    historyCtx := formatter.Context{
        Output: dockerCli.Out(),
        Format: formatter.NewHistoryFormat(format, opts.quiet, opts.human),
        Trunc:  !opts.noTrunc,
    }
    return formatter.HistoryWrite(historyCtx, opts.human, history)
}
87  vendor/github.com/docker/cli/cli/command/image/import.go  (generated, vendored)
@@ -1,87 +0,0 @@
package image

import (
    "io"
    "os"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    dockeropts "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/docker/docker/pkg/urlutil"
    "github.com/spf13/cobra"
    "golang.org/x/net/context"
)

type importOptions struct {
    source    string
    reference string
    changes   dockeropts.ListOpts
    message   string
}

// NewImportCommand creates a new `docker import` command
func NewImportCommand(dockerCli command.Cli) *cobra.Command {
    var options importOptions

    cmd := &cobra.Command{
        Use:   "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]",
        Short: "Import the contents from a tarball to create a filesystem image",
        Args:  cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            options.source = args[0]
            if len(args) > 1 {
                options.reference = args[1]
            }
            return runImport(dockerCli, options)
        },
    }

    flags := cmd.Flags()

    options.changes = dockeropts.NewListOpts(nil)
    flags.VarP(&options.changes, "change", "c", "Apply Dockerfile instruction to the created image")
    flags.StringVarP(&options.message, "message", "m", "", "Set commit message for imported image")

    return cmd
}

func runImport(dockerCli command.Cli, options importOptions) error {
    var (
        in      io.Reader
        srcName = options.source
    )

    if options.source == "-" {
        in = dockerCli.In()
    } else if !urlutil.IsURL(options.source) {
        srcName = "-"
        file, err := os.Open(options.source)
        if err != nil {
            return err
        }
        defer file.Close()
        in = file
    }

    source := types.ImageImportSource{
        Source:     in,
        SourceName: srcName,
    }

    importOptions := types.ImageImportOptions{
        Message: options.message,
        Changes: options.changes.GetAll(),
    }

    clnt := dockerCli.Client()

    responseBody, err := clnt.ImageImport(context.Background(), source, options.reference, importOptions)
    if err != nil {
        return err
    }
    defer responseBody.Close()

    return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
}
44  vendor/github.com/docker/cli/cli/command/image/inspect.go  (generated, vendored)
@@ -1,44 +0,0 @@
package image

import (
    "golang.org/x/net/context"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/inspect"
    "github.com/spf13/cobra"
)

type inspectOptions struct {
    format string
    refs   []string
}

// newInspectCommand creates a new cobra.Command for `docker image inspect`
func newInspectCommand(dockerCli command.Cli) *cobra.Command {
    var opts inspectOptions

    cmd := &cobra.Command{
        Use:   "inspect [OPTIONS] IMAGE [IMAGE...]",
        Short: "Display detailed information on one or more images",
        Args:  cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.refs = args
            return runInspect(dockerCli, opts)
        },
    }

    flags := cmd.Flags()
    flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template")
    return cmd
}

func runInspect(dockerCli command.Cli, opts inspectOptions) error {
    client := dockerCli.Client()
    ctx := context.Background()

    getRefFunc := func(ref string) (interface{}, []byte, error) {
        return client.ImageInspectWithRaw(ctx, ref)
    }
    return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRefFunc)
}
95  vendor/github.com/docker/cli/cli/command/image/list.go  (generated, vendored)
@@ -1,95 +0,0 @@
package image

import (
    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/docker/cli/cli/command/formatter"
    "github.com/docker/cli/opts"
    "github.com/docker/docker/api/types"
    "github.com/spf13/cobra"
    "golang.org/x/net/context"
)

type imagesOptions struct {
    matchName string

    quiet       bool
    all         bool
    noTrunc     bool
    showDigests bool
    format      string
    filter      opts.FilterOpt
}

// NewImagesCommand creates a new `docker images` command
func NewImagesCommand(dockerCli command.Cli) *cobra.Command {
    options := imagesOptions{filter: opts.NewFilterOpt()}

    cmd := &cobra.Command{
        Use:   "images [OPTIONS] [REPOSITORY[:TAG]]",
        Short: "List images",
        Args:  cli.RequiresMaxArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            if len(args) > 0 {
                options.matchName = args[0]
            }
            return runImages(dockerCli, options)
        },
    }

    flags := cmd.Flags()

    flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only show numeric IDs")
    flags.BoolVarP(&options.all, "all", "a", false, "Show all images (default hides intermediate images)")
    flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
    flags.BoolVar(&options.showDigests, "digests", false, "Show digests")
    flags.StringVar(&options.format, "format", "", "Pretty-print images using a Go template")
    flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided")

    return cmd
}

func newListCommand(dockerCli command.Cli) *cobra.Command {
    cmd := *NewImagesCommand(dockerCli)
    cmd.Aliases = []string{"images", "list"}
    cmd.Use = "ls [OPTIONS] [REPOSITORY[:TAG]]"
    return &cmd
}

func runImages(dockerCli command.Cli, options imagesOptions) error {
    ctx := context.Background()

    filters := options.filter.Value()
    if options.matchName != "" {
        filters.Add("reference", options.matchName)
    }

    listOptions := types.ImageListOptions{
        All:     options.all,
        Filters: filters,
    }

    images, err := dockerCli.Client().ImageList(ctx, listOptions)
    if err != nil {
        return err
    }

    format := options.format
    if len(format) == 0 {
        if len(dockerCli.ConfigFile().ImagesFormat) > 0 && !options.quiet {
            format = dockerCli.ConfigFile().ImagesFormat
        } else {
            format = formatter.TableFormatKey
        }
    }

    imageCtx := formatter.ImageContext{
        Context: formatter.Context{
            Output: dockerCli.Out(),
            Format: formatter.NewImageFormat(format, options.quiet, options.showDigests),
            Trunc:  !options.noTrunc,
        },
        Digest: options.showDigests,
    }
    return formatter.ImageWrite(imageCtx, images)
}
77  vendor/github.com/docker/cli/cli/command/image/load.go  (generated, vendored)
|
@ -1,77 +0,0 @@
|
|||
package image

import (
	"io"

	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/system"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type loadOptions struct {
	input string
	quiet bool
}

// NewLoadCommand creates a new `docker load` command
func NewLoadCommand(dockerCli command.Cli) *cobra.Command {
	var opts loadOptions

	cmd := &cobra.Command{
		Use:   "load [OPTIONS]",
		Short: "Load an image from a tar archive or STDIN",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			return runLoad(dockerCli, opts)
		},
	}

	flags := cmd.Flags()

	flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN")
	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output")

	return cmd
}

func runLoad(dockerCli command.Cli, opts loadOptions) error {

	var input io.Reader = dockerCli.In()
	if opts.input != "" {
		// We use system.OpenSequential to use sequential file access on Windows, avoiding
		// depleting the standby list un-necessarily. On Linux, this equates to a regular os.Open.
		file, err := system.OpenSequential(opts.input)
		if err != nil {
			return err
		}
		defer file.Close()
		input = file
	}

	// To avoid getting stuck, verify that a tar file is given either in
	// the input flag or through stdin and if not display an error message and exit.
	if opts.input == "" && dockerCli.In().IsTerminal() {
		return errors.Errorf("requested load from stdin, but stdin is empty")
	}

	if !dockerCli.Out().IsTerminal() {
		opts.quiet = true
	}
	response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	if response.Body != nil && response.JSON {
		return jsonmessage.DisplayJSONMessagesToStream(response.Body, dockerCli.Out(), nil)
	}

	_, err = io.Copy(dockerCli.Out(), response.Body)
	return err
}
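As a usage sketch (not part of this diff), the load command is wired onto a root cobra command the same way the other image commands are; the package name and the dockerCli value are assumptions, standing in for any command.Cli implementation:

package clidemo

import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/image"
	"github.com/spf13/cobra"
)

// newRootCommand shows how NewLoadCommand plugs into a CLI root; after this,
// `docker load -i images.tar` reaches runLoad with opts.input set.
func newRootCommand(dockerCli command.Cli) *cobra.Command {
	rootCmd := &cobra.Command{Use: "docker"}
	rootCmd.AddCommand(image.NewLoadCommand(dockerCli))
	return rootCmd
}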
95
vendor/github.com/docker/cli/cli/command/image/prune.go
generated
vendored
@ -1,95 +0,0 @@
package image

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	units "github.com/docker/go-units"
	"github.com/spf13/cobra"
)

type pruneOptions struct {
	force  bool
	all    bool
	filter opts.FilterOpt
}

// NewPruneCommand returns a new cobra prune command for images
func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
	options := pruneOptions{filter: opts.NewFilterOpt()}

	cmd := &cobra.Command{
		Use:   "prune [OPTIONS]",
		Short: "Remove unused images",
		Args:  cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error {
			spaceReclaimed, output, err := runPrune(dockerCli, options)
			if err != nil {
				return err
			}
			if output != "" {
				fmt.Fprintln(dockerCli.Out(), output)
			}
			fmt.Fprintln(dockerCli.Out(), "Total reclaimed space:", units.HumanSize(float64(spaceReclaimed)))
			return nil
		},
		Annotations: map[string]string{"version": "1.25"},
	}

	flags := cmd.Flags()
	flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
	flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
	flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=<timestamp>')")

	return cmd
}

const (
	allImageWarning = `WARNING! This will remove all images without at least one container associated to them.
Are you sure you want to continue?`
	danglingWarning = `WARNING! This will remove all dangling images.
Are you sure you want to continue?`
)

func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
	pruneFilters := options.filter.Value()
	pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
	pruneFilters = command.PruneFilters(dockerCli, pruneFilters)

	warning := danglingWarning
	if options.all {
		warning = allImageWarning
	}
	if !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {
		return 0, "", nil
	}

	report, err := dockerCli.Client().ImagesPrune(context.Background(), pruneFilters)
	if err != nil {
		return 0, "", err
	}

	if len(report.ImagesDeleted) > 0 {
		output = "Deleted Images:\n"
		for _, st := range report.ImagesDeleted {
			if st.Untagged != "" {
				output += fmt.Sprintln("untagged:", st.Untagged)
			} else {
				output += fmt.Sprintln("deleted:", st.Deleted)
			}
		}
		spaceReclaimed = report.SpaceReclaimed
	}

	return spaceReclaimed, output, nil
}

// RunPrune calls the Image Prune API
// This returns the amount of space reclaimed and a detailed output string
func RunPrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {
	return runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})
}
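A sketch of the exported entry point above (package name and dockerCli are assumptions): RunPrune passes force: true internally, so no confirmation prompt is shown, and with all=false only dangling images are pruned.

package clidemo

import (
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/image"
	"github.com/docker/cli/opts"
)

// pruneDangling removes dangling images without prompting and reports
// the detailed output plus the bytes reclaimed.
func pruneDangling(dockerCli command.Cli) error {
	spaceReclaimed, output, err := image.RunPrune(dockerCli, false, opts.NewFilterOpt())
	if err != nil {
		return err
	}
	if output != "" {
		fmt.Fprintln(dockerCli.Out(), output)
	}
	fmt.Fprintf(dockerCli.Out(), "reclaimed %d bytes\n", spaceReclaimed)
	return nil
}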
80
vendor/github.com/docker/cli/cli/command/image/pull.go
generated
vendored
@ -1,80 +0,0 @@
package image

import (
	"fmt"
	"strings"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/distribution/reference"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type pullOptions struct {
	remote   string
	all      bool
	platform string
}

// NewPullCommand creates a new `docker pull` command
func NewPullCommand(dockerCli command.Cli) *cobra.Command {
	var opts pullOptions

	cmd := &cobra.Command{
		Use:   "pull [OPTIONS] NAME[:TAG|@DIGEST]",
		Short: "Pull an image or a repository from a registry",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.remote = args[0]
			return runPull(dockerCli, opts)
		},
	}

	flags := cmd.Flags()

	flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository")

	command.AddPlatformFlag(flags, &opts.platform)
	command.AddTrustVerificationFlags(flags)

	return cmd
}

func runPull(cli command.Cli, opts pullOptions) error {
	distributionRef, err := reference.ParseNormalizedNamed(opts.remote)
	switch {
	case err != nil:
		return err
	case opts.all && !reference.IsNameOnly(distributionRef):
		return errors.New("tag can't be used with --all-tags/-a")
	case !opts.all && reference.IsNameOnly(distributionRef):
		distributionRef = reference.TagNameOnly(distributionRef)
		if tagged, ok := distributionRef.(reference.Tagged); ok {
			fmt.Fprintf(cli.Out(), "Using default tag: %s\n", tagged.Tag())
		}
	}

	ctx := context.Background()
	imgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), distributionRef.String())
	if err != nil {
		return err
	}

	// Check if reference has a digest
	_, isCanonical := distributionRef.(reference.Canonical)
	if command.IsTrusted() && !isCanonical {
		err = trustedPull(ctx, cli, imgRefAndAuth, opts.platform)
	} else {
		err = imagePullPrivileged(ctx, cli, imgRefAndAuth, opts.all, opts.platform)
	}
	if err != nil {
		if strings.Contains(err.Error(), "when fetching 'plugin'") {
			return errors.New(err.Error() + " - Use `docker plugin install`")
		}
		return err
	}
	return nil
}
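A small sketch of the reference normalization that runPull relies on: ParseNormalizedNamed expands a bare name to its fully qualified form, and TagNameOnly supplies the :latest default that triggers the "Using default tag" message (the package name is an assumption):

package clidemo

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func normalizeExample() error {
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		return err
	}
	fmt.Println(named.Name())                     // docker.io/library/ubuntu
	fmt.Println(reference.IsNameOnly(named))      // true: no tag or digest yet
	tagged := reference.TagNameOnly(named)        // adds the default :latest tag
	fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
	return nil
}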
61
vendor/github.com/docker/cli/cli/command/image/push.go
generated
vendored
@ -1,61 +0,0 @@
package image

import (
	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/registry"
	"github.com/spf13/cobra"
)

// NewPushCommand creates a new `docker push` command
func NewPushCommand(dockerCli command.Cli) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "push [OPTIONS] NAME[:TAG]",
		Short: "Push an image or a repository to a registry",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runPush(dockerCli, args[0])
		},
	}

	flags := cmd.Flags()

	command.AddTrustSigningFlags(flags)

	return cmd
}

func runPush(dockerCli command.Cli, remote string) error {
	ref, err := reference.ParseNormalizedNamed(remote)
	if err != nil {
		return err
	}

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return err
	}

	ctx := context.Background()

	// Resolve the Auth config relevant for this server
	authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCli, repoInfo.Index, "push")

	if command.IsTrusted() {
		return TrustedPush(ctx, dockerCli, repoInfo, ref, authConfig, requestPrivilege)
	}

	responseBody, err := imagePushPrivileged(ctx, dockerCli, authConfig, ref, requestPrivilege)
	if err != nil {
		return err
	}

	defer responseBody.Close()
	return jsonmessage.DisplayJSONMessagesToStream(responseBody, dockerCli.Out(), nil)
}
87
vendor/github.com/docker/cli/cli/command/image/remove.go
generated
vendored
@ -1,87 +0,0 @@
package image

import (
	"fmt"
	"strings"

	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	apiclient "github.com/docker/docker/client"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type removeOptions struct {
	force   bool
	noPrune bool
}

// NewRemoveCommand creates a new `docker remove` command
func NewRemoveCommand(dockerCli command.Cli) *cobra.Command {
	var opts removeOptions

	cmd := &cobra.Command{
		Use:   "rmi [OPTIONS] IMAGE [IMAGE...]",
		Short: "Remove one or more images",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			return runRemove(dockerCli, opts, args)
		},
	}

	flags := cmd.Flags()

	flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image")
	flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents")

	return cmd
}

func newRemoveCommand(dockerCli command.Cli) *cobra.Command {
	cmd := *NewRemoveCommand(dockerCli)
	cmd.Aliases = []string{"rmi", "remove"}
	cmd.Use = "rm [OPTIONS] IMAGE [IMAGE...]"
	return &cmd
}

func runRemove(dockerCli command.Cli, opts removeOptions, images []string) error {
	client := dockerCli.Client()
	ctx := context.Background()

	options := types.ImageRemoveOptions{
		Force:         opts.force,
		PruneChildren: !opts.noPrune,
	}

	var errs []string
	var fatalErr = false
	for _, img := range images {
		dels, err := client.ImageRemove(ctx, img, options)
		if err != nil {
			if !apiclient.IsErrNotFound(err) {
				fatalErr = true
			}
			errs = append(errs, err.Error())
		} else {
			for _, del := range dels {
				if del.Deleted != "" {
					fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted)
				} else {
					fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged)
				}
			}
		}
	}

	if len(errs) > 0 {
		msg := strings.Join(errs, "\n")
		if !opts.force || fatalErr {
			return errors.New(msg)
		}
		fmt.Fprintf(dockerCli.Err(), msg)
	}
	return nil
}
72
vendor/github.com/docker/cli/cli/command/image/save.go
generated
vendored
@ -1,72 +0,0 @@
package image

import (
	"io"
	"os"
	"path/filepath"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"golang.org/x/net/context"
)

type saveOptions struct {
	images []string
	output string
}

// NewSaveCommand creates a new `docker save` command
func NewSaveCommand(dockerCli command.Cli) *cobra.Command {
	var opts saveOptions

	cmd := &cobra.Command{
		Use:   "save [OPTIONS] IMAGE [IMAGE...]",
		Short: "Save one or more images to a tar archive (streamed to STDOUT by default)",
		Args:  cli.RequiresMinArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.images = args
			return runSave(dockerCli, opts)
		},
	}

	flags := cmd.Flags()

	flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT")

	return cmd
}

func runSave(dockerCli command.Cli, opts saveOptions) error {
	if opts.output == "" && dockerCli.Out().IsTerminal() {
		return errors.New("cowardly refusing to save to a terminal. Use the -o flag or redirect")
	}

	if err := validateOutputPath(opts.output); err != nil {
		return errors.Wrap(err, "failed to save image")
	}

	responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	if opts.output == "" {
		_, err := io.Copy(dockerCli.Out(), responseBody)
		return err
	}

	return command.CopyToFile(opts.output, responseBody)
}

func validateOutputPath(path string) error {
	dir := filepath.Dir(path)
	if dir != "" && dir != "." {
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			return errors.Errorf("unable to validate output path: directory %q does not exist", dir)
		}
	}
	return nil
}
41
vendor/github.com/docker/cli/cli/command/image/tag.go
generated
vendored
@ -1,41 +0,0 @@
package image

import (
	"golang.org/x/net/context"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/spf13/cobra"
)

type tagOptions struct {
	image string
	name  string
}

// NewTagCommand creates a new `docker tag` command
func NewTagCommand(dockerCli command.Cli) *cobra.Command {
	var opts tagOptions

	cmd := &cobra.Command{
		Use:   "tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG]",
		Short: "Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE",
		Args:  cli.ExactArgs(2),
		RunE: func(cmd *cobra.Command, args []string) error {
			opts.image = args[0]
			opts.name = args[1]
			return runTag(dockerCli, opts)
		},
	}

	flags := cmd.Flags()
	flags.SetInterspersed(false)

	return cmd
}

func runTag(dockerCli command.Cli, opts tagOptions) error {
	ctx := context.Background()

	return dockerCli.Client().ImageTag(ctx, opts.image, opts.name)
}
363
vendor/github.com/docker/cli/cli/command/image/trust.go
generated
vendored
@ -1,363 +0,0 @@
package image

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"sort"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/registry"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
	"golang.org/x/net/context"
)

type target struct {
	name   string
	digest digest.Digest
	size   int64
}

// TrustedPush handles content trust pushing of an image
func TrustedPush(ctx context.Context, cli command.Cli, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error {
	responseBody, err := imagePushPrivileged(ctx, cli, authConfig, ref, requestPrivilege)
	if err != nil {
		return err
	}

	defer responseBody.Close()

	return PushTrustedReference(cli, repoInfo, ref, authConfig, responseBody)
}

// PushTrustedReference pushes a canonical reference to the trust server.
// nolint: gocyclo
func PushTrustedReference(streams command.Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, in io.Reader) error {
	// If it is a trusted push we would like to find the target entry which match the
	// tag provided in the function and then do an AddTarget later.
	target := &client.Target{}
	// Count the times of calling for handleTarget,
	// if it is called more that once, that should be considered an error in a trusted push.
	cnt := 0
	handleTarget := func(aux *json.RawMessage) {
		cnt++
		if cnt > 1 {
			// handleTarget should only be called one. This will be treated as an error.
			return
		}

		var pushResult types.PushResult
		err := json.Unmarshal(*aux, &pushResult)
		if err == nil && pushResult.Tag != "" {
			if dgst, err := digest.Parse(pushResult.Digest); err == nil {
				h, err := hex.DecodeString(dgst.Hex())
				if err != nil {
					target = nil
					return
				}
				target.Name = pushResult.Tag
				target.Hashes = data.Hashes{string(dgst.Algorithm()): h}
				target.Length = int64(pushResult.Size)
			}
		}
	}

	var tag string
	switch x := ref.(type) {
	case reference.Canonical:
		return errors.New("cannot push a digest reference")
	case reference.NamedTagged:
		tag = x.Tag()
	default:
		// We want trust signatures to always take an explicit tag,
		// otherwise it will act as an untrusted push.
		if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), nil); err != nil {
			return err
		}
		fmt.Fprintln(streams.Err(), "No tag specified, skipping trust metadata push")
		return nil
	}

	if err := jsonmessage.DisplayJSONMessagesToStream(in, streams.Out(), handleTarget); err != nil {
		return err
	}

	if cnt > 1 {
		return errors.Errorf("internal error: only one call to handleTarget expected")
	}

	if target == nil {
		return errors.Errorf("no targets found, please provide a specific tag in order to sign it")
	}

	fmt.Fprintln(streams.Out(), "Signing and pushing trust metadata")

	repo, err := trust.GetNotaryRepository(streams.In(), streams.Out(), command.UserAgent(), repoInfo, &authConfig, "push", "pull")
	if err != nil {
		return errors.Wrap(err, "error establishing connection to trust repository")
	}

	// get the latest repository metadata so we can figure out which roles to sign
	_, err = repo.ListTargets()

	switch err.(type) {
	case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
		keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole)
		var rootKeyID string
		// always select the first root key
		if len(keys) > 0 {
			sort.Strings(keys)
			rootKeyID = keys[0]
		} else {
			rootPublicKey, err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey)
			if err != nil {
				return err
			}
			rootKeyID = rootPublicKey.ID()
		}

		// Initialize the notary repository with a remotely managed snapshot key
		if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil {
			return trust.NotaryError(repoInfo.Name.Name(), err)
		}
		fmt.Fprintf(streams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
		err = repo.AddTarget(target, data.CanonicalTargetsRole)
	case nil:
		// already initialized and we have successfully downloaded the latest metadata
		err = AddTargetToAllSignableRoles(repo, target)
	default:
		return trust.NotaryError(repoInfo.Name.Name(), err)
	}

	if err == nil {
		err = repo.Publish()
	}

	if err != nil {
		err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag)
		return trust.NotaryError(repoInfo.Name.Name(), err)
	}

	fmt.Fprintf(streams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
	return nil
}

// AddTargetToAllSignableRoles attempts to add the image target to all the top level delegation roles we can
// (based on whether we have the signing key and whether the role's path allows
// us to).
// If there are no delegation roles, we add to the targets role.
func AddTargetToAllSignableRoles(repo client.Repository, target *client.Target) error {
	signableRoles, err := trust.GetSignableRoles(repo, target)
	if err != nil {
		return err
	}

	return repo.AddTarget(target, signableRoles...)
}

// imagePushPrivileged push the image
func imagePushPrivileged(ctx context.Context, cli command.Cli, authConfig types.AuthConfig, ref reference.Reference, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) {
	encodedAuth, err := command.EncodeAuthToBase64(authConfig)
	if err != nil {
		return nil, err
	}
	options := types.ImagePushOptions{
		RegistryAuth:  encodedAuth,
		PrivilegeFunc: requestPrivilege,
	}

	return cli.Client().ImagePush(ctx, reference.FamiliarString(ref), options)
}

// trustedPull handles content trust pulling of an image
func trustedPull(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, platform string) error {
	refs, err := getTrustedPullTargets(cli, imgRefAndAuth)
	if err != nil {
		return err
	}

	ref := imgRefAndAuth.Reference()
	for i, r := range refs {
		displayTag := r.name
		if displayTag != "" {
			displayTag = ":" + displayTag
		}
		fmt.Fprintf(cli.Out(), "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), reference.FamiliarName(ref), displayTag, r.digest)

		trustedRef, err := reference.WithDigest(reference.TrimNamed(ref), r.digest)
		if err != nil {
			return err
		}
		updatedImgRefAndAuth, err := trust.GetImageReferencesAndAuth(ctx, AuthResolver(cli), trustedRef.String())
		if err != nil {
			return err
		}
		if err := imagePullPrivileged(ctx, cli, updatedImgRefAndAuth, false, platform); err != nil {
			return err
		}

		tagged, err := reference.WithTag(reference.TrimNamed(ref), r.name)
		if err != nil {
			return err
		}

		if err := TagTrusted(ctx, cli, trustedRef, tagged); err != nil {
			return err
		}
	}
	return nil
}

func getTrustedPullTargets(cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth) ([]target, error) {
	notaryRepo, err := cli.NotaryClient(imgRefAndAuth, trust.ActionsPullOnly)
	if err != nil {
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	ref := imgRefAndAuth.Reference()
	tagged, isTagged := ref.(reference.NamedTagged)
	if !isTagged {
		// List all targets
		targets, err := notaryRepo.ListTargets(trust.ReleasesRole, data.CanonicalTargetsRole)
		if err != nil {
			return nil, trust.NotaryError(ref.Name(), err)
		}
		var refs []target
		for _, tgt := range targets {
			t, err := convertTarget(tgt.Target)
			if err != nil {
				fmt.Fprintf(cli.Err(), "Skipping target for %q\n", reference.FamiliarName(ref))
				continue
			}
			// Only list tags in the top level targets role or the releases delegation role - ignore
			// all other delegation roles
			if tgt.Role != trust.ReleasesRole && tgt.Role != data.CanonicalTargetsRole {
				continue
			}
			refs = append(refs, t)
		}
		if len(refs) == 0 {
			return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trusted tags for %s", ref.Name()))
		}
		return refs, nil
	}

	t, err := notaryRepo.GetTargetByName(tagged.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return nil, trust.NotaryError(ref.Name(), err)
	}
	// Only get the tag if it's in the top level targets role or the releases delegation role
	// ignore it if it's in any other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(ref.Name(), errors.Errorf("No trust data for %s", tagged.Tag()))
	}

	logrus.Debugf("retrieving target for %s role", t.Role)
	r, err := convertTarget(t.Target)
	return []target{r}, err
}

// imagePullPrivileged pulls the image and displays it to the output
func imagePullPrivileged(ctx context.Context, cli command.Cli, imgRefAndAuth trust.ImageRefAndAuth, all bool, platform string) error {
	ref := reference.FamiliarString(imgRefAndAuth.Reference())

	encodedAuth, err := command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
	if err != nil {
		return err
	}
	requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "pull")
	options := types.ImagePullOptions{
		RegistryAuth:  encodedAuth,
		PrivilegeFunc: requestPrivilege,
		All:           all,
		Platform:      platform,
	}
	responseBody, err := cli.Client().ImagePull(ctx, ref, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	return jsonmessage.DisplayJSONMessagesToStream(responseBody, cli.Out(), nil)
}

// TrustedReference returns the canonical trusted reference for an image reference
func TrustedReference(ctx context.Context, cli command.Cli, ref reference.NamedTagged, rs registry.Service) (reference.Canonical, error) {
	var (
		repoInfo *registry.RepositoryInfo
		err      error
	)
	if rs != nil {
		repoInfo, err = rs.ResolveRepository(ref)
	} else {
		repoInfo, err = registry.ParseRepositoryInfo(ref)
	}
	if err != nil {
		return nil, err
	}

	// Resolve the Auth config relevant for this server
	authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)

	notaryRepo, err := trust.GetNotaryRepository(cli.In(), cli.Out(), command.UserAgent(), repoInfo, &authConfig, "pull")
	if err != nil {
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return nil, trust.NotaryError(repoInfo.Name.Name(), err)
	}
	// Only list tags in the top level targets role or the releases delegation role - ignore
	// all other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(repoInfo.Name.Name(), client.ErrNoSuchTarget(ref.Tag()))
	}
	r, err := convertTarget(t.Target)
	if err != nil {
		return nil, err

	}
	return reference.WithDigest(reference.TrimNamed(ref), r.digest)
}

func convertTarget(t client.Target) (target, error) {
	h, ok := t.Hashes["sha256"]
	if !ok {
		return target{}, errors.New("no valid hash, expecting sha256")
	}
	return target{
		name:   t.Name,
		digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)),
		size:   t.Length,
	}, nil
}

// TagTrusted tags a trusted ref
// nolint: interfacer
func TagTrusted(ctx context.Context, cli command.Cli, trustedRef reference.Canonical, ref reference.NamedTagged) error {
	// Use familiar references when interacting with client and output
	familiarRef := reference.FamiliarString(ref)
	trustedFamiliarRef := reference.FamiliarString(trustedRef)

	fmt.Fprintf(cli.Err(), "Tagging %s as %s\n", trustedFamiliarRef, familiarRef)

	return cli.Client().ImageTag(ctx, trustedFamiliarRef, familiarRef)
}

// AuthResolver returns an auth resolver function from a command.Cli
func AuthResolver(cli command.Cli) func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
	return func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
		return command.ResolveAuthConfig(ctx, cli, index)
	}
}
56
vendor/github.com/docker/cli/cli/command/in.go
generated
vendored
@ -1,56 +0,0 @@
package command

import (
	"errors"
	"io"
	"os"
	"runtime"

	"github.com/docker/docker/pkg/term"
)

// InStream is an input stream used by the DockerCli to read user input
type InStream struct {
	CommonStream
	in io.ReadCloser
}

func (i *InStream) Read(p []byte) (int, error) {
	return i.in.Read(p)
}

// Close implements the Closer interface
func (i *InStream) Close() error {
	return i.in.Close()
}

// SetRawTerminal sets raw mode on the input terminal
func (i *InStream) SetRawTerminal() (err error) {
	if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal {
		return nil
	}
	i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd)
	return err
}

// CheckTty checks if we are trying to attach to a container tty
// from a non-tty client input stream, and if so, returns an error.
func (i *InStream) CheckTty(attachStdin, ttyMode bool) error {
	// In order to attach to a container tty, input stream for the client must
	// be a tty itself: redirecting or piping the client standard input is
	// incompatible with `docker run -t`, `docker exec -t` or `docker attach`.
	if ttyMode && attachStdin && !i.isTerminal {
		eText := "the input device is not a TTY"
		if runtime.GOOS == "windows" {
			return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'")
		}
		return errors.New(eText)
	}
	return nil
}

// NewInStream returns a new InStream object from a ReadCloser
func NewInStream(in io.ReadCloser) *InStream {
	fd, isTerminal := term.GetFdInfo(in)
	return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in}
}
50
vendor/github.com/docker/cli/cli/command/out.go
generated
vendored
@ -1,50 +0,0 @@
package command

import (
	"io"
	"os"

	"github.com/docker/docker/pkg/term"
	"github.com/sirupsen/logrus"
)

// OutStream is an output stream used by the DockerCli to write normal program
// output.
type OutStream struct {
	CommonStream
	out io.Writer
}

func (o *OutStream) Write(p []byte) (int, error) {
	return o.out.Write(p)
}

// SetRawTerminal sets raw mode on the input terminal
func (o *OutStream) SetRawTerminal() (err error) {
	if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal {
		return nil
	}
	o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd)
	return err
}

// GetTtySize returns the height and width in characters of the tty
func (o *OutStream) GetTtySize() (uint, uint) {
	if !o.isTerminal {
		return 0, 0
	}
	ws, err := term.GetWinsize(o.fd)
	if err != nil {
		logrus.Debugf("Error getting size: %s", err)
		if ws == nil {
			return 0, 0
		}
	}
	return uint(ws.Height), uint(ws.Width)
}

// NewOutStream returns a new OutStream object from a Writer
func NewOutStream(out io.Writer) *OutStream {
	fd, isTerminal := term.GetFdInfo(out)
	return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out}
}
189
vendor/github.com/docker/cli/cli/command/registry.go
generated
vendored
@ -1,189 +0,0 @@
package command

import (
	"bufio"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"

	"golang.org/x/net/context"

	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/registry"
	"github.com/pkg/errors"
)

// ElectAuthServer returns the default registry to use (by asking the daemon)
func ElectAuthServer(ctx context.Context, cli Cli) string {
	// The daemon `/info` endpoint informs us of the default registry being
	// used. This is essential in cross-platforms environment, where for
	// example a Linux client might be interacting with a Windows daemon, hence
	// the default registry URL might be Windows specific.
	serverAddress := registry.IndexServer
	if info, err := cli.Client().Info(ctx); err != nil {
		fmt.Fprintf(cli.Err(), "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress)
	} else if info.IndexServerAddress == "" {
		fmt.Fprintf(cli.Err(), "Warning: Empty registry endpoint from daemon. Using system default: %s\n", serverAddress)
	} else {
		serverAddress = info.IndexServerAddress
	}
	return serverAddress
}

// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload
func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
	buf, err := json.Marshal(authConfig)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
// for the given command.
func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
	return func() (string, error) {
		fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName)
		indexServer := registry.GetAuthConfigKey(index)
		isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
		authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry)
		if err != nil {
			return "", err
		}
		return EncodeAuthToBase64(authConfig)
	}
}

// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the
// default index, it uses the default index name for the daemon's platform,
// not the client's platform.
func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig {
	configKey := index.Name
	if index.Official {
		configKey = ElectAuthServer(ctx, cli)
	}

	a, _ := cli.ConfigFile().GetAuthConfig(configKey)
	return a
}

// ConfigureAuth returns an AuthConfig from the specified user, password and server.
func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
	// On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210
	if runtime.GOOS == "windows" {
		cli.SetIn(NewInStream(os.Stdin))
	}

	if !isDefaultRegistry {
		serverAddress = registry.ConvertToHostname(serverAddress)
	}

	authconfig, err := cli.ConfigFile().GetAuthConfig(serverAddress)
	if err != nil {
		return authconfig, err
	}

	// Some links documenting this:
	// - https://code.google.com/archive/p/mintty/issues/56
	// - https://github.com/docker/docker/issues/15272
	// - https://mintty.github.io/ (compatibility)
	// Linux will hit this if you attempt `cat | docker login`, and Windows
	// will hit this if you attempt docker login from mintty where stdin
	// is a pipe, not a character based console.
	if flPassword == "" && !cli.In().IsTerminal() {
		return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device")
	}

	authconfig.Username = strings.TrimSpace(authconfig.Username)

	if flUser = strings.TrimSpace(flUser); flUser == "" {
		if isDefaultRegistry {
			// if this is a default registry (docker hub), then display the following message.
			fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.")
		}
		promptWithDefault(cli.Out(), "Username", authconfig.Username)
		flUser = readInput(cli.In(), cli.Out())
		flUser = strings.TrimSpace(flUser)
		if flUser == "" {
			flUser = authconfig.Username
		}
	}
	if flUser == "" {
		return authconfig, errors.Errorf("Error: Non-null Username Required")
	}
	if flPassword == "" {
		oldState, err := term.SaveState(cli.In().FD())
		if err != nil {
			return authconfig, err
		}
		fmt.Fprintf(cli.Out(), "Password: ")
		term.DisableEcho(cli.In().FD(), oldState)

		flPassword = readInput(cli.In(), cli.Out())
		fmt.Fprint(cli.Out(), "\n")

		term.RestoreTerminal(cli.In().FD(), oldState)
		if flPassword == "" {
			return authconfig, errors.Errorf("Error: Password Required")
		}
	}

	authconfig.Username = flUser
	authconfig.Password = flPassword
	authconfig.ServerAddress = serverAddress
	authconfig.IdentityToken = ""

	return authconfig, nil
}

func readInput(in io.Reader, out io.Writer) string {
	reader := bufio.NewReader(in)
	line, _, err := reader.ReadLine()
	if err != nil {
		fmt.Fprintln(out, err.Error())
		os.Exit(1)
	}
	return string(line)
}

func promptWithDefault(out io.Writer, prompt string, configDefault string) {
	if configDefault == "" {
		fmt.Fprintf(out, "%s: ", prompt)
	} else {
		fmt.Fprintf(out, "%s (%s): ", prompt, configDefault)
	}
}

// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image
func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) {
	// Retrieve encoded auth token from the image reference
	authConfig, err := resolveAuthConfigFromImage(ctx, cli, image)
	if err != nil {
		return "", err
	}
	encodedAuth, err := EncodeAuthToBase64(authConfig)
	if err != nil {
		return "", err
	}
	return encodedAuth, nil
}

// resolveAuthConfigFromImage retrieves that AuthConfig using the image string
func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) {
	registryRef, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return types.AuthConfig{}, err
	}
	repoInfo, err := registry.ParseRepositoryInfo(registryRef)
	if err != nil {
		return types.AuthConfig{}, err
	}
	return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil
}
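A sketch of what EncodeAuthToBase64 produces (package name and the credential values are illustrative assumptions): the JSON-marshalled AuthConfig, URL-safe base64 encoded, which is the payload the engine API expects in the X-Registry-Auth header.

package clidemo

import (
	"fmt"

	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
)

func encodeAuthExample() error {
	auth := types.AuthConfig{
		Username:      "someuser",          // illustrative credentials only
		Password:      "example-password",  // illustrative credentials only
		ServerAddress: "https://index.docker.io/v1/",
	}
	encoded, err := command.EncodeAuthToBase64(auth)
	if err != nil {
		return err
	}
	// The encoded string is what gets set as RegistryAuth in push/pull
	// options and sent on the wire as the X-Registry-Auth header.
	fmt.Println(encoded)
	return nil
}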
34
vendor/github.com/docker/cli/cli/command/stream.go
generated
vendored
@ -1,34 +0,0 @@
package command

import (
	"github.com/docker/docker/pkg/term"
)

// CommonStream is an input stream used by the DockerCli to read user input
type CommonStream struct {
	fd         uintptr
	isTerminal bool
	state      *term.State
}

// FD returns the file descriptor number for this stream
func (s *CommonStream) FD() uintptr {
	return s.fd
}

// IsTerminal returns true if this stream is connected to a terminal
func (s *CommonStream) IsTerminal() bool {
	return s.isTerminal
}

// RestoreTerminal restores normal mode to the terminal
func (s *CommonStream) RestoreTerminal() {
	if s.state != nil {
		term.RestoreTerminal(s.fd, s.state)
	}
}

// SetIsTerminal sets the boolean used for isTerminal
func (s *CommonStream) SetIsTerminal(isTerminal bool) {
	s.isTerminal = isTerminal
}
47
vendor/github.com/docker/cli/cli/command/trust.go
generated
vendored
@ -1,47 +0,0 @@
package command

import (
	"os"
	"strconv"

	"github.com/spf13/pflag"
)

var (
	// TODO: make this not global
	untrusted bool
)

func init() {
	untrusted = !getDefaultTrustState()
}

// AddTrustVerificationFlags adds content trust flags to the provided flagset
func AddTrustVerificationFlags(fs *pflag.FlagSet) {
	trusted := getDefaultTrustState()
	fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification")
}

// AddTrustSigningFlags adds "signing" flags to the provided flagset
func AddTrustSigningFlags(fs *pflag.FlagSet) {
	trusted := getDefaultTrustState()
	fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing")
}

// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable.
func getDefaultTrustState() bool {
	var trusted bool
	if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" {
		if t, err := strconv.ParseBool(e); t || err != nil {
			// treat any other value as true
			trusted = true
		}
	}
	return trusted
}

// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable,
// or through `--disabled-content-trust=false` on a command.
func IsTrusted() bool {
	return !untrusted
}
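A sketch (package and function names are assumptions) of how the environment variable is interpreted: strconv.ParseBool accepts the usual boolean spellings, and any unparsable non-empty value falls into the "treat as true" branch above.

package clidemo

import (
	"os"
	"strconv"
)

// trustEnabled mirrors getDefaultTrustState for illustration only.
func trustEnabled() bool {
	e := os.Getenv("DOCKER_CONTENT_TRUST")
	if e == "" {
		return false // unset or empty: trust stays off
	}
	t, err := strconv.ParseBool(e)
	// "1"/"true" -> true; "0"/"false" -> false; anything else (e.g. "yes")
	// fails to parse and is treated as true, matching the vendored behaviour.
	return t || err != nil
}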
127
vendor/github.com/docker/cli/cli/command/utils.go
generated
vendored
@ -1,127 +0,0 @@
package command

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/pkg/system"
	"github.com/spf13/pflag"
)

// CopyToFile writes the content of the reader to the specified file
func CopyToFile(outfile string, r io.Reader) error {
	// We use sequential file access here to avoid depleting the standby list
	// on Windows. On Linux, this is a call directly to ioutil.TempFile
	tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_")
	if err != nil {
		return err
	}

	tmpPath := tmpFile.Name()

	_, err = io.Copy(tmpFile, r)
	tmpFile.Close()

	if err != nil {
		os.Remove(tmpPath)
		return err
	}

	if err = os.Rename(tmpPath, outfile); err != nil {
		os.Remove(tmpPath)
		return err
	}

	return nil
}

// capitalizeFirst capitalizes the first character of string
func capitalizeFirst(s string) string {
	switch l := len(s); l {
	case 0:
		return s
	case 1:
		return strings.ToLower(s)
	default:
		return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:])
	}
}

// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter.
func PrettyPrint(i interface{}) string {
	switch t := i.(type) {
	case nil:
		return "None"
	case string:
		return capitalizeFirst(t)
	default:
		return capitalizeFirst(fmt.Sprintf("%s", t))
	}
}

// PromptForConfirmation requests and checks confirmation from user.
// This will display the provided message followed by ' [y/N] '. If
// the user input 'y' or 'Y' it returns true other false. If no
// message is provided "Are you sure you want to proceed? [y/N] "
// will be used instead.
func PromptForConfirmation(ins io.Reader, outs io.Writer, message string) bool {
	if message == "" {
		message = "Are you sure you want to proceed?"
	}
	message += " [y/N] "

	fmt.Fprintf(outs, message)

	// On Windows, force the use of the regular OS stdin stream.
	if runtime.GOOS == "windows" {
		ins = NewInStream(os.Stdin)
	}

	reader := bufio.NewReader(ins)
	answer, _, _ := reader.ReadLine()
	return strings.ToLower(string(answer)) == "y"
}

// PruneFilters returns consolidated prune filters obtained from config.json and cli
func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
	if dockerCli.ConfigFile() == nil {
		return pruneFilters
	}
	for _, f := range dockerCli.ConfigFile().PruneFilters {
		parts := strings.SplitN(f, "=", 2)
		if len(parts) != 2 {
			continue
		}
		if parts[0] == "label" {
			// CLI label filter supersede config.json.
			// If CLI label filter conflict with config.json,
			// skip adding label! filter in config.json.
			if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) {
				continue
			}
		} else if parts[0] == "label!" {
			// CLI label! filter supersede config.json.
			// If CLI label! filter conflict with config.json,
			// skip adding label filter in config.json.
			if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) {
				continue
			}
		}
		pruneFilters.Add(parts[0], parts[1])
	}

	return pruneFilters
}

// AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
	flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
	flags.SetAnnotation("platform", "version", []string{"1.32"})
	flags.SetAnnotation("platform", "experimental", nil)
}
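A usage sketch of PromptForConfirmation, in the style the prune commands use it (package, function, and message are assumptions): print the warning, read one line, and proceed only on 'y' or 'Y'.

package clidemo

import (
	"errors"
	"os"

	"github.com/docker/cli/cli/command"
)

func confirmDestructiveAction() error {
	ok := command.PromptForConfirmation(os.Stdin, os.Stdout,
		"WARNING! This will remove data.")
	if !ok {
		return errors.New("aborted by user") // hypothetical handling
	}
	// ... perform the destructive action here
	return nil
}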
33
vendor/github.com/docker/cli/cli/error.go
generated
vendored
@ -1,33 +0,0 @@
package cli

import (
	"fmt"
	"strings"
)

// Errors is a list of errors.
// Useful in a loop if you don't want to return the error right away and you want to display after the loop,
// all the errors that happened during the loop.
type Errors []error

func (errList Errors) Error() string {
	if len(errList) < 1 {
		return ""
	}

	out := make([]string, len(errList))
	for i := range errList {
		out[i] = errList[i].Error()
	}
	return strings.Join(out, ", ")
}

// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
	Status     string
	StatusCode int
}

func (e StatusError) Error() string {
	return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
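A sketch of the aggregation pattern the Errors type is meant for (package and the process helper are hypothetical): collect per-item failures in a loop and return them as one error afterwards.

package clidemo

import (
	"fmt"

	"github.com/docker/cli/cli"
)

func processAll(items []string) error {
	var errs cli.Errors
	for _, item := range items {
		if err := process(item); err != nil {
			errs = append(errs, err) // keep going; report at the end
		}
	}
	if len(errs) > 0 {
		return errs // Error() joins the messages with ", "
	}
	return nil
}

// process is a hypothetical per-item worker.
func process(item string) error {
	if item == "" {
		return fmt.Errorf("empty item")
	}
	return nil
}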
107
vendor/github.com/docker/cli/cli/required.go
generated
vendored
@ -1,107 +0,0 @@
package cli

import (
	"strings"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// NoArgs validates args and returns an error if there are any args
func NoArgs(cmd *cobra.Command, args []string) error {
	if len(args) == 0 {
		return nil
	}

	if cmd.HasSubCommands() {
		return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
	}

	return errors.Errorf(
		"%q accepts no arguments.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
		cmd.CommandPath(),
		cmd.CommandPath(),
		cmd.UseLine(),
		cmd.Short,
	)
}

// RequiresMinArgs returns an error if there is not at least min args
func RequiresMinArgs(min int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) >= min {
			return nil
		}
		return errors.Errorf(
			"%q requires at least %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			min,
			pluralize("argument", min),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// RequiresMaxArgs returns an error if there is not at most max args
func RequiresMaxArgs(max int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) <= max {
			return nil
		}
		return errors.Errorf(
			"%q requires at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			max,
			pluralize("argument", max),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// RequiresRangeArgs returns an error if there is not at least min args and at most max args
func RequiresRangeArgs(min int, max int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) >= min && len(args) <= max {
			return nil
		}
		return errors.Errorf(
			"%q requires at least %d and at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			min,
			max,
			pluralize("argument", max),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

// ExactArgs returns an error if there is not the exact number of args
func ExactArgs(number int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) == number {
			return nil
		}
		return errors.Errorf(
			"%q requires exactly %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s",
			cmd.CommandPath(),
			number,
			pluralize("argument", number),
			cmd.CommandPath(),
			cmd.UseLine(),
			cmd.Short,
		)
	}
}

func pluralize(word string, number int) string {
	if number == 1 {
		return word
	}
	return word + "s"
}
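A sketch of how these validators attach to a command (package and command names are assumptions): cobra runs the Args function before RunE, so an arity mistake surfaces as the formatted usage error built above.

package clidemo

import (
	"fmt"

	"github.com/docker/cli/cli"
	"github.com/spf13/cobra"
)

func newGreetCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "greet NAME",
		Short: "Greet exactly one person",
		Args:  cli.ExactArgs(1), // rejects `greet` and `greet a b`
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello,", args[0])
			return nil
		},
	}
}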
9
vendor/github.com/docker/cli/cli/version.go
generated
vendored
@ -1,9 +0,0 @@
package cli

// Default build-time variable.
// These values are overriding via ldflags
var (
	Version   = "unknown-version"
	GitCommit = "unknown-commit"
	BuildTime = "unknown-buildtime"
)
2
vendor/github.com/docker/distribution/registry/doc.go
generated
vendored
@ -1,2 +0,0 @@
// Package registry provides the main entrypoints for running a registry.
package registry
356
vendor/github.com/docker/distribution/registry/registry.go
generated
vendored
@ -1,356 +0,0 @@
package registry
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"rsc.io/letsencrypt"
|
||||
|
||||
logstash "github.com/bshuster-repo/logrus-logstash-hook"
|
||||
"github.com/bugsnag/bugsnag-go"
|
||||
"github.com/docker/distribution/configuration"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/health"
|
||||
"github.com/docker/distribution/registry/handlers"
|
||||
"github.com/docker/distribution/registry/listener"
|
||||
"github.com/docker/distribution/uuid"
|
||||
"github.com/docker/distribution/version"
|
||||
gorhandlers "github.com/gorilla/handlers"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/yvasiyarov/gorelic"
|
||||
)
|
||||
|
||||
// ServeCmd is a cobra command for running the registry.
|
||||
var ServeCmd = &cobra.Command{
|
||||
Use: "serve <config>",
|
||||
Short: "`serve` stores and distributes Docker images",
|
||||
Long: "`serve` stores and distributes Docker images.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
// setup context
|
||||
ctx := context.WithVersion(context.Background(), version.Version)
|
||||
|
||||
config, err := resolveConfiguration(args)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
|
||||
cmd.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if config.HTTP.Debug.Addr != "" {
|
||||
go func(addr string) {
|
||||
log.Infof("debug server listening %v", addr)
|
||||
if err := http.ListenAndServe(addr, nil); err != nil {
|
||||
log.Fatalf("error listening on debug interface: %v", err)
|
||||
}
|
||||
}(config.HTTP.Debug.Addr)
|
||||
}
|
||||
|
||||
registry, err := NewRegistry(ctx, config)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if err = registry.ListenAndServe(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// A Registry represents a complete instance of the registry.
|
||||
// TODO(aaronl): It might make sense for Registry to become an interface.
|
||||
type Registry struct {
|
||||
config *configuration.Configuration
|
||||
app *handlers.App
|
||||
server *http.Server
|
||||
}
|
||||
|
||||
// NewRegistry creates a new registry from a context and configuration struct.
|
||||
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
|
||||
var err error
|
||||
ctx, err = configureLogging(ctx, config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring logger: %v", err)
|
||||
}
|
||||
|
||||
// inject a logger into the uuid library. warns us if there is a problem
|
||||
// with uuid generation under low entropy.
|
||||
uuid.Loggerf = context.GetLogger(ctx).Warnf
|
||||
|
||||
app := handlers.NewApp(ctx, config)
|
||||
// TODO(aaronl): The global scope of the health checks means NewRegistry
|
||||
// can only be called once per process.
|
||||
app.RegisterHealthChecks()
|
||||
handler := configureReporting(app)
|
||||
handler = alive("/", handler)
|
||||
handler = health.Handler(handler)
|
||||
handler = panicHandler(handler)
|
||||
if !config.Log.AccessLog.Disabled {
|
||||
handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
|
||||
}
|
||||
|
||||
server := &http.Server{
|
||||
Handler: handler,
|
||||
}
|
||||
|
||||
return &Registry{
|
||||
app: app,
|
||||
config: config,
|
||||
server: server,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListenAndServe runs the registry's HTTP server.
|
||||
func (registry *Registry) ListenAndServe() error {
|
||||
config := registry.config
|
||||
|
||||
ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
|
||||
tlsConf := &tls.Config{
|
||||
ClientAuth: tls.NoClientCert,
|
||||
NextProtos: nextProtos(config),
|
||||
MinVersion: tls.VersionTLS10,
|
||||
PreferServerCipherSuites: true,
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
},
|
||||
}
|
||||
|
||||
if config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
|
||||
if config.HTTP.TLS.Certificate != "" {
|
||||
return fmt.Errorf("cannot specify both certificate and Let's Encrypt")
|
||||
}
|
||||
var m letsencrypt.Manager
|
||||
if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil {
|
||||
return err
|
||||
}
|
||||
if !m.Registered() {
|
||||
if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tlsConf.GetCertificate = m.GetCertificate
|
||||
} else {
|
||||
tlsConf.Certificates = make([]tls.Certificate, 1)
|
||||
tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(config.HTTP.TLS.ClientCAs) != 0 {
|
||||
pool := x509.NewCertPool()
|
||||
|
||||
for _, ca := range config.HTTP.TLS.ClientCAs {
|
||||
caPem, err := ioutil.ReadFile(ca)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ok := pool.AppendCertsFromPEM(caPem); !ok {
|
||||
return fmt.Errorf("Could not add CA to pool")
|
||||
}
|
||||
}
|
||||
|
||||
for _, subj := range pool.Subjects() {
|
||||
context.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
|
||||
}
|
||||
|
||||
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
tlsConf.ClientCAs = pool
|
||||
}
|
||||
|
||||
ln = tls.NewListener(ln, tlsConf)
|
||||
context.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
|
||||
} else {
|
||||
context.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
|
||||
}
|
||||
|
||||
return registry.server.Serve(ln)
|
||||
}
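
A minimal sketch of the server-side TLS wiring above, reduced to the standard library; the certificate paths and port are placeholder assumptions, not values taken from this configuration.

package main

import (
	"crypto/tls"
	"log"
	"net"
)

func main() {
	// Placeholder paths; the real code loads config.HTTP.TLS.Certificate/Key.
	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
	if err != nil {
		log.Fatal(err)
	}

	tlsConf := &tls.Config{
		Certificates: []tls.Certificate{cert},
		MinVersion:   tls.VersionTLS10,
		NextProtos:   []string{"h2", "http/1.1"}, // same ALPN list nextProtos produces
	}

	// Wrap a plain TCP listener, exactly the tls.NewListener step above.
	ln, err := net.Listen("tcp", ":5000")
	if err != nil {
		log.Fatal(err)
	}
	ln = tls.NewListener(ln, tlsConf)
	log.Println("tls listener ready on", ln.Addr())
}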

func configureReporting(app *handlers.App) http.Handler {
	var handler http.Handler = app

	if app.Config.Reporting.Bugsnag.APIKey != "" {
		bugsnagConfig := bugsnag.Configuration{
			APIKey: app.Config.Reporting.Bugsnag.APIKey,
			// TODO(brianbland): provide the registry version here
			// AppVersion: "2.0",
		}
		if app.Config.Reporting.Bugsnag.ReleaseStage != "" {
			bugsnagConfig.ReleaseStage = app.Config.Reporting.Bugsnag.ReleaseStage
		}
		if app.Config.Reporting.Bugsnag.Endpoint != "" {
			bugsnagConfig.Endpoint = app.Config.Reporting.Bugsnag.Endpoint
		}
		bugsnag.Configure(bugsnagConfig)

		handler = bugsnag.Handler(handler)
	}

	if app.Config.Reporting.NewRelic.LicenseKey != "" {
		agent := gorelic.NewAgent()
		agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
		if app.Config.Reporting.NewRelic.Name != "" {
			agent.NewrelicName = app.Config.Reporting.NewRelic.Name
		}
		agent.CollectHTTPStat = true
		agent.Verbose = app.Config.Reporting.NewRelic.Verbose
		agent.Run()

		handler = agent.WrapHTTPHandler(handler)
	}

	return handler
}

// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
	if config.Log.Level == "" && config.Log.Formatter == "" {
		// If no config for logging is set, fall back to the deprecated "Loglevel".
		log.SetLevel(logLevel(config.Loglevel))
		ctx = context.WithLogger(ctx, context.GetLogger(ctx))
		return ctx, nil
	}

	log.SetLevel(logLevel(config.Log.Level))

	formatter := config.Log.Formatter
	if formatter == "" {
		formatter = "text" // default formatter
	}

	switch formatter {
	case "json":
		log.SetFormatter(&log.JSONFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "text":
		log.SetFormatter(&log.TextFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "logstash":
		log.SetFormatter(&logstash.LogstashFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	default:
		// Just let the library use its default on an empty string.
		if config.Log.Formatter != "" {
			return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
		}
	}

	if config.Log.Formatter != "" {
		log.Debugf("using %q logging formatter", config.Log.Formatter)
	}

	if len(config.Log.Fields) > 0 {
		// Build up the static fields, if present.
		var fields []interface{}
		for k := range config.Log.Fields {
			fields = append(fields, k)
		}

		ctx = context.WithValues(ctx, config.Log.Fields)
		ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
	}

	return ctx, nil
}
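
As a small illustration of the formatter switch above, a hedged, standalone sketch that applies the same JSON formatter through logrus; the field name is an arbitrary example, not something this package emits.

package main

import (
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Mirrors the "json" case above: RFC3339Nano timestamps, JSON output.
	log.SetFormatter(&log.JSONFormatter{TimestampFormat: time.RFC3339Nano})
	log.WithField("component", "example").Info("json-formatted log line")
}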

func logLevel(level configuration.Loglevel) log.Level {
	l, err := log.ParseLevel(string(level))
	if err != nil {
		l = log.InfoLevel
		log.Warnf("error parsing level %q: %v, using %q", level, err, l)
	}

	return l
}

// panicHandler wraps an HTTP handler and recovers any panic raised while
// serving a request. logrus.Panic transmits the panic message to the
// pre-configured log hooks defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				log.Panic(fmt.Sprintf("%v", err))
			}
		}()
		handler.ServeHTTP(w, r)
	})
}

// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater effect.
func alive(path string, handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == path {
			w.Header().Set("Cache-Control", "no-cache")
			w.WriteHeader(http.StatusOK)
			return
		}

		handler.ServeHTTP(w, r)
	})
}

func resolveConfiguration(args []string) (*configuration.Configuration, error) {
	var configurationPath string

	if len(args) > 0 {
		configurationPath = args[0]
	} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
		configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
	}

	if configurationPath == "" {
		return nil, fmt.Errorf("configuration path unspecified")
	}

	fp, err := os.Open(configurationPath)
	if err != nil {
		return nil, err
	}

	defer fp.Close()

	config, err := configuration.Parse(fp)
	if err != nil {
		return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
	}

	return config, nil
}

func nextProtos(config *configuration.Configuration) []string {
	switch config.HTTP.HTTP2.Disabled {
	case true:
		return []string{"http/1.1"}
	default:
		return []string{"h2", "http/1.1"}
	}
}
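
A hedged sketch of the configuration-resolution order resolveConfiguration implements: an explicit argument wins, then the REGISTRY_CONFIGURATION_PATH environment variable. The path value below is an illustrative example only.

package main

import (
	"fmt"
	"os"
)

// resolvePath mirrors resolveConfiguration's lookup order without parsing.
func resolvePath(args []string) (string, error) {
	if len(args) > 0 {
		return args[0], nil
	}
	if p := os.Getenv("REGISTRY_CONFIGURATION_PATH"); p != "" {
		return p, nil
	}
	return "", fmt.Errorf("configuration path unspecified")
}

func main() {
	os.Setenv("REGISTRY_CONFIGURATION_PATH", "/etc/docker/registry/config.yml") // example value
	p, err := resolvePath(nil)
	fmt.Println(p, err)
}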

84
vendor/github.com/docker/distribution/registry/root.go
generated
vendored
@@ -1,84 +0,0 @@
package registry

import (
	"fmt"
	"os"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/factory"
	"github.com/docker/distribution/version"
	"github.com/docker/libtrust"
	"github.com/spf13/cobra"
)

var showVersion bool

func init() {
	RootCmd.AddCommand(ServeCmd)
	RootCmd.AddCommand(GCCmd)
	GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
	RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the version and exit")
}

// RootCmd is the main command for the 'registry' binary.
var RootCmd = &cobra.Command{
	Use:   "registry",
	Short: "`registry`",
	Long:  "`registry`",
	Run: func(cmd *cobra.Command, args []string) {
		if showVersion {
			version.PrintVersion()
			return
		}
		cmd.Usage()
	},
}

var dryRun bool

// GCCmd is the cobra command that corresponds to the garbage-collect subcommand.
var GCCmd = &cobra.Command{
	Use:   "garbage-collect <config>",
	Short: "`garbage-collect` deletes layers not referenced by any manifests",
	Long:  "`garbage-collect` deletes layers not referenced by any manifests",
	Run: func(cmd *cobra.Command, args []string) {
		config, err := resolveConfiguration(args)
		if err != nil {
			fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
			cmd.Usage()
			os.Exit(1)
		}

		driver, err := factory.Create(config.Storage.Type(), config.Storage.Parameters())
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to construct %s driver: %v", config.Storage.Type(), err)
			os.Exit(1)
		}

		ctx := context.Background()
		ctx, err = configureLogging(ctx, config)
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to configure logging with config: %s", err)
			os.Exit(1)
		}

		k, err := libtrust.GenerateECP256PrivateKey()
		if err != nil {
			fmt.Fprint(os.Stderr, err)
			os.Exit(1)
		}

		registry, err := storage.NewRegistry(ctx, driver, storage.Schema1SigningKey(k))
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to construct registry: %v", err)
			os.Exit(1)
		}

		err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to garbage collect: %v", err)
			os.Exit(1)
		}
	},
}
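
A small, self-contained sketch (assuming only the spf13/cobra API already imported in this file) of the dry-run flag wiring GCCmd uses:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var dryRun bool
	cmd := &cobra.Command{
		Use: "garbage-collect <config>",
		Run: func(cmd *cobra.Command, args []string) {
			// With -d/--dry-run set, a real implementation would mark but not sweep.
			fmt.Println("dry-run:", dryRun)
		},
	}
	cmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything except remove the blobs")
	cmd.Execute()
}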

60
vendor/github.com/docker/distribution/registry/storage/blobcachemetrics.go
generated
vendored
@@ -1,60 +0,0 @@
package storage

import (
	"expvar"
	"sync/atomic"

	"github.com/docker/distribution/registry/storage/cache"
)

type blobStatCollector struct {
	metrics cache.Metrics
}

func (bsc *blobStatCollector) Hit() {
	atomic.AddUint64(&bsc.metrics.Requests, 1)
	atomic.AddUint64(&bsc.metrics.Hits, 1)
}

func (bsc *blobStatCollector) Miss() {
	atomic.AddUint64(&bsc.metrics.Requests, 1)
	atomic.AddUint64(&bsc.metrics.Misses, 1)
}

func (bsc *blobStatCollector) Metrics() cache.Metrics {
	return bsc.metrics
}

// blobStatterCacheMetrics keeps track of cache metrics for blob descriptor
// cache requests. Note this is kept globally and made available via expvar.
// For more detailed metrics, it's recommended to instrument a particular
// cache implementation.
var blobStatterCacheMetrics cache.MetricsTracker = &blobStatCollector{}

func init() {
	registry := expvar.Get("registry")
	if registry == nil {
		registry = expvar.NewMap("registry")
	}

	cache := registry.(*expvar.Map).Get("cache")
	if cache == nil {
		cache = &expvar.Map{}
		cache.(*expvar.Map).Init()
		registry.(*expvar.Map).Set("cache", cache)
	}

	storage := cache.(*expvar.Map).Get("storage")
	if storage == nil {
		storage = &expvar.Map{}
		storage.(*expvar.Map).Init()
		cache.(*expvar.Map).Set("storage", storage)
	}

	storage.(*expvar.Map).Set("blobdescriptor", expvar.Func(func() interface{} {
		// No need for synchronized access: the increments are atomic and,
		// during reading, we don't care if the data is up to date. The
		// numbers will always *eventually* be reported correctly.
		return blobStatterCacheMetrics
	}))
}
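
A standard-library-only sketch of the nested expvar publishing pattern used in init() above; the variable names are illustrative. Anything published this way becomes visible at the /debug/vars endpoint once expvar's handler is mounted.

package main

import (
	"expvar"
	"fmt"
)

func main() {
	// Nested maps, like the registry -> cache -> storage chain above.
	root := expvar.NewMap("example")
	inner := new(expvar.Map).Init()
	root.Set("cache", inner)

	hits := new(expvar.Int)
	hits.Add(1)
	inner.Set("hits", hits)

	fmt.Println(expvar.Get("example")) // {"cache": {"hits": 1}}
}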

78
vendor/github.com/docker/distribution/registry/storage/blobserver.go
generated
vendored
@@ -1,78 +0,0 @@
package storage

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

// TODO(stevvooe): This should be configurable in the future.
const blobCacheControlMaxAge = 365 * 24 * time.Hour

// blobServer simply serves blobs from a driver instance using a path function
// to identify paths and a descriptor service to fill in metadata.
type blobServer struct {
	driver   driver.StorageDriver
	statter  distribution.BlobStatter
	pathFn   func(dgst digest.Digest) (string, error)
	redirect bool // allows disabling URLFor redirects
}

func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	desc, err := bs.statter.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	path, err := bs.pathFn(desc.Digest)
	if err != nil {
		return err
	}

	if bs.redirect {
		redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method})
		switch err.(type) {
		case nil:
			// Redirect to storage URL.
			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
			return err

		case driver.ErrUnsupportedMethod:
			// Fall back to serving the content directly.
		default:
			// Some unexpected error.
			return err
		}
	}

	br, err := newFileReader(ctx, bs.driver, path, desc.Size)
	if err != nil {
		return err
	}
	defer br.Close()

	w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
	w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))

	if w.Header().Get("Docker-Content-Digest") == "" {
		w.Header().Set("Docker-Content-Digest", desc.Digest.String())
	}

	if w.Header().Get("Content-Type") == "" {
		// Set the content type if not already set.
		w.Header().Set("Content-Type", desc.MediaType)
	}

	if w.Header().Get("Content-Length") == "" {
		// Set the content length if not already set.
		w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
	}

	http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
	return nil
}
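
The ETag comment above leans on http.ServeContent's built-in conditional-request handling. This hedged sketch (the digest value is a placeholder) shows a matching If-None-Match producing a 304 with no body re-sent:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
	"time"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ETag must be set before ServeContent is called, as above.
		w.Header().Set("ETag", `"sha256:placeholder"`)
		http.ServeContent(w, r, "blob", time.Time{}, strings.NewReader("blob bytes"))
	})

	req := httptest.NewRequest("GET", "/blob", nil)
	req.Header.Set("If-None-Match", `"sha256:placeholder"`)
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	fmt.Println(rec.Code, rec.Body.Len()) // 304 0: the client cache is still valid
}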

223
vendor/github.com/docker/distribution/registry/storage/blobstore.go
generated
vendored
@@ -1,223 +0,0 @@
package storage

import (
	"path"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

// blobStore implements the read side of the blob store interface over a
// driver without enforcing per-repository membership. This object is
// intentionally a leaky abstraction, providing utility methods that support
// creating and traversing backend links.
type blobStore struct {
	driver  driver.StorageDriver
	statter distribution.BlobStatter
}

var _ distribution.BlobProvider = &blobStore{}

// Get implements the BlobReadService.Get call.
func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	bp, err := bs.path(dgst)
	if err != nil {
		return nil, err
	}

	p, err := getContent(ctx, bs.driver, bp)
	if err != nil {
		switch err.(type) {
		case driver.PathNotFoundError:
			return nil, distribution.ErrBlobUnknown
		}

		return nil, err
	}

	return p, nil
}

func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	desc, err := bs.statter.Stat(ctx, dgst)
	if err != nil {
		return nil, err
	}

	path, err := bs.path(desc.Digest)
	if err != nil {
		return nil, err
	}

	return newFileReader(ctx, bs.driver, path, desc.Size)
}

// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. It is implemented as a
// convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	dgst := digest.FromBytes(p)
	desc, err := bs.statter.Stat(ctx, dgst)
	if err == nil {
		// content already present
		return desc, nil
	} else if err != distribution.ErrBlobUnknown {
		context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err)
		// real error, return it
		return distribution.Descriptor{}, err
	}

	bp, err := bs.path(dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// TODO(stevvooe): Write out mediatype here, as well.
	return distribution.Descriptor{
		Size: int64(len(p)),

		// NOTE(stevvooe): The central blob store firewalls media types from
		// other users. The caller should look this up and override the value
		// for the specific repository.
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, bs.driver.PutContent(ctx, bp, p)
}

func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
	specPath, err := pathFor(blobsPathSpec{})
	if err != nil {
		return err
	}

	err = Walk(ctx, bs.driver, specPath, func(fileInfo driver.FileInfo) error {
		// skip directories
		if fileInfo.IsDir() {
			return nil
		}

		currentPath := fileInfo.Path()
		// we only want to parse paths that end with /data
		_, fileName := path.Split(currentPath)
		if fileName != "data" {
			return nil
		}

		digest, err := digestFromPath(currentPath)
		if err != nil {
			return err
		}

		return ingester(digest)
	})
	return err
}

// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
	bp, err := pathFor(blobDataPathSpec{
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	return bp, nil
}

// link links the path to the provided digest by writing the digest into the
// target file. The caller must ensure that the blob actually exists.
func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
	// The contents of the "link" file are the exact string contents of the
	// digest, which is specified in that package.
	return bs.driver.PutContent(ctx, path, []byte(dgst))
}

// readlink returns the linked digest at path.
func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
	content, err := bs.driver.GetContent(ctx, path)
	if err != nil {
		return "", err
	}

	linked, err := digest.Parse(string(content))
	if err != nil {
		return "", err
	}

	return linked, nil
}

// resolve reads the digest link at path and returns the blob store path.
func (bs *blobStore) resolve(ctx context.Context, path string) (string, error) {
	dgst, err := bs.readlink(ctx, path)
	if err != nil {
		return "", err
	}

	return bs.path(dgst)
}

type blobStatter struct {
	driver driver.StorageDriver
}

var _ distribution.BlobDescriptorService = &blobStatter{}

// Stat implements BlobStatter.Stat by returning the descriptor for the blob
// in the main blob store. If this method returns successfully, there is a
// strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	path, err := pathFor(blobDataPathSpec{
		digest: dgst,
	})

	if err != nil {
		return distribution.Descriptor{}, err
	}

	fi, err := bs.driver.Stat(ctx, path)
	if err != nil {
		switch err := err.(type) {
		case driver.PathNotFoundError:
			return distribution.Descriptor{}, distribution.ErrBlobUnknown
		default:
			return distribution.Descriptor{}, err
		}
	}

	if fi.IsDir() {
		// NOTE(stevvooe): This represents a corruption situation. Somehow, we
		// calculated a blob path and then detected a directory. We log the
		// error and then err on the side of not knowing about the blob.
		context.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	// TODO(stevvooe): Add method to resolve the mediatype. We can store and
	// cache a "global" media type for the blob, even if a specific repo has a
	// mediatype that overrides the main one.

	return distribution.Descriptor{
		Size: fi.Size(),

		// NOTE(stevvooe): The central blob store firewalls media types from
		// other users. The caller should look this up and override the value
		// for the specific repository.
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, nil
}

func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
	return distribution.ErrUnsupported
}

func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return distribution.ErrUnsupported
}
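
The Put path above is content-addressed: a blob's name is the digest of its bytes. A short sketch using the same go-digest package imported here (the payload is an arbitrary example):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte(`{"schemaVersion": 2}`) // example manifest bytes
	dgst := digest.FromBytes(payload)

	// Identical content always yields the identical address.
	fmt.Println(dgst)                              // sha256:...
	fmt.Println(dgst == digest.FromBytes(payload)) // true
}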

400
vendor/github.com/docker/distribution/registry/storage/blobwriter.go
generated
vendored
@@ -1,400 +0,0 @@
package storage

import (
	"errors"
	"fmt"
	"io"
	"path"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

var (
	errResumableDigestNotAvailable = errors.New("resumable digest not available")
)

const (
	// digestSha256Empty is the canonical sha256 digest of empty data
	digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)

// blobWriter is used to control the various aspects of resumable
// blob upload.
type blobWriter struct {
	ctx       context.Context
	blobStore *linkedBlobStore

	id        string
	startedAt time.Time
	digester  digest.Digester
	written   int64 // track the contiguous write

	fileWriter storagedriver.FileWriter
	driver     storagedriver.StorageDriver
	path       string

	resumableDigestEnabled bool
	committed              bool
}

var _ distribution.BlobWriter = &blobWriter{}

// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
	return bw.id
}

func (bw *blobWriter) StartedAt() time.Time {
	return bw.startedAt
}

// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	context.GetLogger(ctx).Debug("(*blobWriter).Commit")

	if err := bw.fileWriter.Commit(); err != nil {
		return distribution.Descriptor{}, err
	}

	bw.Close()
	desc.Size = bw.Size()

	canonical, err := bw.validateBlob(ctx, desc)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if err := bw.moveBlob(ctx, canonical); err != nil {
		return distribution.Descriptor{}, err
	}

	if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
		return distribution.Descriptor{}, err
	}

	if err := bw.removeResources(ctx); err != nil {
		return distribution.Descriptor{}, err
	}

	err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	bw.committed = true
	return canonical, nil
}

// Cancel the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
	context.GetLogger(ctx).Debug("(*blobWriter).Cancel")
	if err := bw.fileWriter.Cancel(); err != nil {
		return err
	}

	if err := bw.Close(); err != nil {
		context.GetLogger(ctx).Errorf("error closing blobwriter: %s", err)
	}

	if err := bw.removeResources(ctx); err != nil {
		return err
	}

	return nil
}

func (bw *blobWriter) Size() int64 {
	return bw.fileWriter.Size()
}

func (bw *blobWriter) Write(p []byte) (int, error) {
	// Ensure that the current write offset matches how many bytes have been
	// written to the digester. If not, we need to update the digest state to
	// match the current write position.
	if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
		return 0, err
	}

	n, err := io.MultiWriter(bw.fileWriter, bw.digester.Hash()).Write(p)
	bw.written += int64(n)

	return n, err
}

func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
	// Ensure that the current write offset matches how many bytes have been
	// written to the digester. If not, we need to update the digest state to
	// match the current write position.
	if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
		return 0, err
	}

	nn, err := io.Copy(io.MultiWriter(bw.fileWriter, bw.digester.Hash()), r)
	bw.written += nn

	return nn, err
}

func (bw *blobWriter) Close() error {
	if bw.committed {
		return errors.New("blobwriter close after commit")
	}

	if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
		return err
	}

	return bw.fileWriter.Close()
}

// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if desc.Digest == "" {
		// if no descriptors are provided, we have nothing to validate
		// against. We don't really want to support this for the registry.
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Reason: fmt.Errorf("cannot validate against empty digest"),
		}
	}

	var size int64

	// Stat the on disk file
	if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): We really don't care if the file is
			// not actually present for the reader. We now assume
			// that the desc length is zero.
			desc.Size = 0
		default:
			// Any other error we want propagated up the stack.
			return distribution.Descriptor{}, err
		}
	} else {
		if fi.IsDir() {
			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
		}

		size = fi.Size()
	}

	if desc.Size > 0 {
		if desc.Size != size {
			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
		}
	} else {
		// if provided 0 or negative length, we can assume caller doesn't know or
		// care about length.
		desc.Size = size
	}

	// TODO(stevvooe): This section is very meandering. Need to be broken down
	// to be a lot more clear.

	if err := bw.resumeDigest(ctx); err == nil {
		canonical = bw.digester.Digest()

		if canonical.Algorithm() == desc.Digest.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = desc.Digest == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else if err == errResumableDigestNotAvailable {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	} else {
		return distribution.Descriptor{}, err
	}

	if fullHash {
		// a fantastic optimization: if the written data and the size are
		// the same, we don't need to read the data from the backend. This is
		// because we've written the entire file in the lifecycle of the
		// current instance.
		if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
			canonical = bw.digester.Digest()
			verified = desc.Digest == canonical
		}

		// If the check based on size fails, we fall back to the slowest of
		// paths. We may be able to make the size-based check a stronger
		// guarantee, so this may be defensive.
		if !verified {
			digester := digest.Canonical.Digester()
			verifier := desc.Digest.Verifier()

			// Read the file from the backend driver and validate it.
			fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
			if err != nil {
				return distribution.Descriptor{}, err
			}
			defer fr.Close()

			tr := io.TeeReader(fr, digester.Hash())

			if _, err := io.Copy(verifier, tr); err != nil {
				return distribution.Descriptor{}, err
			}

			canonical = digester.Digest()
			verified = verifier.Verified()
		}
	}

	if !verified {
		context.GetLoggerWithFields(ctx,
			map[interface{}]interface{}{
				"canonical": canonical,
				"provided":  desc.Digest,
			}, "canonical", "provided").
			Errorf("canonical digest does not match provided digest")
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Digest: desc.Digest,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	// update desc with canonical hash
	desc.Digest = canonical

	if desc.MediaType == "" {
		desc.MediaType = "application/octet-stream"
	}

	return desc, nil
}

// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor) error {
	blobPath, err := pathFor(blobDataPathSpec{
		digest: desc.Digest,
	})

	if err != nil {
		return err
	}

	// Check for existence
	if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			break // ensure that it doesn't exist.
		default:
			return err
		}
	} else {
		// If the path exists, we can assume that the content has already
		// been uploaded, since the blob storage is content-addressable.
		// While it may be corrupted, detection of such corruption belongs
		// elsewhere.
		return nil
	}

	// If no data was received, we may not actually have a file on disk. Check
	// the size here and write a zero-length file to blobPath if this is the
	// case. For the most part, this should only ever happen with zero-length
	// blobs.
	if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// HACK(stevvooe): This is slightly dangerous: if we verify above,
			// get a hash, then the underlying file is deleted, we risk moving
			// a zero-length blob into a nonzero-length blob location. To
			// prevent this horrid thing, we employ the hack of only allowing
			// this to happen for the digest of an empty blob.
			if desc.Digest == digestSha256Empty {
				return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
			}

			// We let this fail during the move below.
			logrus.
				WithField("upload.id", bw.ID()).
				WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
		default:
			return err // unrelated error
		}
	}

	// TODO(stevvooe): We should also write the mediatype when executing this move.

	return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}

// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
	dataPath, err := pathFor(uploadDataPathSpec{
		name: bw.blobStore.repository.Named().Name(),
		id:   bw.id,
	})

	if err != nil {
		return err
	}

	// Resolve and delete the containing directory, which should include any
	// upload related files.
	dirPath := path.Dir(dataPath)
	if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			break // already gone!
		default:
			// This should be uncommon enough such that returning an error
			// should be okay. At this point, the upload should be mostly
			// complete, but perhaps the backend became inaccessible.
			context.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
			return err
		}
	}

	return nil
}

func (bw *blobWriter) Reader() (io.ReadCloser, error) {
	// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
	try := 1
	for try <= 5 {
		_, err := bw.driver.Stat(bw.ctx, bw.path)
		if err == nil {
			break
		}
		switch err.(type) {
		case storagedriver.PathNotFoundError:
			context.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
			time.Sleep(1 * time.Second)
			try++
		default:
			return nil, err
		}
	}

	readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
	if err != nil {
		return nil, err
	}

	return readCloser, nil
}
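
The Write and ReadFrom methods above tee every byte into both the backing file and the running digest. A minimal standard-library sketch of that pattern, with bytes.Buffer standing in for the storage driver's FileWriter:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	var file bytes.Buffer // stand-in for storagedriver.FileWriter
	h := sha256.New()

	// One write feeds storage and the hash simultaneously.
	n, err := io.MultiWriter(&file, h).Write([]byte("layer data"))
	if err != nil {
		panic(err)
	}

	fmt.Printf("wrote %d bytes, sha256:%x\n", n, h.Sum(nil))
}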

17
vendor/github.com/docker/distribution/registry/storage/blobwriter_nonresumable.go
generated
vendored
@@ -1,17 +0,0 @@
// +build noresumabledigest

package storage

import (
	"github.com/docker/distribution/context"
)

// resumeDigest is a noop when resumable digest support is disabled.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
	return errResumableDigestNotAvailable
}

// storeHashState is a noop when resumable digest support is disabled.
func (bw *blobWriter) storeHashState(ctx context.Context) error {
	return errResumableDigestNotAvailable
}

145
vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go
generated
vendored
@@ -1,145 +0,0 @@
// +build !noresumabledigest

package storage

import (
	"fmt"
	"path"
	"strconv"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/sirupsen/logrus"
	"github.com/stevvooe/resumable"

	// register resumable hashes with import
	_ "github.com/stevvooe/resumable/sha256"
	_ "github.com/stevvooe/resumable/sha512"
)

// resumeDigest attempts to restore the state of the internal hash function
// by loading the most recent saved hash state equal to the current size of the blob.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	h, ok := bw.digester.Hash().(resumable.Hash)
	if !ok {
		return errResumableDigestNotAvailable
	}
	offset := bw.fileWriter.Size()
	if offset == int64(h.Len()) {
		// State of digester is already at the requested offset.
		return nil
	}

	// List hash states from storage backend.
	var hashStateMatch hashStateEntry
	hashStates, err := bw.getStoredHashStates(ctx)
	if err != nil {
		return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
	}

	// Find the stored hashState with an offset equal to
	// the requested offset.
	for _, hashState := range hashStates {
		if hashState.offset == offset {
			hashStateMatch = hashState
			break // Found an exact offset match.
		}
	}

	if hashStateMatch.offset == 0 {
		// No need to load any state, just reset the hasher.
		h.Reset()
	} else {
		storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
		if err != nil {
			return err
		}

		if err = h.Restore(storedState); err != nil {
			return err
		}
	}

	// Mind the gap.
	if gapLen := offset - int64(h.Len()); gapLen > 0 {
		return errResumableDigestNotAvailable
	}

	return nil
}

type hashStateEntry struct {
	offset int64
	path   string
}

// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
	uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
		name: bw.blobStore.repository.Named().String(),
		id:   bw.id,
		alg:  bw.digester.Digest().Algorithm(),
		list: true,
	})

	if err != nil {
		return nil, err
	}

	paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
	if err != nil {
		if _, ok := err.(storagedriver.PathNotFoundError); !ok {
			return nil, err
		}
		// Treat PathNotFoundError as no entries.
		paths = nil
	}

	hashStateEntries := make([]hashStateEntry, 0, len(paths))

	for _, p := range paths {
		pathSuffix := path.Base(p)
		// The suffix should be the offset.
		offset, err := strconv.ParseInt(pathSuffix, 0, 64)
		if err != nil {
			logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
		}

		hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
	}

	return hashStateEntries, nil
}

func (bw *blobWriter) storeHashState(ctx context.Context) error {
	if !bw.resumableDigestEnabled {
		return errResumableDigestNotAvailable
	}

	h, ok := bw.digester.Hash().(resumable.Hash)
	if !ok {
		return errResumableDigestNotAvailable
	}

	uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
		name:   bw.blobStore.repository.Named().String(),
		id:     bw.id,
		alg:    bw.digester.Digest().Algorithm(),
		offset: int64(h.Len()),
	})

	if err != nil {
		return err
	}

	hashState, err := h.State()
	if err != nil {
		return err
	}

	return bw.driver.PutContent(ctx, uploadHashStatePath, hashState)
}

153
vendor/github.com/docker/distribution/registry/storage/catalog.go
generated
vendored
@@ -1,153 +0,0 @@
package storage

import (
	"errors"
	"io"
	"path"
	"strings"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
)

// errFinishedWalk signals an early exit to the walk when the current query
// is satisfied.
var errFinishedWalk = errors.New("finished walk")

// Repositories returns a list, or partial list, of repositories in the
// registry. Because it is quite an expensive operation, it should only be
// used when building up an initial set of repositories.
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
	var foundRepos []string

	if len(repos) == 0 {
		return 0, errors.New("no space in slice")
	}

	root, err := pathFor(repositoriesRootPathSpec{})
	if err != nil {
		return 0, err
	}

	err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
		err := handleRepository(fileInfo, root, last, func(repoPath string) error {
			foundRepos = append(foundRepos, repoPath)
			return nil
		})
		if err != nil {
			return err
		}

		// if we've filled our array, no need to walk any further
		if len(foundRepos) == len(repos) {
			return errFinishedWalk
		}

		return nil
	})

	n = copy(repos, foundRepos)

	switch err {
	case nil:
		// nil means that we completed the walk and didn't fill the buffer.
		// No more records are available.
		err = io.EOF
	case errFinishedWalk:
		// more records are available.
		err = nil
	}

	return n, err
}

// Enumerate applies ingester to each repository.
func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
	root, err := pathFor(repositoriesRootPathSpec{})
	if err != nil {
		return err
	}

	err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error {
		return handleRepository(fileInfo, root, "", ingester)
	})

	return err
}

// lessPath returns true if path a is less than path b.
//
// A component-wise comparison is done, rather than the lexical comparison of
// strings.
func lessPath(a, b string) bool {
	// we provide this behavior by making the separator always sort first.
	return compareReplaceInline(a, b, '/', '\x00') < 0
}

// compareReplaceInline modifies runtime.cmpstring to replace old with new
// during a byte-wise comparison.
func compareReplaceInline(s1, s2 string, old, new byte) int {
	// TODO(stevvooe): We are missing an optimization when the s1 and s2 have
	// the exact same slice header. It will make the code unsafe but can
	// provide some extra performance.

	l := len(s1)
	if len(s2) < l {
		l = len(s2)
	}

	for i := 0; i < l; i++ {
		c1, c2 := s1[i], s2[i]
		if c1 == old {
			c1 = new
		}

		if c2 == old {
			c2 = new
		}

		if c1 < c2 {
			return -1
		}

		if c1 > c2 {
			return +1
		}
	}

	if len(s1) < len(s2) {
		return -1
	}

	if len(s1) > len(s2) {
		return +1
	}

	return 0
}

// handleRepository calls function fn with a repository path if fileInfo
// has a path of a repository under root and it is lexicographically
// after last. Otherwise, it will return ErrSkipDir. This should be used
// with Walk to do handling with repositories in a storage.
func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error {
	filePath := fileInfo.Path()

	// lop the base path off
	repo := filePath[len(root)+1:]

	_, file := path.Split(repo)
	if file == "_layers" {
		repo = strings.TrimSuffix(repo, "/_layers")
		if lessPath(last, repo) {
			if err := fn(repo); err != nil {
				return err
			}
		}
		return ErrSkipDir
	} else if strings.HasPrefix(file, "_") {
		return ErrSkipDir
	}

	return nil
}
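
To see why lessPath differs from plain string ordering, here is a hedged sketch that reproduces the same component-wise order with a simple byte substitution ('/' is 0x2F, so under plain comparison it sorts after '-', 0x2D):

package main

import (
	"fmt"
	"strings"
)

// lessPath reimplements the ordering above: force '/' to sort before
// every other byte by mapping it to NUL before comparing.
func lessPath(a, b string) bool {
	repl := func(s string) string { return strings.Replace(s, "/", "\x00", -1) }
	return repl(a) < repl(b)
}

func main() {
	fmt.Println(lessPath("a/b", "a-b")) // true: the separator sorts first
	fmt.Println("a/b" < "a-b")          // false under plain lexical order
}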

3
vendor/github.com/docker/distribution/registry/storage/doc.go
generated
vendored
@@ -1,3 +0,0 @@
// Package storage contains storage services for use in the registry
// application. It should be considered an internal package, as of Go 1.4.
package storage

177
vendor/github.com/docker/distribution/registry/storage/filereader.go
generated
vendored
@@ -1,177 +0,0 @@
package storage

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
)

// TODO(stevvooe): Set an optimal buffer size here. We'll have to
// understand the latency characteristics of the underlying network to
// set this correctly, so we may want to leave it to the driver. For
// out of process drivers, we'll have to optimize this buffer size for
// local communication.
const fileReaderBufferSize = 4 << 20

// fileReader provides a read seeker interface to files stored in
// storagedriver. Used to implement part of layer interface and will be used
// to implement read side of LayerUpload.
type fileReader struct {
	driver storagedriver.StorageDriver

	ctx context.Context

	// identifying fields
	path string
	size int64 // size is the total size, must be set.

	// mutable fields
	rc     io.ReadCloser // remote read closer
	brd    *bufio.Reader // internal buffered io
	offset int64         // offset is the current read offset
	err    error         // terminal error, if set, reader is closed
}

// newFileReader initializes a file reader for the remote file. The reader
// takes on the size and path that must be determined externally with a stat
// call. The reader operates optimistically, assuming that the file is already
// there.
func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
	return &fileReader{
		ctx:    ctx,
		driver: driver,
		path:   path,
		size:   size,
	}, nil
}

func (fr *fileReader) Read(p []byte) (n int, err error) {
	if fr.err != nil {
		return 0, fr.err
	}

	rd, err := fr.reader()
	if err != nil {
		return 0, err
	}

	n, err = rd.Read(p)
	fr.offset += int64(n)

	// Simulate an io.EOF error if we reach filesize.
	if err == nil && fr.offset >= fr.size {
		err = io.EOF
	}

	return n, err
}

func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
	if fr.err != nil {
		return 0, fr.err
	}

	var err error
	newOffset := fr.offset

	switch whence {
	case os.SEEK_CUR:
		newOffset += int64(offset)
	case os.SEEK_END:
		newOffset = fr.size + int64(offset)
	case os.SEEK_SET:
		newOffset = int64(offset)
	}

	if newOffset < 0 {
		err = fmt.Errorf("cannot seek to negative position")
	} else {
		if fr.offset != newOffset {
			fr.reset()
		}

		// No problems, set the offset.
		fr.offset = newOffset
	}

	return fr.offset, err
}

func (fr *fileReader) Close() error {
	return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
}

// reader prepares the current reader at the current offset, ensuring it's
// buffered and ready to go.
func (fr *fileReader) reader() (io.Reader, error) {
	if fr.err != nil {
		return nil, fr.err
	}

	if fr.rc != nil {
		return fr.brd, nil
	}

	// If we don't have a reader, open one up.
	rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): If the path is not found, we simply return a
			// reader that returns io.EOF. However, we do not set fr.rc,
			// allowing future attempts at getting a reader to possibly
			// succeed if the file turns up later.
			return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
		default:
			return nil, err
		}
	}

	fr.rc = rc

	if fr.brd == nil {
		fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
	} else {
		fr.brd.Reset(fr.rc)
	}

	return fr.brd, nil
}

// reset resets the reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *fileReader) reset() {
	if fr.err != nil {
		return
	}
	if fr.rc != nil {
		fr.rc.Close()
		fr.rc = nil
	}
}

func (fr *fileReader) closeWithErr(err error) error {
	if fr.err != nil {
		return fr.err
	}

	fr.err = err

	// close and release reader chain
	if fr.rc != nil {
		fr.rc.Close()
	}

	fr.rc = nil
	fr.brd = nil

	return fr.err
}
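
A small sketch of the buffer-reuse pattern reader() and reset() implement: one bufio.Reader is allocated once and Reset onto each new backend connection instead of being reallocated (strings.Reader stands in for the driver connection here):

package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	br := bufio.NewReaderSize(strings.NewReader("first connection"), 32)
	a, _ := ioutil.ReadAll(br)

	// After a seek, the old connection is dropped and the same buffer is
	// pointed at a fresh reader instead of being reallocated.
	br.Reset(strings.NewReader("second connection"))
	b, _ := ioutil.ReadAll(br)

	fmt.Println(string(a), "/", string(b))
}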

114
vendor/github.com/docker/distribution/registry/storage/garbagecollect.go
generated
vendored
@@ -1,114 +0,0 @@
package storage

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

func emit(format string, a ...interface{}) {
	fmt.Printf(format+"\n", a...)
}

// MarkAndSweep performs a mark and sweep of registry data.
func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, dryRun bool) error {
	repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
	if !ok {
		return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator")
	}

	// mark
	markSet := make(map[digest.Digest]struct{})
	err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
		emit(repoName)

		var err error
		named, err := reference.WithName(repoName)
		if err != nil {
			return fmt.Errorf("failed to parse repo name %s: %v", repoName, err)
		}
		repository, err := registry.Repository(ctx, named)
		if err != nil {
			return fmt.Errorf("failed to construct repository: %v", err)
		}

		manifestService, err := repository.Manifests(ctx)
		if err != nil {
			return fmt.Errorf("failed to construct manifest service: %v", err)
		}

		manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator)
		if !ok {
			return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator")
		}

		err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
			// Mark the manifest's blob
			emit("%s: marking manifest %s", repoName, dgst)
			markSet[dgst] = struct{}{}

			manifest, err := manifestService.Get(ctx, dgst)
			if err != nil {
				return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err)
			}

			descriptors := manifest.References()
			for _, descriptor := range descriptors {
				markSet[descriptor.Digest] = struct{}{}
				emit("%s: marking blob %s", repoName, descriptor.Digest)
			}

			return nil
		})

		if err != nil {
			// In certain situations such as unfinished uploads, deleting all
			// tags in S3 or removing the _manifests folder manually, this
			// error may be of type PathNotFound.
			//
			// In these cases we can continue marking other manifests safely.
			if _, ok := err.(driver.PathNotFoundError); ok {
				return nil
			}
		}

		return err
	})

	if err != nil {
		return fmt.Errorf("failed to mark: %v", err)
	}

	// sweep
	blobService := registry.Blobs()
	deleteSet := make(map[digest.Digest]struct{})
	err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
		// check if digest is in markSet. If not, delete it!
		if _, ok := markSet[dgst]; !ok {
			deleteSet[dgst] = struct{}{}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("error enumerating blobs: %v", err)
	}
	emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet), len(deleteSet))
	// Construct vacuum
	vacuum := NewVacuum(ctx, storageDriver)
	for dgst := range deleteSet {
		emit("blob eligible for deletion: %s", dgst)
		if dryRun {
			continue
		}
		err = vacuum.RemoveBlob(string(dgst))
		if err != nil {
			return fmt.Errorf("failed to delete blob %s: %v", dgst, err)
		}
	}

	return err
}
|
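For context on how this removed garbage collector was driven, here is a minimal sketch of invoking MarkAndSweep against the in-memory driver. It is a sketch under the assumption that the storage package looks as it does at this version; EnableDelete is required so the sweep phase can actually remove blobs, and dryRun=true only reports candidates.

package main

import (
	"log"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	driver := inmemory.New()

	// NewRegistry and MarkAndSweep are both defined in the files removed
	// by this diff; EnableDelete allows the sweep to delete blobs.
	registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
	if err != nil {
		log.Fatal(err)
	}

	// dryRun=true: mark everything, then only report eligible blobs.
	if err := storage.MarkAndSweep(ctx, driver, registry, true); err != nil {
		log.Fatal(err)
	}
}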
71 vendor/github.com/docker/distribution/registry/storage/io.go generated vendored
@@ -1,71 +0,0 @@
package storage

import (
	"errors"
	"io"
	"io/ioutil"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
)

const (
	maxBlobGetSize = 4 << 20
)

func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
	r, err := driver.Reader(ctx, p, 0)
	if err != nil {
		return nil, err
	}

	return readAllLimited(r, maxBlobGetSize)
}

func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
	r = limitReader(r, limit)
	return ioutil.ReadAll(r)
}

// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
// this returns an error when the limit is reached.
func limitReader(r io.Reader, n int64) io.Reader {
	return &limitedReader{r: r, n: n}
}

// limitedReader implements a reader that errors when the limit is reached.
//
// Partially cribbed from net/http.MaxBytesReader.
type limitedReader struct {
	r   io.Reader // underlying reader
	n   int64     // max bytes remaining
	err error     // sticky error
}

func (l *limitedReader) Read(p []byte) (n int, err error) {
	if l.err != nil {
		return 0, l.err
	}
	if len(p) == 0 {
		return 0, nil
	}
	// If they asked for a 32KB byte read but only 5 bytes are
	// remaining, no need to read 32KB. 6 bytes will answer the
	// question of whether we hit the limit or go past it.
	if int64(len(p)) > l.n+1 {
		p = p[:l.n+1]
	}
	n, err = l.r.Read(p)

	if int64(n) <= l.n {
		l.n -= int64(n)
		l.err = err
		return n, err
	}

	n = int(l.n)
	l.n = 0

	l.err = errors.New("storage: read exceeds limit")
	return n, l.err
}
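Since limitedReader is unexported, the pattern is easiest to see standalone. The following is a simplified re-sketch of the same error-on-limit idea (no sticky error), not the package's own API; the errLimitReader name is made up for illustration.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// errLimitReader mirrors limitedReader above: unlike io.LimitReader,
// it surfaces an error once more than n bytes would be read.
type errLimitReader struct {
	r io.Reader
	n int64 // max bytes remaining
}

func (l *errLimitReader) Read(p []byte) (int, error) {
	// Read at most one byte past the limit so we can tell "exactly at
	// the limit" apart from "over the limit".
	if int64(len(p)) > l.n+1 {
		p = p[:l.n+1]
	}
	n, err := l.r.Read(p)
	if int64(n) <= l.n {
		l.n -= int64(n)
		return n, err
	}
	n = int(l.n)
	l.n = 0
	return n, errors.New("read exceeds limit")
}

func main() {
	r := &errLimitReader{r: strings.NewReader("0123456789"), n: 4}
	b, err := io.ReadAll(r)
	fmt.Printf("%q %v\n", b, err) // "0123" read exceeds limit
}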
470 vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go generated vendored
@@ -1,470 +0,0 @@
package storage

import (
	"fmt"
	"net/http"
	"path"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/uuid"
	"github.com/opencontainers/go-digest"
)

// linkPathFunc describes a function that can resolve a link based on the
// repository name and digest.
type linkPathFunc func(name string, dgst digest.Digest) (string, error)

// linkedBlobStore provides a full BlobService that namespaces the blobs to a
// given repository. Effectively, it manages the links in a given repository
// that grant access to the global blob store.
type linkedBlobStore struct {
	*blobStore
	registry               *registry
	blobServer             distribution.BlobServer
	blobAccessController   distribution.BlobDescriptorService
	repository             distribution.Repository
	ctx                    context.Context // only to be used where context can't come through method args
	deleteEnabled          bool
	resumableDigestEnabled bool

	// linkPathFns specifies one or more path functions allowing one to
	// control the repository blob link set to which the blob store
	// dispatches. This is required because manifest and layer blobs have not
	// yet been fully merged. At some point, this functionality should be
	// removed and the blob links folder should be merged. The first entry is
	// treated as the "canonical" link location and will be used for writes.
	linkPathFns []linkPathFunc

	// linkDirectoryPathSpec locates the root directories in which one might find links
	linkDirectoryPathSpec pathSpec
}

var _ distribution.BlobStore = &linkedBlobStore{}

func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	return lbs.blobAccessController.Stat(ctx, dgst)
}

func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
	canonical, err := lbs.Stat(ctx, dgst) // access check
	if err != nil {
		return nil, err
	}

	return lbs.blobStore.Get(ctx, canonical.Digest)
}

func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
	canonical, err := lbs.Stat(ctx, dgst) // access check
	if err != nil {
		return nil, err
	}

	return lbs.blobStore.Open(ctx, canonical.Digest)
}

func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	canonical, err := lbs.Stat(ctx, dgst) // access check
	if err != nil {
		return err
	}

	if canonical.MediaType != "" {
		// Set the repository local content type.
		w.Header().Set("Content-Type", canonical.MediaType)
	}

	return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
}

func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	dgst := digest.FromBytes(p)
	// Place the data in the blob store first.
	desc, err := lbs.blobStore.Put(ctx, mediaType, p)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting into main store: %v", err)
		return distribution.Descriptor{}, err
	}

	if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil {
		return distribution.Descriptor{}, err
	}

	// TODO(stevvooe): Write out mediatype if incoming differs from what is
	// returned by Put above. Note that we should allow updates for a given
	// repository.

	return desc, lbs.linkBlob(ctx, desc)
}

type optionFunc func(interface{}) error

func (f optionFunc) Apply(v interface{}) error {
	return f(v)
}

// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
	return optionFunc(func(v interface{}) error {
		opts, ok := v.(*distribution.CreateOptions)
		if !ok {
			return fmt.Errorf("unexpected options type: %T", v)
		}

		opts.Mount.ShouldMount = true
		opts.Mount.From = ref

		return nil
	})
}

// Create begins a blob write session, returning a handle.
func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer")

	var opts distribution.CreateOptions

	for _, option := range options {
		err := option.Apply(&opts)
		if err != nil {
			return nil, err
		}
	}

	if opts.Mount.ShouldMount {
		desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat)
		if err == nil {
			// Mount successful, no need to initiate an upload session
			return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
		}
	}

	uuid := uuid.Generate().String()
	startedAt := time.Now().UTC()

	path, err := pathFor(uploadDataPathSpec{
		name: lbs.repository.Named().Name(),
		id:   uuid,
	})

	if err != nil {
		return nil, err
	}

	startedAtPath, err := pathFor(uploadStartedAtPathSpec{
		name: lbs.repository.Named().Name(),
		id:   uuid,
	})

	if err != nil {
		return nil, err
	}

	// Write a startedat file for this upload
	if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
		return nil, err
	}

	return lbs.newBlobUpload(ctx, uuid, path, startedAt, false)
}

func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
	context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")

	startedAtPath, err := pathFor(uploadStartedAtPathSpec{
		name: lbs.repository.Named().Name(),
		id:   id,
	})

	if err != nil {
		return nil, err
	}

	startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath)
	if err != nil {
		switch err := err.(type) {
		case driver.PathNotFoundError:
			return nil, distribution.ErrBlobUploadUnknown
		default:
			return nil, err
		}
	}

	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
	if err != nil {
		return nil, err
	}

	path, err := pathFor(uploadDataPathSpec{
		name: lbs.repository.Named().Name(),
		id:   id,
	})

	if err != nil {
		return nil, err
	}

	return lbs.newBlobUpload(ctx, id, path, startedAt, true)
}

func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
	if !lbs.deleteEnabled {
		return distribution.ErrUnsupported
	}

	// Ensure the blob is available for deletion
	_, err := lbs.blobAccessController.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	err = lbs.blobAccessController.Clear(ctx, dgst)
	if err != nil {
		return err
	}

	return nil
}

func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error {
	rootPath, err := pathFor(lbs.linkDirectoryPathSpec)
	if err != nil {
		return err
	}
	err = Walk(ctx, lbs.blobStore.driver, rootPath, func(fileInfo driver.FileInfo) error {
		// exit early if directory...
		if fileInfo.IsDir() {
			return nil
		}
		filePath := fileInfo.Path()

		// check if it's a link
		_, fileName := path.Split(filePath)
		if fileName != "link" {
			return nil
		}

		// read the digest found in link
		digest, err := lbs.blobStore.readlink(ctx, filePath)
		if err != nil {
			return err
		}

		// ensure this conforms to the linkPathFns
		_, err = lbs.Stat(ctx, digest)
		if err != nil {
			// we expect this error to occur so we move on
			if err == distribution.ErrBlobUnknown {
				return nil
			}
			return err
		}

		err = ingestor(digest)
		if err != nil {
			return err
		}

		return nil
	})

	if err != nil {
		return err
	}

	return nil
}

func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *distribution.Descriptor) (distribution.Descriptor, error) {
	var stat distribution.Descriptor
	if sourceStat == nil {
		// look up the blob info from the sourceRepo if not already provided
		repo, err := lbs.registry.Repository(ctx, sourceRepo)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		stat, err = repo.Blobs(ctx).Stat(ctx, dgst)
		if err != nil {
			return distribution.Descriptor{}, err
		}
	} else {
		// use the provided blob info
		stat = *sourceStat
	}

	desc := distribution.Descriptor{
		Size: stat.Size,

		// NOTE(stevvooe): The central blob store firewalls media types from
		// other users. The caller should look this up and override the value
		// for the specific repository.
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}
	return desc, lbs.linkBlob(ctx, desc)
}

// newBlobUpload allocates a new upload controller with the given state.
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) {
	fw, err := lbs.driver.Writer(ctx, path, append)
	if err != nil {
		return nil, err
	}

	bw := &blobWriter{
		ctx:                    ctx,
		blobStore:              lbs,
		id:                     uuid,
		startedAt:              startedAt,
		digester:               digest.Canonical.Digester(),
		fileWriter:             fw,
		driver:                 lbs.driver,
		path:                   path,
		resumableDigestEnabled: lbs.resumableDigestEnabled,
	}

	return bw, nil
}

// linkBlob links a valid, written blob into the registry under the named
// repository for the upload controller.
func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution.Descriptor, aliases ...digest.Digest) error {
	dgsts := append([]digest.Digest{canonical.Digest}, aliases...)

	// TODO(stevvooe): Need to write out mediatype for only canonical hash
	// since we don't care about the aliases. They are generally unused except
	// for tarsum but those versions don't care about mediatype.

	// Don't make duplicate links.
	seenDigests := make(map[digest.Digest]struct{}, len(dgsts))

	// only use the first link
	linkPathFn := lbs.linkPathFns[0]

	for _, dgst := range dgsts {
		if _, seen := seenDigests[dgst]; seen {
			continue
		}
		seenDigests[dgst] = struct{}{}

		blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
		if err != nil {
			return err
		}

		if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil {
			return err
		}
	}

	return nil
}

type linkedBlobStatter struct {
	*blobStore
	repository distribution.Repository

	// linkPathFns specifies one or more path functions allowing one to
	// control the repository blob link set to which the blob store
	// dispatches. This is required because manifest and layer blobs have not
	// yet been fully merged. At some point, this functionality should be
	// removed and the blob links folder should be merged. The first entry is
	// treated as the "canonical" link location and will be used for writes.
	linkPathFns []linkPathFunc
}

var _ distribution.BlobDescriptorService = &linkedBlobStatter{}

func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	var (
		found  bool
		target digest.Digest
	)

	// try the many link path functions until we get success or an error that
	// is not PathNotFoundError.
	for _, linkPathFn := range lbs.linkPathFns {
		var err error
		target, err = lbs.resolveWithLinkFunc(ctx, dgst, linkPathFn)

		if err == nil {
			found = true
			break // success!
		}

		switch err := err.(type) {
		case driver.PathNotFoundError:
			// do nothing, just move to the next linkPathFn
		default:
			return distribution.Descriptor{}, err
		}
	}

	if !found {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	if target != dgst {
		// Track when we are doing cross-digest domain lookups, i.e. sha512 to sha256.
		context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
	}

	// TODO(stevvooe): Look up repository local mediatype and replace that on
	// the returned descriptor.

	return lbs.blobStore.statter.Stat(ctx, target)
}

func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
	// clear any possible existence of a link described in linkPathFns
	for _, linkPathFn := range lbs.linkPathFns {
		blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
		if err != nil {
			return err
		}

		err = lbs.blobStore.driver.Delete(ctx, blobLinkPath)
		if err != nil {
			switch err := err.(type) {
			case driver.PathNotFoundError:
				continue // just ignore this error and continue
			default:
				return err
			}
		}
	}

	return nil
}

// resolveWithLinkFunc allows us to read a link to a resource with different
// linkPathFuncs to let us try a few different paths before returning not
// found.
func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) {
	blobLinkPath, err := linkPathFn(lbs.repository.Named().Name(), dgst)
	if err != nil {
		return "", err
	}

	return lbs.blobStore.readlink(ctx, blobLinkPath)
}

func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	// The canonical descriptor for a blob is set at the commit phase of upload
	return nil
}

// blobLinkPath provides the path to the blob link, also known as layers.
func blobLinkPath(name string, dgst digest.Digest) (string, error) {
	return pathFor(layerLinkPathSpec{name: name, digest: dgst})
}

// manifestRevisionLinkPath provides the path to the manifest revision link.
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
	return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
}
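As a usage illustration, cross-repository blob mounting via WithMountFrom looks roughly like the sketch below. The repository names are made up, error handling is abbreviated, and the whole thing assumes the storage API as it exists at this version; note that a successful mount surfaces as distribution.ErrBlobMounted rather than a writer.

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	reg, err := storage.NewRegistry(ctx, inmemory.New())
	if err != nil {
		log.Fatal(err)
	}

	// Put a blob into the "source" repository.
	srcName, _ := reference.WithName("source")
	srcRepo, _ := reg.Repository(ctx, srcName)
	desc, err := srcRepo.Blobs(ctx).Put(ctx, "application/octet-stream", []byte("layer data"))
	if err != nil {
		log.Fatal(err)
	}

	// Ask the "target" repository to mount it rather than re-upload.
	canonical, _ := reference.WithDigest(srcName, desc.Digest)
	dstName, _ := reference.WithName("target")
	dstRepo, _ := reg.Repository(ctx, dstName)

	_, err = dstRepo.Blobs(ctx).Create(ctx, storage.WithMountFrom(canonical))
	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
		fmt.Println("mounted without upload:", ebm.Descriptor.Digest)
	}
}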
92 vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go generated vendored
@@ -1,92 +0,0 @@
package storage

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/opencontainers/go-digest"
)

// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
type manifestListHandler struct {
	repository distribution.Repository
	blobStore  distribution.BlobStore
	ctx        context.Context
}

var _ ManifestHandler = &manifestListHandler{}

func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal")

	var m manifestlist.DeserializedManifestList
	if err := json.Unmarshal(content, &m); err != nil {
		return nil, err
	}

	return &m, nil
}

func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put")

	m, ok := manifestList.(*manifestlist.DeserializedManifestList)
	if !ok {
		return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList)
	}

	if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
		return "", err
	}

	mt, payload, err := m.Payload()
	if err != nil {
		return "", err
	}

	revision, err := ms.blobStore.Put(ctx, mt, payload)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
		return "", err
	}

	return revision.Digest, nil
}

// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to
// store valid content, leaving trust policies of that content up to
// consumers.
func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error {
	var errs distribution.ErrManifestVerification

	if !skipDependencyVerification {
		// This manifest service is different from the blob service
		// returned by Blob. It uses a linked blob store to ensure that
		// only manifests are accessible.

		manifestService, err := ms.repository.Manifests(ctx)
		if err != nil {
			return err
		}

		for _, manifestDescriptor := range mnfst.References() {
			exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest)
			if err != nil && err != distribution.ErrBlobUnknown {
				errs = append(errs, err)
			}
			if err != nil || !exists {
				// On error here, we always append unknown blob errors.
				errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest})
			}
		}
	}
	if len(errs) != 0 {
		return errs
	}

	return nil
}
141 vendor/github.com/docker/distribution/registry/storage/manifeststore.go generated vendored
@@ -1,141 +0,0 @@
package storage

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/opencontainers/go-digest"
)

// A ManifestHandler gets and puts manifests of a particular type.
type ManifestHandler interface {
	// Unmarshal unmarshals the manifest from a byte slice.
	Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)

	// Put creates or updates the given manifest returning the manifest digest.
	Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
}

// SkipLayerVerification allows a manifest to be Put before its
// layers are on the filesystem
func SkipLayerVerification() distribution.ManifestServiceOption {
	return skipLayerOption{}
}

type skipLayerOption struct{}

func (o skipLayerOption) Apply(m distribution.ManifestService) error {
	if ms, ok := m.(*manifestStore); ok {
		ms.skipDependencyVerification = true
		return nil
	}
	return fmt.Errorf("skip layer verification only valid for manifestStore")
}

type manifestStore struct {
	repository *repository
	blobStore  *linkedBlobStore
	ctx        context.Context

	skipDependencyVerification bool

	schema1Handler      ManifestHandler
	schema2Handler      ManifestHandler
	manifestListHandler ManifestHandler
}

var _ distribution.ManifestService = &manifestStore{}

func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")

	_, err := ms.blobStore.Stat(ms.ctx, dgst)
	if err != nil {
		if err == distribution.ErrBlobUnknown {
			return false, nil
		}

		return false, err
	}

	return true, nil
}

func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestStore).Get")

	// TODO(stevvooe): Need to check descriptor from above to ensure that the
	// mediatype is as we expect for the manifest store.

	content, err := ms.blobStore.Get(ctx, dgst)
	if err != nil {
		if err == distribution.ErrBlobUnknown {
			return nil, distribution.ErrManifestUnknownRevision{
				Name:     ms.repository.Named().Name(),
				Revision: dgst,
			}
		}

		return nil, err
	}

	var versioned manifest.Versioned
	if err = json.Unmarshal(content, &versioned); err != nil {
		return nil, err
	}

	switch versioned.SchemaVersion {
	case 1:
		return ms.schema1Handler.Unmarshal(ctx, dgst, content)
	case 2:
		// This can be an image manifest or a manifest list
		switch versioned.MediaType {
		case schema2.MediaTypeManifest:
			return ms.schema2Handler.Unmarshal(ctx, dgst, content)
		case manifestlist.MediaTypeManifestList:
			return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
		default:
			return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
		}
	}

	return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
}

func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestStore).Put")

	switch manifest.(type) {
	case *schema1.SignedManifest:
		return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification)
	case *schema2.DeserializedManifest:
		return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
	case *manifestlist.DeserializedManifestList:
		return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
	}

	return "", fmt.Errorf("unrecognized manifest type %T", manifest)
}

// Delete removes the revision of the specified manifest.
func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
	context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
	return ms.blobStore.Delete(ctx, dgst)
}

func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error {
	err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error {
		err := ingester(dgst)
		if err != nil {
			return err
		}
		return nil
	})
	return err
}
490 vendor/github.com/docker/distribution/registry/storage/paths.go generated vendored
@@ -1,490 +0,0 @@
package storage

import (
	"fmt"
	"path"
	"strings"

	"github.com/opencontainers/go-digest"
)

const (
	storagePathVersion = "v2"                // fixed storage layout version
	storagePathRoot    = "/docker/registry/" // all driver paths have a prefix

	// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
	// the storage path root would be configurable for all drivers through this
	// package. In reality, we've found it simpler to do this on a per driver
	// basis.
)

// pathFor maps paths based on "object names" and their ids. The "object
// names" mapped are internal to the storage system.
//
// The path layout in the storage backend is roughly as follows:
//
//	<root>/v2
//		-> repositories/
//			-> <name>/
//				-> _manifests/
//					revisions
//						-> <manifest digest path>
//							-> link
//					tags/<tag>
//						-> current/link
//						-> index
//							-> <algorithm>/<hex digest>/link
//				-> _layers/
//					<layer links to blob store>
//				-> _uploads/<id>
//					data
//					startedat
//					hashstates/<algorithm>/<offset>
//	-> blob/<algorithm>
//		<split directory content addressable storage>
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
// throughout the backend, keyed by algorithm and digests of the underlying
// content. Access to the blob store is controlled through links from the
// repository to blobstore.
//
// A repository is made up of layers, manifests and tags. The layers component
// is just a directory of layers which are "linked" into a repository. A layer
// can only be accessed through a qualified repository name if it is linked in
// the repository. Uploads of layers are managed in the uploads directory,
// which is keyed by upload id. When all data for an upload is received, the
// data is moved into the blob store and the upload directory is deleted.
// Abandoned uploads can be garbage collected by reading the startedat file
// and removing uploads that have been active for longer than a certain time.
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
// the blob store and linked into the revision store.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name, tag lookups of manifests, using "current/link" under a
// named tag directory. An index is maintained to support deletions of all
// revisions of a given manifest tag.
//
// We cover the path formats implemented by this path mapper below.
//
// Manifests:
//
//	manifestRevisionsPathSpec:    <root>/v2/repositories/<name>/_manifests/revisions/
//	manifestRevisionPathSpec:     <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
//	manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
//
// Tags:
//
//	manifestTagsPathSpec:              <root>/v2/repositories/<name>/_manifests/tags/
//	manifestTagPathSpec:               <root>/v2/repositories/<name>/_manifests/tags/<tag>/
//	manifestTagCurrentPathSpec:        <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
//	manifestTagIndexPathSpec:          <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
//	manifestTagIndexEntryPathSpec:     <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
//	manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Blobs:
//
//	layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
//
// Uploads:
//
//	uploadDataPathSpec:      <root>/v2/repositories/<name>/_uploads/<id>/data
//	uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
//	uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
//
// Blob Store:
//
//	blobsPathSpec:         <root>/v2/blobs/
//	blobPathSpec:          <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>
//	blobDataPathSpec:      <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//	blobMediaTypePathSpec: <root>/v2/blobs/<algorithm>/<first two hex bytes of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) {

	// Switch on the path object type and return the appropriate path. At
	// first glance, one may wonder why we don't use an interface to
	// accomplish this. By keeping the formatting separate from the pathSpec,
	// we keep the path generation componentized. These specs could be
	// passed to a completely different mapper implementation and generate a
	// different set of paths.
	//
	// For example, imagine migrating from one backend to the other: one could
	// build a filesystem walker that converts a string path in one version,
	// to an intermediate path object, that can be consumed and mapped by the
	// other version.

	rootPrefix := []string{storagePathRoot, storagePathVersion}
	repoPrefix := append(rootPrefix, "repositories")

	switch v := spec.(type) {

	case manifestRevisionsPathSpec:
		return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil

	case manifestRevisionPathSpec:
		components, err := digestPathComponents(v.revision, false)
		if err != nil {
			return "", err
		}

		return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
	case manifestRevisionLinkPathSpec:
		root, err := pathFor(manifestRevisionPathSpec{
			name:     v.name,
			revision: v.revision,
		})

		if err != nil {
			return "", err
		}

		return path.Join(root, "link"), nil
	case manifestTagsPathSpec:
		return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
	case manifestTagPathSpec:
		root, err := pathFor(manifestTagsPathSpec{
			name: v.name,
		})

		if err != nil {
			return "", err
		}

		return path.Join(root, v.tag), nil
	case manifestTagCurrentPathSpec:
		root, err := pathFor(manifestTagPathSpec{
			name: v.name,
			tag:  v.tag,
		})

		if err != nil {
			return "", err
		}

		return path.Join(root, "current", "link"), nil
	case manifestTagIndexPathSpec:
		root, err := pathFor(manifestTagPathSpec{
			name: v.name,
			tag:  v.tag,
		})

		if err != nil {
			return "", err
		}

		return path.Join(root, "index"), nil
	case manifestTagIndexEntryLinkPathSpec:
		root, err := pathFor(manifestTagIndexEntryPathSpec{
			name:     v.name,
			tag:      v.tag,
			revision: v.revision,
		})

		if err != nil {
			return "", err
		}

		return path.Join(root, "link"), nil
	case manifestTagIndexEntryPathSpec:
		root, err := pathFor(manifestTagIndexPathSpec{
			name: v.name,
			tag:  v.tag,
		})

		if err != nil {
			return "", err
		}

		components, err := digestPathComponents(v.revision, false)
		if err != nil {
			return "", err
		}

		return path.Join(root, path.Join(components...)), nil
	case layerLinkPathSpec:
		components, err := digestPathComponents(v.digest, false)
		if err != nil {
			return "", err
		}

		// TODO(stevvooe): Right now, all blobs are linked under "_layers". If
		// we have future migrations, we may want to rename this to "_blobs".
		// A migration strategy would simply leave existing items in place and
		// write the new paths, commit a file then delete the old files.

		blobLinkPathComponents := append(repoPrefix, v.name, "_layers")

		return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
	case blobsPathSpec:
		blobsPathPrefix := append(rootPrefix, "blobs")
		return path.Join(blobsPathPrefix...), nil
	case blobPathSpec:
		components, err := digestPathComponents(v.digest, true)
		if err != nil {
			return "", err
		}

		blobPathPrefix := append(rootPrefix, "blobs")
		return path.Join(append(blobPathPrefix, components...)...), nil
	case blobDataPathSpec:
		components, err := digestPathComponents(v.digest, true)
		if err != nil {
			return "", err
		}

		components = append(components, "data")
		blobPathPrefix := append(rootPrefix, "blobs")
		return path.Join(append(blobPathPrefix, components...)...), nil

	case uploadDataPathSpec:
		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil
	case uploadStartedAtPathSpec:
		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil
	case uploadHashStatePathSpec:
		offset := fmt.Sprintf("%d", v.offset)
		if v.list {
			offset = "" // Limit to the prefix for listing offsets.
		}
		return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
	case repositoriesRootPathSpec:
		return path.Join(repoPrefix...), nil
	default:
		// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
		return "", fmt.Errorf("unknown path spec: %#v", v)
	}
}

// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
	pathSpec()
}

// manifestRevisionsPathSpec describes the directory path for
// the manifest revisions directory.
type manifestRevisionsPathSpec struct {
	name string
}

func (manifestRevisionsPathSpec) pathSpec() {}

// manifestRevisionPathSpec describes the components of the directory path for
// a manifest revision.
type manifestRevisionPathSpec struct {
	name     string
	revision digest.Digest
}

func (manifestRevisionPathSpec) pathSpec() {}

// manifestRevisionLinkPathSpec describes the path components required to look
// up the data link for a revision of a manifest. If this file is not present,
// the manifest blob is not available in the given repo. The contents of this
// file should just be the digest.
type manifestRevisionLinkPathSpec struct {
	name     string
	revision digest.Digest
}

func (manifestRevisionLinkPathSpec) pathSpec() {}

// manifestTagsPathSpec describes the path elements required to point to the
// manifest tags directory.
type manifestTagsPathSpec struct {
	name string
}

func (manifestTagsPathSpec) pathSpec() {}

// manifestTagPathSpec describes the path elements required to point to the
// manifest tag links files under a repository. These contain a blob id that
// can be used to look up the data and signatures.
type manifestTagPathSpec struct {
	name string
	tag  string
}

func (manifestTagPathSpec) pathSpec() {}

// manifestTagCurrentPathSpec describes the link to the current revision for a
// given tag.
type manifestTagCurrentPathSpec struct {
	name string
	tag  string
}

func (manifestTagCurrentPathSpec) pathSpec() {}

// manifestTagIndexPathSpec describes the link to the index of revisions
// with the given tag.
type manifestTagIndexPathSpec struct {
	name string
	tag  string
}

func (manifestTagIndexPathSpec) pathSpec() {}

// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
type manifestTagIndexEntryPathSpec struct {
	name     string
	tag      string
	revision digest.Digest
}

func (manifestTagIndexEntryPathSpec) pathSpec() {}

// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
// manifest with a given tag within the index.
type manifestTagIndexEntryLinkPathSpec struct {
	name     string
	tag      string
	revision digest.Digest
}

func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}

// layerLinkPathSpec specifies a path for a blob link, which is a file with a
// blob id. The blob link will contain a content addressable blob id reference
// into the blob store. The format of the contents is as follows:
//
//	<algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
//	sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256 that can be fetched from the blob store.
type layerLinkPathSpec struct {
	name   string
	digest digest.Digest
}

func (layerLinkPathSpec) pathSpec() {}

// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Paths should be "safe" before getting this far due to strict digest
// requirements but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
	"+", "/",
	".", "/",
	";", "/",
)

// blobsPathSpec contains the path for the blobs directory
type blobsPathSpec struct{}

func (blobsPathSpec) pathSpec() {}

// blobPathSpec contains the path for the registry global blob store.
type blobPathSpec struct {
	digest digest.Digest
}

func (blobPathSpec) pathSpec() {}

// blobDataPathSpec contains the path for the registry global blob store. For
// now, this contains layer data, exclusively.
type blobDataPathSpec struct {
	digest digest.Digest
}

func (blobDataPathSpec) pathSpec() {}

// uploadDataPathSpec defines the path parameters of the data file for
// uploads.
type uploadDataPathSpec struct {
	name string
	id   string
}

func (uploadDataPathSpec) pathSpec() {}

// uploadStartedAtPathSpec defines the path parameters for the file that stores
// the start time of an upload. If it is missing, the upload is considered
// unknown. Admittedly, the presence of this file is an ugly hack to make sure
// we have a way to clean up old or stalled uploads that doesn't rely on driver
// FileInfo behavior. If we come up with a more clever way to do this, we
// should remove this file immediately and rely on the startedAt field from
// the client to enforce time out policies.
type uploadStartedAtPathSpec struct {
	name string
	id   string
}

func (uploadStartedAtPathSpec) pathSpec() {}

// uploadHashStatePathSpec defines the path parameters for the file that stores
// the hash function state of an upload at a specific byte offset. If `list` is
// set, then the path mapper will generate a list prefix for all hash state
// offsets for the upload identified by the name, id, and alg.
type uploadHashStatePathSpec struct {
	name   string
	id     string
	alg    digest.Algorithm
	offset int64
	list   bool
}

func (uploadHashStatePathSpec) pathSpec() {}

// repositoriesRootPathSpec returns the root of repositories
type repositoriesRootPathSpec struct {
}

func (repositoriesRootPathSpec) pathSpec() {}

// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
//	<algorithm>/<hex digest>
//
// If multilevel is true, the first two bytes of the digest will separate
// groups of digest folders. It will be as follows:
//
//	<algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
	hex := dgst.Hex()
	prefix := []string{algorithm}

	var suffix []string

	if multilevel {
		suffix = append(suffix, hex[:2])
	}

	suffix = append(suffix, hex)

	return append(prefix, suffix...), nil
}

// digestFromPath reconstructs a digest from a path.
func digestFromPath(digestPath string) (digest.Digest, error) {

	digestPath = strings.TrimSuffix(digestPath, "/data")
	dir, hex := path.Split(digestPath)
	dir = path.Dir(dir)
	dir, next := path.Split(dir)

	// next is either the algorithm OR the first two characters in the hex string
	var algo string
	if next == hex[:2] {
		algo = path.Base(dir)
	} else {
		algo = next
	}

	dgst := digest.NewDigestFromHex(algo, hex)
	return dgst, dgst.Validate()
}
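To make the layout concrete: since pathFor is unexported, this self-contained sketch simply recomputes what blobDataPathSpec maps to for an example digest, mirroring digestPathComponents with multilevel=true. The fixed root prefix is taken from the constants above.

package main

import (
	"fmt"
	"path"

	"github.com/opencontainers/go-digest"
)

func main() {
	dgst := digest.FromString("hello world")

	// Mirrors blobDataPathSpec:
	//   <root>/v2/blobs/<algorithm>/<first two hex bytes>/<hex digest>/data
	hex := dgst.Hex()
	p := path.Join("/docker/registry/v2", "blobs",
		string(dgst.Algorithm()), hex[:2], hex, "data")
	fmt.Println(p)
	// e.g. /docker/registry/v2/blobs/sha256/b9/b94d27b9.../data
}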
139 vendor/github.com/docker/distribution/registry/storage/purgeuploads.go generated vendored
@@ -1,139 +0,0 @@
package storage

import (
	"path"
	"strings"
	"time"

	"github.com/docker/distribution/context"
	storageDriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/uuid"
	log "github.com/sirupsen/logrus"
)

// uploadData stores the location of temporary files created during a layer upload
// along with the date the upload was started
type uploadData struct {
	containingDir string
	startedAt     time.Time
}

func newUploadData() uploadData {
	return uploadData{
		containingDir: "",
		// default to far in future to protect against missing startedat
		startedAt: time.Now().Add(time.Duration(10000 * time.Hour)),
	}
}

// PurgeUploads deletes files from the upload directory
// created before olderThan. The list of files deleted and errors
// encountered are returned
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
	log.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
	uploadData, errors := getOutstandingUploads(ctx, driver)
	var deleted []string
	for _, uploadData := range uploadData {
		if uploadData.startedAt.Before(olderThan) {
			var err error
			log.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.",
				uploadData.containingDir, uploadData.startedAt, olderThan)
			if actuallyDelete {
				err = driver.Delete(ctx, uploadData.containingDir)
			}
			if err == nil {
				deleted = append(deleted, uploadData.containingDir)
			} else {
				errors = append(errors, err)
			}
		}
	}

	log.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors))
	return deleted, errors
}

// getOutstandingUploads walks the upload directory, collecting files
// which could be eligible for deletion. The only reliable way to
// classify the age of a file is with the date stored in the startedAt
// file, so gather files by UUID with a date from startedAt.
func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
	var errors []error
	uploads := make(map[string]uploadData, 0)

	inUploadDir := false
	root, err := pathFor(repositoriesRootPathSpec{})
	if err != nil {
		return uploads, append(errors, err)
	}

	err = Walk(ctx, driver, root, func(fileInfo storageDriver.FileInfo) error {
		filePath := fileInfo.Path()
		_, file := path.Split(filePath)
		if file[0] == '_' {
			// Reserved directory
			inUploadDir = (file == "_uploads")

			if fileInfo.IsDir() && !inUploadDir {
				return ErrSkipDir
			}

		}

		uuid, isContainingDir := uuidFromPath(filePath)
		if uuid == "" {
			// Cannot reliably delete
			return nil
		}
		ud, ok := uploads[uuid]
		if !ok {
			ud = newUploadData()
		}
		if isContainingDir {
			ud.containingDir = filePath
		}
		if file == "startedat" {
			if t, err := readStartedAtFile(driver, filePath); err == nil {
				ud.startedAt = t
			} else {
				errors = pushError(errors, filePath, err)
			}

		}

		uploads[uuid] = ud
		return nil
	})

	if err != nil {
		errors = pushError(errors, root, err)
	}
	return uploads, errors
}

// uuidFromPath extracts the upload UUID from a given path
// If the UUID is the last path component, this is the containing
// directory for all upload files
func uuidFromPath(path string) (string, bool) {
	components := strings.Split(path, "/")
	for i := len(components) - 1; i >= 0; i-- {
		if u, err := uuid.Parse(components[i]); err == nil {
			return u.String(), i == len(components)-1
		}
	}
	return "", false
}

// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(driver storageDriver.StorageDriver, path string) (time.Time, error) {
	// todo:(richardscothern) - pass in a context
	startedAtBytes, err := driver.GetContent(context.Background(), path)
	if err != nil {
		return time.Now(), err
	}
	startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
	if err != nil {
		return time.Now(), err
	}
	return startedAt, nil
}
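A minimal sketch of driving PurgeUploads, assuming the package as it exists at this version: prune upload directories whose startedat timestamp is more than a week old. Passing actuallyDelete=false would only report what would be removed.

package main

import (
	"fmt"
	"time"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage"
	"github.com/docker/distribution/registry/storage/driver/inmemory"
)

func main() {
	ctx := context.Background()
	driver := inmemory.New()

	// Remove upload directories started more than a week ago.
	cutoff := time.Now().Add(-7 * 24 * time.Hour)
	deleted, errs := storage.PurgeUploads(ctx, driver, cutoff, true)
	fmt.Printf("deleted %d upload dirs, %d errors\n", len(deleted), len(errs))
}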
306 vendor/github.com/docker/distribution/registry/storage/registry.go generated vendored
@@ -1,306 +0,0 @@
package storage
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/docker/distribution"
|
||||
"github.com/docker/distribution/context"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/distribution/registry/storage/cache"
|
||||
storagedriver "github.com/docker/distribution/registry/storage/driver"
|
||||
"github.com/docker/libtrust"
|
||||
)
|
||||
|
||||
// registry is the top-level implementation of Registry for use in the storage
|
||||
// package. All instances should descend from this object.
|
||||
type registry struct {
|
||||
blobStore *blobStore
|
||||
blobServer *blobServer
|
||||
statter *blobStatter // global statter service.
|
||||
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
|
||||
deleteEnabled bool
|
||||
resumableDigestEnabled bool
|
||||
schema1SigningKey libtrust.PrivateKey
|
||||
blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
|
||||
manifestURLs manifestURLs
|
||||
}
|
||||
|
||||
// manifestURLs holds regular expressions for controlling manifest URL whitelisting
|
||||
type manifestURLs struct {
|
||||
allow *regexp.Regexp
|
||||
deny *regexp.Regexp
|
||||
}
|
||||
|
||||
// RegistryOption is the type used for functional options for NewRegistry.
|
||||
type RegistryOption func(*registry) error
|
||||
|
||||
// EnableRedirect is a functional option for NewRegistry. It causes the backend
// blob server to attempt using (StorageDriver).URLFor to serve all blobs.
func EnableRedirect(registry *registry) error {
	registry.blobServer.redirect = true
	return nil
}

// EnableDelete is a functional option for NewRegistry. It enables deletion on
// the registry.
func EnableDelete(registry *registry) error {
	registry.deleteEnabled = true
	return nil
}

// DisableDigestResumption is a functional option for NewRegistry. It should be
// used if the registry is acting as a caching proxy.
func DisableDigestResumption(registry *registry) error {
	registry.resumableDigestEnabled = false
	return nil
}

// ManifestURLsAllowRegexp is a functional option for NewRegistry.
func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption {
	return func(registry *registry) error {
		registry.manifestURLs.allow = r
		return nil
	}
}

// ManifestURLsDenyRegexp is a functional option for NewRegistry.
func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption {
	return func(registry *registry) error {
		registry.manifestURLs.deny = r
		return nil
	}
}

// Schema1SigningKey returns a functional option for NewRegistry. It sets the
// key for signing all schema1 manifests.
func Schema1SigningKey(key libtrust.PrivateKey) RegistryOption {
	return func(registry *registry) error {
		registry.schema1SigningKey = key
		return nil
	}
}

// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the
// factory to create BlobDescriptorServiceFactory middleware.
func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption {
	return func(registry *registry) error {
		registry.blobDescriptorServiceFactory = factory
		return nil
	}
}

// BlobDescriptorCacheProvider returns a functional option for
// NewRegistry. It creates a cached blob statter for use by the
// registry.
func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption {
	// TODO(aaronl): The duplication of statter across several objects is
	// ugly, and prevents us from using interface types in the registry
	// struct. Ideally, blobStore and blobServer should be lazily
	// initialized, and use the current value of
	// blobDescriptorCacheProvider.
	return func(registry *registry) error {
		if blobDescriptorCacheProvider != nil {
			statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter)
			registry.blobStore.statter = statter
			registry.blobServer.statter = statter
			registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider
		}
		return nil
	}
}

// NewRegistry creates a new registry instance from the provided driver. The
// resulting registry may be shared by multiple goroutines but is cheap to
// allocate. If the Redirect option is specified, the backend blob server will
// attempt to use (StorageDriver).URLFor to serve all blobs.
func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
	// create global statter
	statter := &blobStatter{
		driver: driver,
	}

	bs := &blobStore{
		driver:  driver,
		statter: statter,
	}

	registry := &registry{
		blobStore: bs,
		blobServer: &blobServer{
			driver:  driver,
			statter: statter,
			pathFn:  bs.path,
		},
		statter:                statter,
		resumableDigestEnabled: true,
	}

	for _, option := range options {
		if err := option(registry); err != nil {
			return nil, err
		}
	}

	return registry, nil
}
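
// A minimal sketch of how a caller might compose the functional options
// above, assuming the inmemory storage driver and the in-memory blob
// descriptor cache that ship elsewhere in this codebase:
//
//	driver := inmemory.New()
//	ns, err := NewRegistry(ctx, driver,
//		EnableRedirect,
//		EnableDelete,
//		BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()),
//	)
//	if err != nil {
//		// a failing option aborts construction
//	}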

// Scope returns the namespace scope for a registry. The registry
// will only serve repositories contained within this scope.
func (reg *registry) Scope() distribution.Scope {
	return distribution.GlobalScope
}

// Repository returns an instance of the repository tied to the registry.
// Instances should not be shared between goroutines but are cheap to
// allocate. In general, they should be request scoped.
func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) {
	var descriptorCache distribution.BlobDescriptorService
	if reg.blobDescriptorCacheProvider != nil {
		var err error
		descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name())
		if err != nil {
			return nil, err
		}
	}

	return &repository{
		ctx:             ctx,
		registry:        reg,
		name:            canonicalName,
		descriptorCache: descriptorCache,
	}, nil
}

func (reg *registry) Blobs() distribution.BlobEnumerator {
	return reg.blobStore
}

func (reg *registry) BlobStatter() distribution.BlobStatter {
	return reg.statter
}

// repository provides name-scoped access to various services.
type repository struct {
	*registry
	ctx             context.Context
	name            reference.Named
	descriptorCache distribution.BlobDescriptorService
}

// Named returns the name of the repository.
func (repo *repository) Named() reference.Named {
	return repo.name
}

func (repo *repository) Tags(ctx context.Context) distribution.TagService {
	tags := &tagStore{
		repository: repo,
		blobStore:  repo.registry.blobStore,
	}

	return tags
}

// Manifests returns an instance of ManifestService. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
	manifestLinkPathFns := []linkPathFunc{
		// NOTE(stevvooe): Need to search through multiple locations since
		// 2.1.0 unintentionally linked into _layers.
		manifestRevisionLinkPath,
		blobLinkPath,
	}

	manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}

	var statter distribution.BlobDescriptorService = &linkedBlobStatter{
		blobStore:   repo.blobStore,
		repository:  repo,
		linkPathFns: manifestLinkPathFns,
	}

	if repo.registry.blobDescriptorServiceFactory != nil {
		statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
	}

	blobStore := &linkedBlobStore{
		ctx:                  ctx,
		blobStore:            repo.blobStore,
		repository:           repo,
		deleteEnabled:        repo.registry.deleteEnabled,
		blobAccessController: statter,

		// TODO(stevvooe): linkPath limits this blob store to only
		// manifests. This instance cannot be used for blob checks.
		linkPathFns:           manifestLinkPathFns,
		linkDirectoryPathSpec: manifestDirectoryPathSpec,
	}

	ms := &manifestStore{
		ctx:        ctx,
		repository: repo,
		blobStore:  blobStore,
		schema1Handler: &signedManifestHandler{
			ctx:               ctx,
			schema1SigningKey: repo.schema1SigningKey,
			repository:        repo,
			blobStore:         blobStore,
		},
		schema2Handler: &schema2ManifestHandler{
			ctx:          ctx,
			repository:   repo,
			blobStore:    blobStore,
			manifestURLs: repo.registry.manifestURLs,
		},
		manifestListHandler: &manifestListHandler{
			ctx:        ctx,
			repository: repo,
			blobStore:  blobStore,
		},
	}

	// Apply options
	for _, option := range options {
		err := option.Apply(ms)
		if err != nil {
			return nil, err
		}
	}

	return ms, nil
}

// Blobs returns an instance of the BlobStore. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used similar
// to a request local.
func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
	var statter distribution.BlobDescriptorService = &linkedBlobStatter{
		blobStore:   repo.blobStore,
		repository:  repo,
		linkPathFns: []linkPathFunc{blobLinkPath},
	}

	if repo.descriptorCache != nil {
		statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
	}

	if repo.registry.blobDescriptorServiceFactory != nil {
		statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
	}

	return &linkedBlobStore{
		registry:             repo.registry,
		blobStore:            repo.blobStore,
		blobServer:           repo.blobServer,
		blobAccessController: statter,
		repository:           repo,
		ctx:                  ctx,

		// TODO(stevvooe): linkPath limits this blob store to only layers.
		// This instance cannot be used for manifest checks.
		linkPathFns:            []linkPathFunc{blobLinkPath},
		deleteEnabled:          repo.registry.deleteEnabled,
		resumableDigestEnabled: repo.resumableDigestEnabled,
	}
}
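
// A minimal request-scoped sketch, assuming ns is the Namespace returned by
// NewRegistry; the repository and its services are cheap to allocate and
// should not outlive the request:
//
//	named, err := reference.WithName("library/ubuntu")
//	if err != nil {
//		return err
//	}
//	repo, err := ns.Repository(ctx, named)
//	if err != nil {
//		return err
//	}
//	manifests, err := repo.Manifests(ctx)
//	tags := repo.Tags(ctx)
//	blobs := repo.Blobs(ctx)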
136
vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go
generated
vendored
@ -1,136 +0,0 @@
package storage

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/url"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/opencontainers/go-digest"
)

var (
	errUnexpectedURL = errors.New("unexpected URL on layer")
	errMissingURL    = errors.New("missing URL on layer")
	errInvalidURL    = errors.New("invalid URL on layer")
)

// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
	repository   distribution.Repository
	blobStore    distribution.BlobStore
	ctx          context.Context
	manifestURLs manifestURLs
}

var _ ManifestHandler = &schema2ManifestHandler{}

func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
	context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal")

	var m schema2.DeserializedManifest
	if err := json.Unmarshal(content, &m); err != nil {
		return nil, err
	}

	return &m, nil
}

func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")

	m, ok := manifest.(*schema2.DeserializedManifest)
	if !ok {
		return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
	}

	if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
		return "", err
	}

	mt, payload, err := m.Payload()
	if err != nil {
		return "", err
	}

	revision, err := ms.blobStore.Put(ctx, mt, payload)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
		return "", err
	}

	return revision.Digest, nil
}

// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to store
// valid content, leaving trust policies of that content up to consumers.
func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {
	var errs distribution.ErrManifestVerification

	if skipDependencyVerification {
		return nil
	}

	manifestService, err := ms.repository.Manifests(ctx)
	if err != nil {
		return err
	}

	blobsService := ms.repository.Blobs(ctx)

	for _, descriptor := range mnfst.References() {
		var err error

		switch descriptor.MediaType {
		case schema2.MediaTypeForeignLayer:
			// Clients download this layer from an external URL, so do not
			// check for its presence.
			if len(descriptor.URLs) == 0 {
				err = errMissingURL
			}
			allow := ms.manifestURLs.allow
			deny := ms.manifestURLs.deny
			for _, u := range descriptor.URLs {
				var pu *url.URL
				pu, err = url.Parse(u)
				if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {
					err = errInvalidURL
					break
				}
			}
		case schema2.MediaTypeManifest, schema1.MediaTypeManifest:
			var exists bool
			exists, err = manifestService.Exists(ctx, descriptor.Digest)
			if err != nil || !exists {
				err = distribution.ErrBlobUnknown // just coerce to unknown.
			}

			fallthrough // double check the blob store.
		default:
			// forward all else to blob storage
			if len(descriptor.URLs) == 0 {
				_, err = blobsService.Stat(ctx, descriptor.Digest)
			}
		}

		if err != nil {
			if err != distribution.ErrBlobUnknown {
				errs = append(errs, err)
			}

			// On error here, we always append unknown blob errors.
			errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
		}
	}

	if len(errs) != 0 {
		return errs
	}

	return nil
}
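
// The URL policy applied to foreign layers above reduces to a small
// predicate; a standalone sketch with allow/deny as the optional regexps
// configured on the registry:
//
//	func urlAllowed(u string, allow, deny *regexp.Regexp) bool {
//		pu, err := url.Parse(u)
//		if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" {
//			return false
//		}
//		if allow != nil && !allow.MatchString(u) {
//			return false
//		}
//		return deny == nil || !deny.MatchString(u)
//	}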
141
vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go
generated
vendored
@ -1,141 +0,0 @@
package storage

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/reference"
	"github.com/docker/libtrust"
	"github.com/opencontainers/go-digest"
)

// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It
// can unmarshal and put schema1 manifests that have been signed by libtrust.
type signedManifestHandler struct {
	repository        distribution.Repository
	schema1SigningKey libtrust.PrivateKey
	blobStore         distribution.BlobStore
	ctx               context.Context
}

var _ ManifestHandler = &signedManifestHandler{}

func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
	context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal")

	var (
		signatures [][]byte
		err        error
	)

	jsig, err := libtrust.NewJSONSignature(content, signatures...)
	if err != nil {
		return nil, err
	}

	if ms.schema1SigningKey != nil {
		if err := jsig.Sign(ms.schema1SigningKey); err != nil {
			return nil, err
		}
	}

	// Extract the pretty JWS
	raw, err := jsig.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	var sm schema1.SignedManifest
	if err := json.Unmarshal(raw, &sm); err != nil {
		return nil, err
	}
	return &sm, nil
}

func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put")

	sm, ok := manifest.(*schema1.SignedManifest)
	if !ok {
		return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest)
	}

	if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil {
		return "", err
	}

	mt := schema1.MediaTypeManifest
	payload := sm.Canonical

	revision, err := ms.blobStore.Put(ctx, mt, payload)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
		return "", err
	}

	return revision.Digest, nil
}

// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. It ensures that the signature is valid for the
// enclosed payload. As a policy, the registry only tries to store valid
// content, leaving trust policies of that content up to consumers.
func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error {
	var errs distribution.ErrManifestVerification

	if len(mnfst.Name) > reference.NameTotalLengthMax {
		errs = append(errs,
			distribution.ErrManifestNameInvalid{
				Name:   mnfst.Name,
				Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax),
			})
	}

	if !reference.NameRegexp.MatchString(mnfst.Name) {
		errs = append(errs,
			distribution.ErrManifestNameInvalid{
				Name:   mnfst.Name,
				Reason: fmt.Errorf("invalid manifest name format"),
			})
	}

	if len(mnfst.History) != len(mnfst.FSLayers) {
		errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d",
			len(mnfst.History), len(mnfst.FSLayers)))
	}

	if _, err := schema1.Verify(&mnfst); err != nil {
		switch err {
		case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent:
			errs = append(errs, distribution.ErrManifestUnverified{})
		default:
			if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust
				errs = append(errs, distribution.ErrManifestUnverified{})
			} else {
				errs = append(errs, err)
			}
		}
	}

	if !skipDependencyVerification {
		for _, fsLayer := range mnfst.References() {
			_, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest)
			if err != nil {
				if err != distribution.ErrBlobUnknown {
					errs = append(errs, err)
				}

				// On error here, we always append unknown blob errors.
				errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest})
			}
		}
	}
	if len(errs) != 0 {
		return errs
	}

	return nil
}
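
// The name checks above are independent of signature verification; a
// standalone sketch of the same rules using the reference package:
//
//	func validManifestName(name string) bool {
//		return len(name) <= reference.NameTotalLengthMax &&
//			reference.NameRegexp.MatchString(name)
//	}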
191
vendor/github.com/docker/distribution/registry/storage/tagstore.go
generated
vendored
@ -1,191 +0,0 @@
package storage

import (
	"path"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

var _ distribution.TagService = &tagStore{}

// tagStore provides methods to manage manifest tags in a backend storage driver.
// This implementation uses the same on-disk layout as the (now deleted) tag
// store. This provides backward compatibility with current registry deployments,
// which only make use of the Digest field of the returned distribution.Descriptor
// and do not enable full round-tripping of Descriptor objects.
type tagStore struct {
	repository *repository
	blobStore  *blobStore
}

// All returns all tags
func (ts *tagStore) All(ctx context.Context) ([]string, error) {
	var tags []string

	pathSpec, err := pathFor(manifestTagPathSpec{
		name: ts.repository.Named().Name(),
	})
	if err != nil {
		return tags, err
	}

	entries, err := ts.blobStore.driver.List(ctx, pathSpec)
	if err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()}
		default:
			return tags, err
		}
	}

	for _, entry := range entries {
		_, filename := path.Split(entry)
		tags = append(tags, filename)
	}

	return tags, nil
}

// exists returns true if the specified manifest tag exists in the repository.
func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) {
	tagPath, err := pathFor(manifestTagCurrentPathSpec{
		name: ts.repository.Named().Name(),
		tag:  tag,
	})

	if err != nil {
		return false, err
	}

	exists, err := exists(ctx, ts.blobStore.driver, tagPath)
	if err != nil {
		return false, err
	}

	return exists, nil
}

// Tag tags the digest with the given tag, updating the store to point at
// the current tag. The digest must point to a manifest.
func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
	currentPath, err := pathFor(manifestTagCurrentPathSpec{
		name: ts.repository.Named().Name(),
		tag:  tag,
	})

	if err != nil {
		return err
	}

	lbs := ts.linkedBlobStore(ctx, tag)

	// Link into the index
	if err := lbs.linkBlob(ctx, desc); err != nil {
		return err
	}

	// Overwrite the current link
	return ts.blobStore.link(ctx, currentPath, desc.Digest)
}

// Get resolves the current revision for name and tag.
func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
	currentPath, err := pathFor(manifestTagCurrentPathSpec{
		name: ts.repository.Named().Name(),
		tag:  tag,
	})

	if err != nil {
		return distribution.Descriptor{}, err
	}

	revision, err := ts.blobStore.readlink(ctx, currentPath)
	if err != nil {
		switch err.(type) {
		case storagedriver.PathNotFoundError:
			return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
		}

		return distribution.Descriptor{}, err
	}

	return distribution.Descriptor{Digest: revision}, nil
}

// Untag removes the tag association
func (ts *tagStore) Untag(ctx context.Context, tag string) error {
	tagPath, err := pathFor(manifestTagPathSpec{
		name: ts.repository.Named().Name(),
		tag:  tag,
	})

	switch err.(type) {
	case storagedriver.PathNotFoundError:
		return distribution.ErrTagUnknown{Tag: tag}
	case nil:
		break
	default:
		return err
	}

	return ts.blobStore.driver.Delete(ctx, tagPath)
}

// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
// to index manifest blobs by tag name. While the tag store doesn't map
// precisely to the linked blob store, using this ensures the links are
// managed via the same code path.
func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
	return &linkedBlobStore{
		blobStore:  ts.blobStore,
		repository: ts.repository,
		ctx:        ctx,
		linkPathFns: []linkPathFunc{func(name string, dgst digest.Digest) (string, error) {
			return pathFor(manifestTagIndexEntryLinkPathSpec{
				name:     name,
				tag:      tag,
				revision: dgst,
			})
		}},
	}
}

// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by
// digest, tag entries which point to it need to be recovered to avoid dangling tags.
func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) {
	allTags, err := ts.All(ctx)
	switch err.(type) {
	case distribution.ErrRepositoryUnknown:
		// This tag store has been initialized but not yet populated
		break
	case nil:
		break
	default:
		return nil, err
	}

	var tags []string
	for _, tag := range allTags {
		tagLinkPathSpec := manifestTagCurrentPathSpec{
			name: ts.repository.Named().Name(),
			tag:  tag,
		}

		tagLinkPath, err := pathFor(tagLinkPathSpec)
		if err != nil {
			return nil, err
		}

		tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
		if err != nil {
			return nil, err
		}

		if tagDigest == desc.Digest {
			tags = append(tags, tag)
		}
	}

	return tags, nil
}
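
// A minimal sketch of the TagService round trip, assuming repo is a
// repository obtained from the registry and dgst is a manifest digest:
//
//	tags := repo.Tags(ctx)
//	if err := tags.Tag(ctx, "latest", distribution.Descriptor{Digest: dgst}); err != nil {
//		return err
//	}
//	desc, err := tags.Get(ctx, "latest") // desc.Digest == dgst
//	all, err := tags.All(ctx)            // includes "latest"
//	err = tags.Untag(ctx, "latest")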
21
vendor/github.com/docker/distribution/registry/storage/util.go
generated
vendored
@ -1,21 +0,0 @@
package storage

import (
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
)

// exists provides a utility method to test whether or not a path exists in
// the given driver.
func exists(ctx context.Context, drv driver.StorageDriver, path string) (bool, error) {
	if _, err := drv.Stat(ctx, path); err != nil {
		switch err := err.(type) {
		case driver.PathNotFoundError:
			return false, nil
		default:
			return false, err
		}
	}

	return true, nil
}
67
vendor/github.com/docker/distribution/registry/storage/vacuum.go
generated
vendored
@ -1,67 +0,0 @@
package storage

import (
	"path"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/registry/storage/driver"
	"github.com/opencontainers/go-digest"
)

// vacuum contains functions for cleaning up repositories and blobs.
// These functions will only reliably work on strongly consistent
// storage systems: https://en.wikipedia.org/wiki/Consistency_model

// NewVacuum creates a new Vacuum
func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
	return Vacuum{
		ctx:    ctx,
		driver: driver,
	}
}

// Vacuum removes content from the filesystem
type Vacuum struct {
	driver driver.StorageDriver
	ctx    context.Context
}

// RemoveBlob removes a blob from the filesystem
func (v Vacuum) RemoveBlob(dgst string) error {
	d, err := digest.Parse(dgst)
	if err != nil {
		return err
	}

	blobPath, err := pathFor(blobPathSpec{digest: d})
	if err != nil {
		return err
	}

	context.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)

	err = v.driver.Delete(v.ctx, blobPath)
	if err != nil {
		return err
	}

	return nil
}

// RemoveRepository removes a repository directory from the
// filesystem
func (v Vacuum) RemoveRepository(repoName string) error {
	rootForRepository, err := pathFor(repositoriesRootPathSpec{})
	if err != nil {
		return err
	}
	repoDir := path.Join(rootForRepository, repoName)
	context.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
	err = v.driver.Delete(v.ctx, repoDir)
	if err != nil {
		return err
	}

	return nil
}
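
// A minimal usage sketch; both removals are destructive and, per the note
// above, assume a strongly consistent storage driver:
//
//	v := NewVacuum(ctx, driver)
//	if err := v.RemoveBlob("sha256:..."); err != nil {
//		return err
//	}
//	if err := v.RemoveRepository("library/ubuntu"); err != nil {
//		return err
//	}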
59
vendor/github.com/docker/distribution/registry/storage/walk.go
generated
vendored
@ -1,59 +0,0 @@
package storage

import (
	"errors"
	"fmt"
	"sort"

	"github.com/docker/distribution/context"
	storageDriver "github.com/docker/distribution/registry/storage/driver"
)

// ErrSkipDir is used as a return value from onFileFunc to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
var ErrSkipDir = errors.New("skip this directory")

// WalkFn is called once per file by Walk.
// If the returned error is ErrSkipDir and fileInfo refers
// to a directory, the directory will not be entered and Walk
// will continue the traversal. Otherwise Walk will return the error.
type WalkFn func(fileInfo storageDriver.FileInfo) error

// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, f WalkFn) error {
	children, err := driver.List(ctx, from)
	if err != nil {
		return err
	}
	sort.Stable(sort.StringSlice(children))
	for _, child := range children {
		// TODO(stevvooe): Calling driver.Stat for every entry is quite
		// expensive when running against backends with a slow Stat
		// implementation, such as s3. This is very likely a serious
		// performance bottleneck.
		fileInfo, err := driver.Stat(ctx, child)
		if err != nil {
			return err
		}
		err = f(fileInfo)
		skipDir := (err == ErrSkipDir)
		if err != nil && !skipDir {
			return err
		}

		if fileInfo.IsDir() && !skipDir {
			if err := Walk(ctx, driver, child, f); err != nil {
				return err
			}
		}
	}
	return nil
}

// pushError formats an error type given a path and an error
// and pushes it to a slice of errors
func pushError(errors []error, path string, err error) []error {
	return append(errors, fmt.Errorf("%s: %s", path, err))
}
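
// A minimal sketch of Walk with ErrSkipDir, pruning a subtree (the
// "_uploads" name is illustrative) while visiting everything else:
//
//	err := Walk(ctx, driver, "/docker/registry/v2", func(fi storageDriver.FileInfo) error {
//		if fi.IsDir() && path.Base(fi.Path()) == "_uploads" {
//			return ErrSkipDir // do not descend into this directory
//		}
//		fmt.Println(fi.Path())
//		return nil
//	})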
107
vendor/github.com/docker/docker/builder/builder.go
generated
vendored
@ -1,107 +0,0 @@
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder

import (
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/api/types/container"
	containerpkg "github.com/docker/docker/container"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/containerfs"
	"golang.org/x/net/context"
)

const (
	// DefaultDockerfileName is the default filename with Docker commands, read by docker build
	DefaultDockerfileName string = "Dockerfile"
)

// Source defines a location that can be used as a source for the ADD/COPY
// instructions in the builder.
type Source interface {
	// Root returns root path for accessing source
	Root() containerfs.ContainerFS
	// Close allows the caller to signal that the filesystem tree won't be used anymore.
	// For Context implementations using a temporary directory, it is recommended to
	// delete the temporary directory in Close().
	Close() error
	// Hash returns a checksum for a file
	Hash(path string) (string, error)
}

// Backend abstracts calls to a Docker Daemon.
type Backend interface {
	ImageBackend
	ExecBackend

	// Commit creates a new Docker image from an existing Docker container.
	Commit(string, *backend.ContainerCommitConfig) (string, error)
	// ContainerCreateWorkdir creates the workdir
	ContainerCreateWorkdir(containerID string) error

	CreateImage(config []byte, parent string, platform string) (Image, error)

	ImageCacheBuilder
}

// ImageBackend are the interface methods required from an image component
type ImageBackend interface {
	GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
}

// ExecBackend contains the interface methods required for executing containers
type ExecBackend interface {
	// ContainerAttachRaw attaches to container.
	ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
	// ContainerCreate creates a new Docker container and returns potential warnings
	ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
	// ContainerRm removes a container specified by `id`.
	ContainerRm(name string, config *types.ContainerRmConfig) error
	// ContainerKill stops the container execution abruptly.
	ContainerKill(containerID string, sig uint64) error
	// ContainerStart starts a new container
	ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
	// ContainerWait stops processing until the given container is stopped.
	ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
}

// Result is the output produced by a Builder
type Result struct {
	ImageID   string
	FromImage Image
}

// ImageCacheBuilder represents a generator for stateful image cache.
type ImageCacheBuilder interface {
	// MakeImageCache creates a stateful image cache.
	MakeImageCache(cacheFrom []string, platform string) ImageCache
}

// ImageCache abstracts an image cache.
// (parent image, child runconfig) -> child image
type ImageCache interface {
	// GetCache returns a reference to a cached image whose parent equals `parent`
	// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
	GetCache(parentID string, cfg *container.Config) (imageID string, err error)
}

// Image represents a Docker image used by the builder.
type Image interface {
	ImageID() string
	RunConfig() *container.Config
	MarshalJSON() ([]byte, error)
	OperatingSystem() string
}

// ReleaseableLayer is an image layer that can be mounted and released
type ReleaseableLayer interface {
	Release() error
	Mount() (containerfs.ContainerFS, error)
	Commit(platform string) (ReleaseableLayer, error)
	DiffID() layer.DiffID
}
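
// The GetCache contract above (a miss returns an empty ID and a nil error)
// is easy to get wrong; a minimal in-memory sketch honoring it, with a
// deliberately simplified cache key:
//
//	type mapCache struct{ m map[string]string }
//
//	func (c *mapCache) GetCache(parentID string, cfg *container.Config) (string, error) {
//		if id, ok := c.m[parentID]; ok { // a real key would also hash cfg
//			return id, nil
//		}
//		return "", nil // cache miss: empty ID, nil error
//	}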
129
vendor/github.com/docker/docker/builder/remotecontext/archive.go
generated
vendored
@ -1,129 +0,0 @@
package remotecontext

import (
	"io"
	"os"
	"path/filepath"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/tarsum"
	"github.com/pkg/errors"
)

type archiveContext struct {
	root containerfs.ContainerFS
	sums tarsum.FileInfoSums
}

func (c *archiveContext) Close() error {
	return c.root.RemoveAll(c.root.Path())
}

func convertPathError(err error, cleanpath string) error {
	if err, ok := err.(*os.PathError); ok {
		err.Path = cleanpath
		return err
	}
	return err
}

type modifiableContext interface {
	builder.Source
	// Remove deletes the entry specified by `path`.
	// It is usual for directory entries to delete all their subentries.
	Remove(path string) error
}

// FromArchive returns a build source from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func FromArchive(tarStream io.Reader) (builder.Source, error) {
	root, err := ioutils.TempDir("", "docker-builder")
	if err != nil {
		return nil, err
	}

	// Assume a local file system, since the content is coming from a tar file.
	tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}

	// Make sure we clean up upon error. In the happy case the caller
	// is expected to manage the clean-up.
	defer func() {
		if err != nil {
			tsc.Close()
		}
	}()

	decompressedStream, err := archive.DecompressStream(tarStream)
	if err != nil {
		return nil, err
	}

	sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
	if err != nil {
		return nil, err
	}

	err = chrootarchive.Untar(sum, root, nil)
	if err != nil {
		return nil, err
	}

	tsc.sums = sum.GetSums()

	return tsc, nil
}

func (c *archiveContext) Root() containerfs.ContainerFS {
	return c.root
}

func (c *archiveContext) Remove(path string) error {
	_, fullpath, err := normalize(path, c.root)
	if err != nil {
		return err
	}
	return c.root.RemoveAll(fullpath)
}

func (c *archiveContext) Hash(path string) (string, error) {
	cleanpath, fullpath, err := normalize(path, c.root)
	if err != nil {
		return "", err
	}

	rel, err := c.root.Rel(c.root.Path(), fullpath)
	if err != nil {
		return "", convertPathError(err, cleanpath)
	}

	// Use the checksum of the followed path (not the possible symlink) because
	// this is the file that is actually copied.
	if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
		return tsInfo.Sum(), nil
	}
	// We set sum to path by default for the case where GetFile returns nil.
	// The usual case is if relative path is empty.
	return path, nil // backwards compat TODO: see if really needed
}

func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
	cleanPath = root.Clean(string(root.Separator()) + path)[1:]
	fullPath, err = root.ResolveScopedPath(path, true)
	if err != nil {
		return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
	}
	if _, err := root.Lstat(fullPath); err != nil {
		return "", "", errors.WithStack(convertPathError(err, path))
	}
	return
}
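
// A minimal usage sketch, assuming context.tar holds a build context; the
// returned Source owns the extracted temp dir until Close is called, while
// closing the tar stream stays with the caller:
//
//	f, err := os.Open("context.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	src, err := FromArchive(f)
//	if err != nil {
//		return err
//	}
//	defer src.Close() // removes the temporary directory
//	sum, err := src.Hash("Dockerfile")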
183
vendor/github.com/docker/docker/builder/remotecontext/detect.go
generated
vendored
@ -1,183 +0,0 @@
package remotecontext

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/containerd/continuity/driver"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/builder/dockerignore"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// ClientSessionRemote is the identifier for the client-session context transport
const ClientSessionRemote = "client-session"

// Detect returns a context and dockerfile from remote location or local
// archive. progressReader is only used if remoteURL is actually a URL
// (not empty, and not a Git endpoint).
func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) {
	remoteURL := config.Options.RemoteContext
	dockerfilePath := config.Options.Dockerfile

	switch {
	case remoteURL == "":
		remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)
	case remoteURL == ClientSessionRemote:
		res, err := parser.Parse(config.Source)
		if err != nil {
			return nil, nil, err
		}
		return nil, res, nil
	case urlutil.IsGitURL(remoteURL):
		remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
	case urlutil.IsURL(remoteURL):
		remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc)
	default:
		err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
	}
	return
}

func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {
	defer rc.Close()
	c, err := FromArchive(rc)
	if err != nil {
		return nil, nil, err
	}

	return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}

func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) {
	df, err := openAt(c, dockerfilePath)
	if err != nil {
		if os.IsNotExist(err) {
			if dockerfilePath == builder.DefaultDockerfileName {
				lowercase := strings.ToLower(dockerfilePath)
				if _, err := StatAt(c, lowercase); err == nil {
					return withDockerfileFromContext(c, lowercase)
				}
			}
			return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error
		}
		c.Close()
		return nil, nil, err
	}

	res, err := readAndParseDockerfile(dockerfilePath, df)
	if err != nil {
		return nil, nil, err
	}

	df.Close()

	if err := removeDockerfile(c, dockerfilePath); err != nil {
		c.Close()
		return nil, nil, err
	}

	return c, res, nil
}

func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) {
	c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource
	if err != nil {
		return nil, nil, err
	}
	return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}

func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) {
	var dockerfile io.ReadCloser
	dockerfileFoundErr := errors.New("found-dockerfile")
	c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){
		mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
			dockerfile = rc
			return nil, dockerfileFoundErr
		},
		// fallback handler (tar context)
		"": func(rc io.ReadCloser) (io.ReadCloser, error) {
			return progressReader(rc), nil
		},
	})
	switch {
	case err == dockerfileFoundErr:
		res, err := parser.Parse(dockerfile)
		return nil, res, err
	case err != nil:
		return nil, nil, err
	}
	return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}

func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
	f, err := openAt(c, ".dockerignore")
	// Note that a missing .dockerignore file isn't treated as an error
	switch {
	case os.IsNotExist(err):
		return nil
	case err != nil:
		return err
	}
	excludes, err := dockerignore.ReadAll(f)
	if err != nil {
		f.Close()
		return err
	}
	f.Close()
	filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
	for _, fileToRemove := range filesToRemove {
		if rm, _ := fileutils.Matches(fileToRemove, excludes); rm {
			if err := c.Remove(fileToRemove); err != nil {
				logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
			}
		}
	}
	return nil
}

func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
	br := bufio.NewReader(rc)
	if _, err := br.Peek(1); err != nil {
		if err == io.EOF {
			return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
		}
		return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
	}
	return parser.Parse(br)
}

func openAt(remote builder.Source, path string) (driver.File, error) {
	fullPath, err := FullPath(remote, path)
	if err != nil {
		return nil, err
	}
	return remote.Root().Open(fullPath)
}

// StatAt is a helper for calling Stat on a path from a source
func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
	fullPath, err := FullPath(remote, path)
	if err != nil {
		return nil, err
	}
	return remote.Root().Stat(fullPath)
}

// FullPath is a helper for getting a full path for a path from a source
func FullPath(remote builder.Source, path string) (string, error) {
	fullPath, err := remote.Root().ResolveScopedPath(path, true)
	if err != nil {
		return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
	}
	return fullPath, nil
}
75
vendor/github.com/docker/docker/builder/remotecontext/errors.go
generated
vendored
@ -1,75 +0,0 @@
package remotecontext

type notFoundError string

func (e notFoundError) Error() string {
	return string(e)
}

func (notFoundError) NotFound() {}

type requestError string

func (e requestError) Error() string {
	return string(e)
}

func (e requestError) InvalidParameter() {}

type unauthorizedError string

func (e unauthorizedError) Error() string {
	return string(e)
}

func (unauthorizedError) Unauthorized() {}

type forbiddenError string

func (e forbiddenError) Error() string {
	return string(e)
}

func (forbiddenError) Forbidden() {}

type dnsError struct {
	cause error
}

func (e dnsError) Error() string {
	return e.cause.Error()
}

func (e dnsError) NotFound() {}

func (e dnsError) Cause() error {
	return e.cause
}

type systemError struct {
	cause error
}

func (e systemError) Error() string {
	return e.cause.Error()
}

func (e systemError) SystemError() {}

func (e systemError) Cause() error {
	return e.cause
}

type unknownError struct {
	cause error
}

func (e unknownError) Error() string {
	return e.cause.Error()
}

func (unknownError) Unknown() {}

func (e unknownError) Cause() error {
	return e.cause
}
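
// Callers classify these errors structurally, by marker method, rather than
// by concrete type; a minimal sketch of the detection side:
//
//	func isNotFound(err error) bool {
//		_, ok := err.(interface{ NotFound() })
//		return ok
//	}
//
// Both notFoundError and dnsError satisfy this check.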
45
vendor/github.com/docker/docker/builder/remotecontext/filehash.go
generated
vendored
@ -1,45 +0,0 @@
package remotecontext

import (
	"archive/tar"
	"crypto/sha256"
	"hash"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/tarsum"
)

// NewFileHash returns a new hash that is used for the builder cache keys
func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) {
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return nil, err
		}
	}
	hdr, err := archive.FileInfoHeader(name, fi, link)
	if err != nil {
		return nil, err
	}
	if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return nil, err
	}
	tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
	tsh.Reset() // initialize header
	return tsh, nil
}

type tarsumHash struct {
	hash.Hash
	hdr *tar.Header
}

// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
	// comply with hash.Hash and reset to the state hash had before any writes
	tsh.Hash.Reset()
	tarsum.WriteV1Header(tsh.hdr, tsh.Hash)
}
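
// A minimal usage sketch: the returned hash covers the tar header (file
// metadata) and, once the caller copies the contents in, the file data:
//
//	fi, err := os.Lstat(p)
//	if err != nil {
//		return err
//	}
//	h, err := NewFileHash(p, rel, fi)
//	if err != nil {
//		return err
//	}
//	if fi.Mode().IsRegular() {
//		f, err := os.Open(p)
//		if err != nil {
//			return err
//		}
//		_, err = io.Copy(h, f) // hash the contents as well
//		f.Close()
//	}
//	sum := hex.EncodeToString(h.Sum(nil))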
3
vendor/github.com/docker/docker/builder/remotecontext/generate.go
generated
vendored
@ -1,3 +0,0 @@
package remotecontext

//go:generate protoc --gogoslick_out=. tarsum.proto
29
vendor/github.com/docker/docker/builder/remotecontext/git.go
generated
vendored
@ -1,29 +0,0 @@
package remotecontext

import (
	"os"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext/git"
	"github.com/docker/docker/pkg/archive"
)

// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
func MakeGitContext(gitURL string) (builder.Source, error) {
	root, err := git.Clone(gitURL)
	if err != nil {
		return nil, err
	}

	c, err := archive.Tar(root, archive.Uncompressed)
	if err != nil {
		return nil, err
	}

	defer func() {
		// TODO: print errors?
		c.Close()
		os.RemoveAll(root)
	}()
	return FromArchive(c)
}
100
vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go
generated
vendored
@ -1,100 +0,0 @@
package remotecontext

import (
	"encoding/hex"
	"os"
	"strings"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/pools"
	"github.com/pkg/errors"
)

// NewLazySource creates a new LazyContext. LazyContext defines a hashed build
// context based on a root directory. Individual files are hashed the first
// time they are requested. It is not safe to call methods of LazyContext
// concurrently.
func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
	return &lazySource{
		root: root,
		sums: make(map[string]string),
	}, nil
}

type lazySource struct {
	root containerfs.ContainerFS
	sums map[string]string
}

func (c *lazySource) Root() containerfs.ContainerFS {
	return c.root
}

func (c *lazySource) Close() error {
	return nil
}

func (c *lazySource) Hash(path string) (string, error) {
	cleanPath, fullPath, err := normalize(path, c.root)
	if err != nil {
		return "", err
	}

	fi, err := c.root.Lstat(fullPath)
	if err != nil {
		return "", errors.WithStack(err)
	}

	relPath, err := Rel(c.root, fullPath)
	if err != nil {
		return "", errors.WithStack(convertPathError(err, cleanPath))
	}

	sum, ok := c.sums[relPath]
	if !ok {
		sum, err = c.prepareHash(relPath, fi)
		if err != nil {
			return "", err
		}
	}

	return sum, nil
}

func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
	p := c.root.Join(c.root.Path(), relPath)
	h, err := NewFileHash(p, relPath, fi)
	if err != nil {
		return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
	}
	if fi.Mode().IsRegular() && fi.Size() > 0 {
		f, err := c.root.Open(p)
		if err != nil {
			return "", errors.Wrapf(err, "failed to open %s", relPath)
		}
		defer f.Close()
		if _, err := pools.Copy(h, f); err != nil {
			return "", errors.Wrapf(err, "failed to copy file data for %s", relPath)
		}
	}
	sum := hex.EncodeToString(h.Sum(nil))
	c.sums[relPath] = sum
	return sum, nil
}

// Rel makes a path relative to base path. Same as `filepath.Rel` but can also
// handle UUID paths in windows.
func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
	// filepath.Rel can't handle UUID paths in windows
	if basepath.OS() == "windows" {
		pfx := basepath.Path() + `\`
		if strings.HasPrefix(targpath, pfx) {
			p := strings.TrimPrefix(targpath, pfx)
			if p == "" {
				p = "."
			}
			return p, nil
		}
	}
	return basepath.Rel(basepath.Path(), targpath)
}
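
// A minimal usage sketch; hashes are computed on first request and memoized
// in c.sums, so repeated lookups of the same path are cheap:
//
//	src, err := NewLazySource(containerfs.NewLocalContainerFS("/tmp/ctx"))
//	if err != nil {
//		return err
//	}
//	sum1, err := src.Hash("Dockerfile") // hashed now
//	sum2, err := src.Hash("Dockerfile") // served from the memo map; equal to sum1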
27
vendor/github.com/docker/docker/builder/remotecontext/mimetype.go
generated
vendored
@ -1,27 +0,0 @@
package remotecontext

import (
	"mime"
	"net/http"
)

// mimeTypes stores the MIME content type.
var mimeTypes = struct {
	TextPlain   string
	OctetStream string
}{"text/plain", "application/octet-stream"}

// detectContentType returns a best guess representation of the MIME
// content type for the bytes at c. The value detected by
// http.DetectContentType is guaranteed not to be empty, defaulting to
// application/octet-stream when a better guess cannot be made. The
// result of this detection is then run through mime.ParseMediaType(),
// which separates the actual MIME string from any parameters.
func detectContentType(c []byte) (string, map[string]string, error) {
	ct := http.DetectContentType(c)
	contentType, args, err := mime.ParseMediaType(ct)
	if err != nil {
		return "", nil, err
	}
	return contentType, args, nil
}
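
// A minimal usage sketch; the sniffer needs only a small prefix of the data:
//
//	ct, args, err := detectContentType([]byte("FROM alpine\n"))
//	// ct == "text/plain"; http.DetectContentType also reports a charset
//	// parameter, which ParseMediaType splits off into args.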
153
vendor/github.com/docker/docker/builder/remotecontext/remote.go
generated
vendored
@ -1,153 +0,0 @@
package remotecontext

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"regexp"

	"github.com/docker/docker/builder"
	"github.com/pkg/errors"
)

// When downloading remote contexts, limit the amount (in bytes)
// to be read from the response body in order to detect its Content-Type
const maxPreambleLength = 100

const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`

var mimeRe = regexp.MustCompile(acceptableRemoteMIME)

// MakeRemoteContext downloads a context from remoteURL and returns it.
//
// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of
// maxPreambleLength bytes from the body to help detect the MIME type.
// Look at acceptableRemoteMIME for more details.
//
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned.
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) {
	f, err := GetWithStatusError(remoteURL)
	if err != nil {
		return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err)
	}
	defer f.Body.Close()

	var contextReader io.ReadCloser
	if contentTypeHandlers != nil {
		contentType := f.Header.Get("Content-Type")
		clen := f.ContentLength

		contentType, contextReader, err = inspectResponse(contentType, f.Body, clen)
		if err != nil {
			return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err)
		}
		defer contextReader.Close()

		// This loop tries to find a content-type handler for the detected content-type.
		// If it could not find one from the caller-supplied map, it tries the empty content-type `""`,
		// which is interpreted as a fallback handler (usually used for raw tar contexts).
		for _, ct := range []string{contentType, ""} {
			if fn, ok := contentTypeHandlers[ct]; ok {
				defer contextReader.Close()
				if contextReader, err = fn(contextReader); err != nil {
					return nil, err
				}
				break
			}
		}
	}

	// Pass through - this is a pre-packaged context, presumably
	// with a Dockerfile with the right name inside it.
	return FromArchive(contextReader)
}

// GetWithStatusError does an http.Get() and returns an error if the
// status code is 4xx or 5xx.
func GetWithStatusError(address string) (resp *http.Response, err error) {
	if resp, err = http.Get(address); err != nil {
		if uerr, ok := err.(*url.Error); ok {
			if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
				return nil, dnsError{err}
			}
		}
		return nil, systemError{err}
	}
	if resp.StatusCode < 400 {
		return resp, nil
	}
	msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status)
	body, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return nil, errors.Wrap(systemError{err}, msg+": error reading body")
	}

	msg += ": " + string(bytes.TrimSpace(body))
	switch resp.StatusCode {
	case http.StatusNotFound:
		return nil, notFoundError(msg)
	case http.StatusBadRequest:
		return nil, requestError(msg)
	case http.StatusUnauthorized:
		return nil, unauthorizedError(msg)
	case http.StatusForbidden:
		return nil, forbiddenError(msg)
	}
	return nil, unknownError{errors.New(msg)}
}

// inspectResponse looks into the http response data at r to determine whether its
// content-type is on the list of acceptable content types for remote build contexts.
// This function returns:
//   - a string representation of the detected content-type
//   - an io.Reader for the response body
//   - an error value which will be non-nil either when something goes wrong while
//     reading bytes from r or when the detected content-type is not acceptable.
func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser, error) {
	plen := clen
	if plen <= 0 || plen > maxPreambleLength {
		plen = maxPreambleLength
	}

	preamble := make([]byte, plen)
	rlen, err := r.Read(preamble)
	if rlen == 0 {
		return ct, ioutil.NopCloser(r), errors.New("empty response")
	}
	if err != nil && err != io.EOF {
		return ct, ioutil.NopCloser(r), err
	}

	preambleR := bytes.NewReader(preamble[:rlen])
	bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
	// Some web servers will use application/octet-stream as the default
	// content type for files without an extension (e.g. 'Dockerfile'),
	// so if we receive this value we better check for text content.
	contentType := ct
	if len(ct) == 0 || ct == mimeTypes.OctetStream {
		contentType, _, err = detectContentType(preamble)
		if err != nil {
			return contentType, bodyReader, err
		}
	}

	contentType = selectAcceptableMIME(contentType)
	var cterr error
	if len(contentType) == 0 {
		cterr = fmt.Errorf("unsupported Content-Type %q", ct)
		contentType = ct
	}

	return contentType, bodyReader, cterr
}

func selectAcceptableMIME(ct string) string {
	return mimeRe.FindString(ct)
}
|
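The handler-map protocol documented above is easiest to see from the caller's side. The following is an illustrative sketch only, not part of this commit: the gzip handler, the fallback entry, and the URL are assumptions about how a caller might wire up MakeRemoteContext.

package main

import (
	"compress/gzip"
	"io"

	"github.com/docker/docker/builder/remotecontext"
)

func main() {
	handlers := map[string]func(io.ReadCloser) (io.ReadCloser, error){
		// Keys are MIME types matched by acceptableRemoteMIME; a gzip-compressed
		// tar body gets wrapped in a gzip reader before reaching FromArchive.
		"application/x-gzip": func(rc io.ReadCloser) (io.ReadCloser, error) {
			gz, err := gzip.NewReader(rc)
			if err != nil {
				return nil, err
			}
			return gz, nil
		},
		// The empty key is the fallback handler: treat the body as a raw tar stream.
		"": func(rc io.ReadCloser) (io.ReadCloser, error) { return rc, nil },
	}
	// Placeholder URL; MakeRemoteContext downloads it, applies the first matching
	// handler, and hands the resulting tar stream to FromArchive.
	src, err := remotecontext.MakeRemoteContext("https://example.com/ctx.tar.gz", handlers)
	_ = src
	_ = err
}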
157
vendor/github.com/docker/docker/builder/remotecontext/tarsum.go
generated
vendored
@@ -1,157 +0,0 @@
package remotecontext

import (
	"os"
	"sync"

	"github.com/docker/docker/pkg/containerfs"
	iradix "github.com/hashicorp/go-immutable-radix"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/tonistiigi/fsutil"
)

type hashed interface {
	Digest() digest.Digest
}

// CachableSource is a source that contains cache records for its contents
type CachableSource struct {
	mu   sync.Mutex
	root containerfs.ContainerFS
	tree *iradix.Tree
	txn  *iradix.Txn
}

// NewCachableSource creates new CachableSource
func NewCachableSource(root string) *CachableSource {
	ts := &CachableSource{
		tree: iradix.New(),
		root: containerfs.NewLocalContainerFS(root),
	}
	return ts
}

// MarshalBinary marshals current cache information to a byte array
func (cs *CachableSource) MarshalBinary() ([]byte, error) {
	b := TarsumBackup{Hashes: make(map[string]string)}
	root := cs.getRoot()
	root.Walk(func(k []byte, v interface{}) bool {
		b.Hashes[string(k)] = v.(*fileInfo).sum
		return false
	})
	return b.Marshal()
}

// UnmarshalBinary decodes cache information for presented byte array
func (cs *CachableSource) UnmarshalBinary(data []byte) error {
	var b TarsumBackup
	if err := b.Unmarshal(data); err != nil {
		return err
	}
	txn := iradix.New().Txn()
	for p, v := range b.Hashes {
		txn.Insert([]byte(p), &fileInfo{sum: v})
	}
	cs.mu.Lock()
	defer cs.mu.Unlock()
	cs.tree = txn.Commit()
	return nil
}

// Scan rescans the cache information from the file system
func (cs *CachableSource) Scan() error {
	lc, err := NewLazySource(cs.root)
	if err != nil {
		return err
	}
	txn := iradix.New().Txn()
	err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return errors.Wrapf(err, "failed to walk %s", path)
		}
		rel, err := Rel(cs.root, path)
		if err != nil {
			return err
		}
		h, err := lc.Hash(rel)
		if err != nil {
			return err
		}
		txn.Insert([]byte(rel), &fileInfo{sum: h})
		return nil
	})
	if err != nil {
		return err
	}
	cs.mu.Lock()
	defer cs.mu.Unlock()
	cs.tree = txn.Commit()
	return nil
}

// HandleChange notifies the source about a modification operation
func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
	cs.mu.Lock()
	if cs.txn == nil {
		cs.txn = cs.tree.Txn()
	}
	if kind == fsutil.ChangeKindDelete {
		cs.txn.Delete([]byte(p))
		cs.mu.Unlock()
		return
	}

	h, ok := fi.(hashed)
	if !ok {
		cs.mu.Unlock()
		return errors.Errorf("invalid fileinfo: %s", p)
	}

	hfi := &fileInfo{
		sum: h.Digest().Hex(),
	}
	cs.txn.Insert([]byte(p), hfi)
	cs.mu.Unlock()
	return nil
}

func (cs *CachableSource) getRoot() *iradix.Node {
	cs.mu.Lock()
	if cs.txn != nil {
		cs.tree = cs.txn.Commit()
		cs.txn = nil
	}
	t := cs.tree
	cs.mu.Unlock()
	return t.Root()
}

// Close closes the source
func (cs *CachableSource) Close() error {
	return nil
}

// Hash returns a hash for a single file in the source
func (cs *CachableSource) Hash(path string) (string, error) {
	n := cs.getRoot()
	// TODO: check this for symlinks
	v, ok := n.Get([]byte(path))
	if !ok {
		return path, nil
	}
	return v.(*fileInfo).sum, nil
}

// Root returns a root directory for the source
func (cs *CachableSource) Root() containerfs.ContainerFS {
	return cs.root
}

type fileInfo struct {
	sum string
}

func (fi *fileInfo) Hash() string {
	return fi.sum
}
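For orientation, the exported CachableSource API composes as follows. This is an illustrative sketch only, not part of this commit, and the directory path is a placeholder.

package main

import "github.com/docker/docker/builder/remotecontext"

func main() {
	// Build the cache records for a context directory, then look up one file's hash.
	cs := remotecontext.NewCachableSource("/var/lib/docker/tmp/build-context")
	if err := cs.Scan(); err != nil {
		panic(err)
	}
	sum, err := cs.Hash("Dockerfile")
	_, _ = sum, err
}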
525
vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go
generated
vendored
@@ -1,525 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: tarsum.proto
// DO NOT EDIT!

/*
Package remotecontext is a generated protocol buffer package.

It is generated from these files:
	tarsum.proto

It has these top-level messages:
	TarsumBackup
*/
package remotecontext

import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

import strings "strings"
import reflect "reflect"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

type TarsumBackup struct {
	Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *TarsumBackup) Reset()                    { *m = TarsumBackup{} }
func (*TarsumBackup) ProtoMessage()               {}
func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} }

func (m *TarsumBackup) GetHashes() map[string]string {
	if m != nil {
		return m.Hashes
	}
	return nil
}

func init() {
	proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup")
}
func (this *TarsumBackup) Equal(that interface{}) bool {
	if that == nil {
		if this == nil {
			return true
		}
		return false
	}

	that1, ok := that.(*TarsumBackup)
	if !ok {
		that2, ok := that.(TarsumBackup)
		if ok {
			that1 = &that2
		} else {
			return false
		}
	}
	if that1 == nil {
		if this == nil {
			return true
		}
		return false
	} else if this == nil {
		return false
	}
	if len(this.Hashes) != len(that1.Hashes) {
		return false
	}
	for i := range this.Hashes {
		if this.Hashes[i] != that1.Hashes[i] {
			return false
		}
	}
	return true
}
func (this *TarsumBackup) GoString() string {
	if this == nil {
		return "nil"
	}
	s := make([]string, 0, 5)
	s = append(s, "&remotecontext.TarsumBackup{")
	keysForHashes := make([]string, 0, len(this.Hashes))
	for k := range this.Hashes {
		keysForHashes = append(keysForHashes, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
	mapStringForHashes := "map[string]string{"
	for _, k := range keysForHashes {
		mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k])
	}
	mapStringForHashes += "}"
	if this.Hashes != nil {
		s = append(s, "Hashes: "+mapStringForHashes+",\n")
	}
	s = append(s, "}")
	return strings.Join(s, "")
}
func valueToGoStringTarsum(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *TarsumBackup) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Hashes) > 0 {
		for k := range m.Hashes {
			dAtA[i] = 0xa
			i++
			v := m.Hashes[k]
			mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
			i = encodeVarintTarsum(dAtA, i, uint64(mapSize))
			dAtA[i] = 0xa
			i++
			i = encodeVarintTarsum(dAtA, i, uint64(len(k)))
			i += copy(dAtA[i:], k)
			dAtA[i] = 0x12
			i++
			i = encodeVarintTarsum(dAtA, i, uint64(len(v)))
			i += copy(dAtA[i:], v)
		}
	}
	return i, nil
}

func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
func (m *TarsumBackup) Size() (n int) {
	var l int
	_ = l
	if len(m.Hashes) > 0 {
		for k, v := range m.Hashes {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
			n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize))
		}
	}
	return n
}

func sovTarsum(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}
func sozTarsum(x uint64) (n int) {
	return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *TarsumBackup) String() string {
	if this == nil {
		return "nil"
	}
	keysForHashes := make([]string, 0, len(this.Hashes))
	for k := range this.Hashes {
		keysForHashes = append(keysForHashes, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
	mapStringForHashes := "map[string]string{"
	for _, k := range keysForHashes {
		mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k])
	}
	mapStringForHashes += "}"
	s := strings.Join([]string{`&TarsumBackup{`,
		`Hashes:` + mapStringForHashes + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringTarsum(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("*%v", pv)
}
func (m *TarsumBackup) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowTarsum
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTarsum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthTarsum
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTarsum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowTarsum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthTarsum
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Hashes == nil {
				m.Hashes = make(map[string]string)
			}
			if iNdEx < postIndex {
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTarsum
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowTarsum
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthTarsum
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Hashes[mapkey] = mapvalue
			} else {
				var mapvalue string
				m.Hashes[mapkey] = mapvalue
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipTarsum(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthTarsum
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipTarsum(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowTarsum
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTarsum
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1:
			iNdEx += 8
			return iNdEx, nil
		case 2:
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowTarsum
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthTarsum
			}
			return iNdEx, nil
		case 3:
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowTarsum
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipTarsum(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4:
			return iNdEx, nil
		case 5:
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}

var (
	ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowTarsum   = fmt.Errorf("proto: integer overflow")
)

func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) }

var fileDescriptorTarsum = []byte{
	// 196 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a,
	0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49,
	0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b,
	0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b,
	0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51,
	0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24,
	0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89,
	0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b,
	0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1,
	0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7,
	0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c,
	0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f,
	0xe0, 0x00, 0x00, 0x00,
}
150
vendor/github.com/docker/docker/cli/cobra.go
generated
vendored
@@ -1,150 +0,0 @@
package cli

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/term"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// SetupRootCommand sets default usage, help, and error handling for the
// root command.
func SetupRootCommand(rootCmd *cobra.Command) {
	cobra.AddTemplateFunc("hasSubCommands", hasSubCommands)
	cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands)
	cobra.AddTemplateFunc("operationSubCommands", operationSubCommands)
	cobra.AddTemplateFunc("managementSubCommands", managementSubCommands)
	cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages)

	rootCmd.SetUsageTemplate(usageTemplate)
	rootCmd.SetHelpTemplate(helpTemplate)
	rootCmd.SetFlagErrorFunc(FlagErrorFunc)
	rootCmd.SetHelpCommand(helpCommand)

	rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage")
	rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help")
}

// FlagErrorFunc prints an error message which matches the format of the
// docker/docker/cli error messages
func FlagErrorFunc(cmd *cobra.Command, err error) error {
	if err == nil {
		return nil
	}

	usage := ""
	if cmd.HasSubCommands() {
		usage = "\n\n" + cmd.UsageString()
	}
	return StatusError{
		Status:     fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
		StatusCode: 125,
	}
}

var helpCommand = &cobra.Command{
	Use:               "help [command]",
	Short:             "Help about the command",
	PersistentPreRun:  func(cmd *cobra.Command, args []string) {},
	PersistentPostRun: func(cmd *cobra.Command, args []string) {},
	RunE: func(c *cobra.Command, args []string) error {
		cmd, args, e := c.Root().Find(args)
		if cmd == nil || e != nil || len(args) > 0 {
			return errors.Errorf("unknown help topic: %v", strings.Join(args, " "))
		}

		helpFunc := cmd.HelpFunc()
		helpFunc(cmd, args)
		return nil
	},
}

func hasSubCommands(cmd *cobra.Command) bool {
	return len(operationSubCommands(cmd)) > 0
}

func hasManagementSubCommands(cmd *cobra.Command) bool {
	return len(managementSubCommands(cmd)) > 0
}

func operationSubCommands(cmd *cobra.Command) []*cobra.Command {
	cmds := []*cobra.Command{}
	for _, sub := range cmd.Commands() {
		if sub.IsAvailableCommand() && !sub.HasSubCommands() {
			cmds = append(cmds, sub)
		}
	}
	return cmds
}

func wrappedFlagUsages(cmd *cobra.Command) string {
	width := 80
	if ws, err := term.GetWinsize(0); err == nil {
		width = int(ws.Width)
	}
	return cmd.Flags().FlagUsagesWrapped(width - 1)
}

func managementSubCommands(cmd *cobra.Command) []*cobra.Command {
	cmds := []*cobra.Command{}
	for _, sub := range cmd.Commands() {
		if sub.IsAvailableCommand() && sub.HasSubCommands() {
			cmds = append(cmds, sub)
		}
	}
	return cmds
}

var usageTemplate = `Usage:

{{- if not .HasSubCommands}} {{.UseLine}}{{end}}
{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}}

{{ .Short | trim }}

{{- if gt .Aliases 0}}

Aliases:
  {{.NameAndAliases}}

{{- end}}
{{- if .HasExample}}

Examples:
{{ .Example }}

{{- end}}
{{- if .HasFlags}}

Options:
{{ wrappedFlagUsages . | trimRightSpace}}

{{- end}}
{{- if hasManagementSubCommands . }}

Management Commands:

{{- range managementSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}

{{- end}}
{{- if hasSubCommands .}}

Commands:

{{- range operationSubCommands . }}
  {{rpad .Name .NamePadding }} {{.Short}}
{{- end}}
{{- end}}

{{- if .HasSubCommands }}

Run '{{.CommandPath}} COMMAND --help' for more information on a command.
{{- end}}
`

var helpTemplate = `
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
33
vendor/github.com/docker/docker/cli/error.go
generated
vendored
@@ -1,33 +0,0 @@
package cli

import (
	"fmt"
	"strings"
)

// Errors is a list of errors.
// Useful in a loop if you don't want to return the error right away and you want to display after the loop,
// all the errors that happened during the loop.
type Errors []error

func (errList Errors) Error() string {
	if len(errList) < 1 {
		return ""
	}

	out := make([]string, len(errList))
	for i := range errList {
		out[i] = errList[i].Error()
	}
	return strings.Join(out, ", ")
}

// StatusError reports an unsuccessful exit by a command.
type StatusError struct {
	Status     string
	StatusCode int
}

func (e StatusError) Error() string {
	return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
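The comment on Errors describes an accumulate-then-report loop; a minimal sketch of that pattern follows, illustrative only and not part of this commit (removeAll and process are hypothetical names).

package main

import "github.com/docker/docker/cli"

// removeAll is hypothetical: it collects per-item failures with cli.Errors
// instead of returning on the first one, then reports them all at once.
func removeAll(names []string, process func(string) error) error {
	var errs cli.Errors
	for _, name := range names {
		if err := process(name); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errs
	}
	return nil
}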
27
vendor/github.com/docker/docker/cli/required.go
generated
vendored
@@ -1,27 +0,0 @@
package cli

import (
	"strings"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

// NoArgs validates args and returns an error if there are any args
func NoArgs(cmd *cobra.Command, args []string) error {
	if len(args) == 0 {
		return nil
	}

	if cmd.HasSubCommands() {
		return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n"))
	}

	return errors.Errorf(
		"\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s",
		cmd.CommandPath(),
		cmd.CommandPath(),
		cmd.UseLine(),
		cmd.Short,
	)
}
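NoArgs has the shape of a cobra positional-args validator, so a command that takes no arguments can install it directly. A hypothetical sketch, not part of this commit (it assumes a cobra version that exposes the Args field):

package main

import (
	"github.com/docker/docker/cli"
	"github.com/spf13/cobra"
)

// newVersionCommand is hypothetical: it rejects positional arguments by
// wiring cli.NoArgs into the command's Args validator.
func newVersionCommand() *cobra.Command {
	return &cobra.Command{
		Use:  "version",
		Args: cli.NoArgs,
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
}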
339
vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,339 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.

                    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

  7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all.  For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded.  In such case, this License incorporates
the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation.  If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

  10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission.  For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this.  Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

                            NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

                     END OF TERMS AND CONDITIONS

            How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) year name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
340
vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,340 @@
                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.

                    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
this License, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Library General
|
||||
Public License instead of this License.
|
22 vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE generated vendored Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2013 Honza Pokorny
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
3
vendor/github.com/docker/go-connections/doc.go
generated
vendored
3
vendor/github.com/docker/go-connections/doc.go
generated
vendored
|
@ -1,3 +0,0 @@
|
|||
// Package connections provides libraries to work with network connections.
|
||||
// This library is divided in several components for specific usage.
|
||||
package connections
|
1 vendor/github.com/docker/libcompose/package.go generated vendored
@@ -1 +0,0 @@
package libcompose