
Merge 'v1.5.2' into master

Fernandez Ludovic 2018-02-12 15:26:49 +01:00
commit 794c0206f3
338 changed files with 4158 additions and 48549 deletions


@@ -1,107 +0,0 @@
// Package builder defines interfaces for any Docker builder to implement.
//
// Historically, only server-side Dockerfile interpreters existed.
// This package allows for other implementations of Docker builders.
package builder
import (
"io"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/api/types/container"
containerpkg "github.com/docker/docker/container"
"github.com/docker/docker/layer"
"github.com/docker/docker/pkg/containerfs"
"golang.org/x/net/context"
)
const (
// DefaultDockerfileName is the default filename with Docker commands, read by docker build
DefaultDockerfileName string = "Dockerfile"
)
// Source defines a location that can be used as a source for the ADD/COPY
// instructions in the builder.
type Source interface {
// Root returns root path for accessing source
Root() containerfs.ContainerFS
// Close signals that the filesystem tree won't be used anymore.
// For Context implementations using a temporary directory, it is recommended to
// delete the temporary directory in Close().
Close() error
// Hash returns a checksum for a file
Hash(path string) (string, error)
}
// Backend abstracts calls to a Docker Daemon.
type Backend interface {
ImageBackend
ExecBackend
// Commit creates a new Docker image from an existing Docker container.
Commit(string, *backend.ContainerCommitConfig) (string, error)
// ContainerCreateWorkdir creates the workdir
ContainerCreateWorkdir(containerID string) error
CreateImage(config []byte, parent string, platform string) (Image, error)
ImageCacheBuilder
}
// ImageBackend are the interface methods required from an image component
type ImageBackend interface {
GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ReleaseableLayer, error)
}
// ExecBackend contains the interface methods required for executing containers
type ExecBackend interface {
// ContainerAttachRaw attaches to container.
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
// ContainerCreate creates a new Docker container and returns potential warnings
ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
// ContainerRm removes a container specified by `id`.
ContainerRm(name string, config *types.ContainerRmConfig) error
// ContainerKill stops the container execution abruptly.
ContainerKill(containerID string, sig uint64) error
// ContainerStart starts a new container
ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
// ContainerWait stops processing until the given container is stopped.
ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
}
// Result is the output produced by a Builder
type Result struct {
ImageID string
FromImage Image
}
// ImageCacheBuilder represents a generator for stateful image cache.
type ImageCacheBuilder interface {
// MakeImageCache creates a stateful image cache.
MakeImageCache(cacheFrom []string, platform string) ImageCache
}
// ImageCache abstracts an image cache.
// (parent image, child runconfig) -> child image
type ImageCache interface {
// GetCache returns a reference to a cached image whose parent equals `parent`
// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
GetCache(parentID string, cfg *container.Config) (imageID string, err error)
}
// Image represents a Docker image used by the builder.
type Image interface {
ImageID() string
RunConfig() *container.Config
MarshalJSON() ([]byte, error)
OperatingSystem() string
}
// ReleaseableLayer is an image layer that can be mounted and released
type ReleaseableLayer interface {
Release() error
Mount() (containerfs.ContainerFS, error)
Commit(platform string) (ReleaseableLayer, error)
DiffID() layer.DiffID
}
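
A minimal sketch of how a caller might exercise the ImageCacheBuilder/ImageCache contract defined above; probeCache, the buildexample package, and the "linux" platform argument are illustrative, not part of this package.

package buildexample

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/builder"
)

// probeCache asks the backend's cache for an image built from parentID with
// the given run configuration. Per the ImageCache contract above, an empty
// ID together with a nil error means a cache miss.
func probeCache(b builder.Backend, parentID string, cfg *container.Config) (string, error) {
	cache := b.MakeImageCache(nil, "linux") // no --cache-from images, linux platform
	return cache.GetCache(parentID, cfg)
}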


@@ -1,129 +0,0 @@
package remotecontext
import (
"io"
"os"
"path/filepath"
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/tarsum"
"github.com/pkg/errors"
)
type archiveContext struct {
root containerfs.ContainerFS
sums tarsum.FileInfoSums
}
func (c *archiveContext) Close() error {
return c.root.RemoveAll(c.root.Path())
}
func convertPathError(err error, cleanpath string) error {
if err, ok := err.(*os.PathError); ok {
err.Path = cleanpath
return err
}
return err
}
type modifiableContext interface {
builder.Source
// Remove deletes the entry specified by `path`.
// Removing a directory entry also removes all of its subentries.
Remove(path string) error
}
// FromArchive returns a build source from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func FromArchive(tarStream io.Reader) (builder.Source, error) {
root, err := ioutils.TempDir("", "docker-builder")
if err != nil {
return nil, err
}
// Assume a local filesystem, since the content comes from a tar stream.
tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}
// Make sure we clean up upon error. In the happy case the caller
// is expected to manage the clean-up.
defer func() {
if err != nil {
tsc.Close()
}
}()
decompressedStream, err := archive.DecompressStream(tarStream)
if err != nil {
return nil, err
}
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
if err != nil {
return nil, err
}
err = chrootarchive.Untar(sum, root, nil)
if err != nil {
return nil, err
}
tsc.sums = sum.GetSums()
return tsc, nil
}
func (c *archiveContext) Root() containerfs.ContainerFS {
return c.root
}
func (c *archiveContext) Remove(path string) error {
_, fullpath, err := normalize(path, c.root)
if err != nil {
return err
}
return c.root.RemoveAll(fullpath)
}
func (c *archiveContext) Hash(path string) (string, error) {
cleanpath, fullpath, err := normalize(path, c.root)
if err != nil {
return "", err
}
rel, err := c.root.Rel(c.root.Path(), fullpath)
if err != nil {
return "", convertPathError(err, cleanpath)
}
// Use the checksum of the followed path (not the possible symlink) because
// this is the file that is actually copied.
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
return tsInfo.Sum(), nil
}
// We set sum to path by default for the case where GetFile returns nil.
// The usual case is if relative path is empty.
return path, nil // backwards compat TODO: see if really needed
}
func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
cleanPath = root.Clean(string(root.Separator()) + path)[1:]
fullPath, err = root.ResolveScopedPath(path, true)
if err != nil {
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
}
if _, err := root.Lstat(fullPath); err != nil {
return "", "", errors.WithStack(convertPathError(err, path))
}
return
}
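
A minimal sketch, assuming a hypothetical local archive named "context.tar", of how FromArchive turns a tar stream into a build Source; the caller keeps ownership of the stream and closes the Source when done.

package main

import (
	"log"
	"os"

	"github.com/docker/docker/builder/remotecontext"
)

func main() {
	f, err := os.Open("context.tar") // hypothetical path to a tarred build context
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	src, err := remotecontext.FromArchive(f)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close() // deletes the temporary extraction directory

	sum, err := src.Hash("Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tarsum of Dockerfile:", sum)
}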


@@ -1,183 +0,0 @@
package remotecontext
import (
"bufio"
"fmt"
"io"
"os"
"strings"
"github.com/containerd/continuity/driver"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/urlutil"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// ClientSessionRemote is the identifier for the client-session context transport
const ClientSessionRemote = "client-session"
// Detect returns a context and dockerfile from remote location or local
// archive. progressReader is only used if remoteURL is actually a URL
// (not empty, and not a Git endpoint).
func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) {
remoteURL := config.Options.RemoteContext
dockerfilePath := config.Options.Dockerfile
switch {
case remoteURL == "":
remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)
case remoteURL == ClientSessionRemote:
res, err := parser.Parse(config.Source)
if err != nil {
return nil, nil, err
}
return nil, res, nil
case urlutil.IsGitURL(remoteURL):
remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
case urlutil.IsURL(remoteURL):
remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc)
default:
err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL)
}
return
}
func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {
defer rc.Close()
c, err := FromArchive(rc)
if err != nil {
return nil, nil, err
}
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}
func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) {
df, err := openAt(c, dockerfilePath)
if err != nil {
if os.IsNotExist(err) {
if dockerfilePath == builder.DefaultDockerfileName {
lowercase := strings.ToLower(dockerfilePath)
if _, err := StatAt(c, lowercase); err == nil {
return withDockerfileFromContext(c, lowercase)
}
}
return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error
}
c.Close()
return nil, nil, err
}
res, err := readAndParseDockerfile(dockerfilePath, df)
if err != nil {
return nil, nil, err
}
df.Close()
if err := removeDockerfile(c, dockerfilePath); err != nil {
c.Close()
return nil, nil, err
}
return c, res, nil
}
func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, *parser.Result, error) {
c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource
if err != nil {
return nil, nil, err
}
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}
func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) {
var dockerfile io.ReadCloser
dockerfileFoundErr := errors.New("found-dockerfile")
c, err := MakeRemoteContext(url, map[string]func(io.ReadCloser) (io.ReadCloser, error){
mimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) {
dockerfile = rc
return nil, dockerfileFoundErr
},
// fallback handler (tar context)
"": func(rc io.ReadCloser) (io.ReadCloser, error) {
return progressReader(rc), nil
},
})
switch {
case err == dockerfileFoundErr:
res, err := parser.Parse(dockerfile)
return nil, res, err
case err != nil:
return nil, nil, err
}
return withDockerfileFromContext(c.(modifiableContext), dockerfilePath)
}
func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
f, err := openAt(c, ".dockerignore")
// Note that a missing .dockerignore file isn't treated as an error
switch {
case os.IsNotExist(err):
return nil
case err != nil:
return err
}
excludes, err := dockerignore.ReadAll(f)
if err != nil {
f.Close()
return err
}
f.Close()
filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
for _, fileToRemove := range filesToRemove {
if rm, _ := fileutils.Matches(fileToRemove, excludes); rm {
if err := c.Remove(fileToRemove); err != nil {
logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
}
}
}
return nil
}
func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) {
br := bufio.NewReader(rc)
if _, err := br.Peek(1); err != nil {
if err == io.EOF {
return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name)
}
return nil, errors.Wrap(err, "unexpected error reading Dockerfile")
}
return parser.Parse(br)
}
func openAt(remote builder.Source, path string) (driver.File, error) {
fullPath, err := FullPath(remote, path)
if err != nil {
return nil, err
}
return remote.Root().Open(fullPath)
}
// StatAt is a helper for calling Stat on a path from a source
func StatAt(remote builder.Source, path string) (os.FileInfo, error) {
fullPath, err := FullPath(remote, path)
if err != nil {
return nil, err
}
return remote.Root().Stat(fullPath)
}
// FullPath is a helper for getting a full path for a path from a source
func FullPath(remote builder.Source, path string) (string, error) {
fullPath, err := remote.Root().ResolveScopedPath(path, true)
if err != nil {
return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
}
return fullPath, nil
}
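
A sketch of wiring Detect for the plain, non-remote case. The Source and Options fields come from the code above; assuming Options is the usual *types.ImageBuildOptions, detectLocal and the buildexample package are illustrative.

package buildexample

import (
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/backend"
	"github.com/docker/docker/builder/remotecontext"
)

// detectLocal covers the empty-RemoteContext branch: Detect extracts the tar
// stream and parses the named Dockerfile out of it.
func detectLocal(contextTar io.ReadCloser, dockerfile string) error {
	cfg := backend.BuildConfig{
		Source:  contextTar,
		Options: &types.ImageBuildOptions{Dockerfile: dockerfile},
	}
	src, res, err := remotecontext.Detect(cfg)
	if err != nil {
		return err
	}
	defer src.Close()
	_ = res // *parser.Result holding the parsed Dockerfile
	return nil
}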


@@ -1,75 +0,0 @@
package remotecontext
type notFoundError string
func (e notFoundError) Error() string {
return string(e)
}
func (notFoundError) NotFound() {}
type requestError string
func (e requestError) Error() string {
return string(e)
}
func (e requestError) InvalidParameter() {}
type unauthorizedError string
func (e unauthorizedError) Error() string {
return string(e)
}
func (unauthorizedError) Unauthorized() {}
type forbiddenError string
func (e forbiddenError) Error() string {
return string(e)
}
func (forbiddenError) Forbidden() {}
type dnsError struct {
cause error
}
func (e dnsError) Error() string {
return e.cause.Error()
}
func (e dnsError) NotFound() {}
func (e dnsError) Cause() error {
return e.cause
}
type systemError struct {
cause error
}
func (e systemError) Error() string {
return e.cause.Error()
}
func (e systemError) SystemError() {}
func (e systemError) Cause() error {
return e.cause
}
type unknownError struct {
cause error
}
func (e unknownError) Error() string {
return e.cause.Error()
}
func (unknownError) Unknown() {}
func (e unknownError) Cause() error {
return e.cause
}
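
These error types are classified by behaviour rather than by concrete type; a minimal sketch of such a check (isNotFound is illustrative):

package buildexample

// isNotFound reports whether err exposes the NotFound() behaviour, as
// notFoundError and dnsError above do.
func isNotFound(err error) bool {
	type notFound interface {
		NotFound()
	}
	_, ok := err.(notFound)
	return ok
}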


@@ -1,45 +0,0 @@
package remotecontext
import (
"archive/tar"
"crypto/sha256"
"hash"
"os"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/tarsum"
)
// NewFileHash returns a new hash that is used for the builder cache keys
func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) {
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return nil, err
}
}
hdr, err := archive.FileInfoHeader(name, fi, link)
if err != nil {
return nil, err
}
if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return nil, err
}
tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
tsh.Reset() // initialize header
return tsh, nil
}
type tarsumHash struct {
hash.Hash
hdr *tar.Header
}
// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
// comply with hash.Hash and reset to the state hash had before any writes
tsh.Hash.Reset()
tarsum.WriteV1Header(tsh.hdr, tsh.Hash)
}
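
A sketch of hashing one file the way the builder cache does, the tar header being written into the hash before the file contents; hashFile and the buildexample package are illustrative helpers, not part of this package.

package buildexample

import (
	"encoding/hex"
	"io"
	"os"

	"github.com/docker/docker/builder/remotecontext"
)

// hashFile hashes a single file: NewFileHash seeds the hash with the tar
// header (name, mode, xattrs, symlink target), then the regular-file
// contents are copied in.
func hashFile(path, name string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	h, err := remotecontext.NewFileHash(path, name, fi)
	if err != nil {
		return "", err
	}
	if fi.Mode().IsRegular() {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}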


@@ -1,3 +0,0 @@
package remotecontext
//go:generate protoc --gogoslick_out=. tarsum.proto


@@ -1,29 +0,0 @@
package remotecontext
import (
"os"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext/git"
"github.com/docker/docker/pkg/archive"
)
// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
func MakeGitContext(gitURL string) (builder.Source, error) {
root, err := git.Clone(gitURL)
if err != nil {
return nil, err
}
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return nil, err
}
defer func() {
// TODO: print errors?
c.Close()
os.RemoveAll(root)
}()
return FromArchive(c)
}
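
A minimal sketch combining MakeGitContext with the Source interface from above; gitDockerfileSum is illustrative.

package buildexample

import "github.com/docker/docker/builder/remotecontext"

// gitDockerfileSum clones a repository into a temporary checkout, exposes it
// as a build Source, and reads one file's tarsum.
func gitDockerfileSum(gitURL string) (string, error) {
	src, err := remotecontext.MakeGitContext(gitURL)
	if err != nil {
		return "", err
	}
	defer src.Close() // drops the temporary extraction directory
	return src.Hash("Dockerfile")
}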


@@ -1,100 +0,0 @@
package remotecontext
import (
"encoding/hex"
"os"
"strings"
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/pools"
"github.com/pkg/errors"
)
// NewLazySource creates a new LazyContext. LazyContext defines a hashed build
// context based on a root directory. Individual files are hashed the first
// time they are requested. It is not safe to call methods of LazyContext concurrently.
func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
return &lazySource{
root: root,
sums: make(map[string]string),
}, nil
}
type lazySource struct {
root containerfs.ContainerFS
sums map[string]string
}
func (c *lazySource) Root() containerfs.ContainerFS {
return c.root
}
func (c *lazySource) Close() error {
return nil
}
func (c *lazySource) Hash(path string) (string, error) {
cleanPath, fullPath, err := normalize(path, c.root)
if err != nil {
return "", err
}
fi, err := c.root.Lstat(fullPath)
if err != nil {
return "", errors.WithStack(err)
}
relPath, err := Rel(c.root, fullPath)
if err != nil {
return "", errors.WithStack(convertPathError(err, cleanPath))
}
sum, ok := c.sums[relPath]
if !ok {
sum, err = c.prepareHash(relPath, fi)
if err != nil {
return "", err
}
}
return sum, nil
}
func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
p := c.root.Join(c.root.Path(), relPath)
h, err := NewFileHash(p, relPath, fi)
if err != nil {
return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
}
if fi.Mode().IsRegular() && fi.Size() > 0 {
f, err := c.root.Open(p)
if err != nil {
return "", errors.Wrapf(err, "failed to open %s", relPath)
}
defer f.Close()
if _, err := pools.Copy(h, f); err != nil {
return "", errors.Wrapf(err, "failed to copy file data for %s", relPath)
}
}
sum := hex.EncodeToString(h.Sum(nil))
c.sums[relPath] = sum
return sum, nil
}
// Rel makes a path relative to base path. Same as `filepath.Rel` but can also
// handle UUID paths in windows.
func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
// filepath.Rel can't handle UUID paths in windows
if basepath.OS() == "windows" {
pfx := basepath.Path() + `\`
if strings.HasPrefix(targpath, pfx) {
p := strings.TrimPrefix(targpath, pfx)
if p == "" {
p = "."
}
return p, nil
}
}
return basepath.Rel(basepath.Path(), targpath)
}
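
A sketch of wrapping a local directory as a lazily hashed Source; lazyHash and the directory argument are illustrative, while containerfs.NewLocalContainerFS is the constructor already used above.

package buildexample

import (
	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/pkg/containerfs"
)

// lazyHash wraps contextDir as a lazy Source and asks for one file's
// checksum; files are only hashed when requested, and results are memoized.
func lazyHash(contextDir, file string) (string, error) {
	src, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(contextDir))
	if err != nil {
		return "", err
	}
	return src.Hash(file)
}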


@@ -1,27 +0,0 @@
package remotecontext
import (
"mime"
"net/http"
)
// mimeTypes stores the MIME content type.
var mimeTypes = struct {
TextPlain string
OctetStream string
}{"text/plain", "application/octet-stream"}
// detectContentType returns a best guess representation of the MIME
// content type for the bytes at c. The value detected by
// http.DetectContentType is guaranteed not to be nil, defaulting to
// application/octet-stream when a better guess cannot be made. The
// result of this detection is then run through mime.ParseMediaType()
// which separates the actual MIME string from any parameters.
func detectContentType(c []byte) (string, map[string]string, error) {
ct := http.DetectContentType(c)
contentType, args, err := mime.ParseMediaType(ct)
if err != nil {
return "", nil, err
}
return contentType, args, nil
}
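
For reference, a standard-library-only sketch of the same detection; sniff is illustrative.

package buildexample

import (
	"mime"
	"net/http"
)

// sniff guesses the content type of the first bytes of a body and strips any
// parameters, mirroring detectContentType above.
func sniff(preamble []byte) (string, error) {
	ct := http.DetectContentType(preamble) // e.g. "text/plain; charset=utf-8"
	mediaType, _, err := mime.ParseMediaType(ct)
	return mediaType, err // e.g. "text/plain"
}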


@@ -1,153 +0,0 @@
package remotecontext
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"github.com/docker/docker/builder"
"github.com/pkg/errors"
)
// When downloading remote contexts, limit the amount (in bytes)
// to be read from the response body in order to detect its Content-Type
const maxPreambleLength = 100
const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))`
var mimeRe = regexp.MustCompile(acceptableRemoteMIME)
// MakeRemoteContext downloads a context from remoteURL and returns it.
//
// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of
// maxPreambleLength bytes from the body to help detect the MIME type.
// Look at acceptableRemoteMIME for more details.
//
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned.
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) {
f, err := GetWithStatusError(remoteURL)
if err != nil {
return nil, fmt.Errorf("error downloading remote context %s: %v", remoteURL, err)
}
defer f.Body.Close()
var contextReader io.ReadCloser
if contentTypeHandlers != nil {
contentType := f.Header.Get("Content-Type")
clen := f.ContentLength
contentType, contextReader, err = inspectResponse(contentType, f.Body, clen)
if err != nil {
return nil, fmt.Errorf("error detecting content type for remote %s: %v", remoteURL, err)
}
defer contextReader.Close()
// This loop tries to find a content-type handler for the detected content-type.
// If it could not find one from the caller-supplied map, it tries the empty content-type `""`
// which is interpreted as a fallback handler (usually used for raw tar contexts).
for _, ct := range []string{contentType, ""} {
if fn, ok := contentTypeHandlers[ct]; ok {
defer contextReader.Close()
if contextReader, err = fn(contextReader); err != nil {
return nil, err
}
break
}
}
}
// Pass through - this is a pre-packaged context, presumably
// with a Dockerfile with the right name inside it.
return FromArchive(contextReader)
}
// GetWithStatusError does an http.Get() and returns an error if the
// status code is 4xx or 5xx.
func GetWithStatusError(address string) (resp *http.Response, err error) {
if resp, err = http.Get(address); err != nil {
if uerr, ok := err.(*url.Error); ok {
if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout {
return nil, dnsError{err}
}
}
return nil, systemError{err}
}
if resp.StatusCode < 400 {
return resp, nil
}
msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status)
body, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
return nil, errors.Wrap(systemError{err}, msg+": error reading body")
}
msg += ": " + string(bytes.TrimSpace(body))
switch resp.StatusCode {
case http.StatusNotFound:
return nil, notFoundError(msg)
case http.StatusBadRequest:
return nil, requestError(msg)
case http.StatusUnauthorized:
return nil, unauthorizedError(msg)
case http.StatusForbidden:
return nil, forbiddenError(msg)
}
return nil, unknownError{errors.New(msg)}
}
// inspectResponse looks into the http response data at r to determine whether its
// content-type is on the list of acceptable content types for remote build contexts.
// This function returns:
// - a string representation of the detected content-type
// - an io.Reader for the response body
// - an error value which will be non-nil either when something goes wrong while
// reading bytes from r or when the detected content-type is not acceptable.
func inspectResponse(ct string, r io.Reader, clen int64) (string, io.ReadCloser, error) {
plen := clen
if plen <= 0 || plen > maxPreambleLength {
plen = maxPreambleLength
}
preamble := make([]byte, plen)
rlen, err := r.Read(preamble)
if rlen == 0 {
return ct, ioutil.NopCloser(r), errors.New("empty response")
}
if err != nil && err != io.EOF {
return ct, ioutil.NopCloser(r), err
}
preambleR := bytes.NewReader(preamble[:rlen])
bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r))
// Some web servers will use application/octet-stream as the default
// content type for files without an extension (e.g. 'Dockerfile')
// so if we receive this value we better check for text content
contentType := ct
if len(ct) == 0 || ct == mimeTypes.OctetStream {
contentType, _, err = detectContentType(preamble)
if err != nil {
return contentType, bodyReader, err
}
}
contentType = selectAcceptableMIME(contentType)
var cterr error
if len(contentType) == 0 {
cterr = fmt.Errorf("unsupported Content-Type %q", ct)
contentType = ct
}
return contentType, bodyReader, cterr
}
func selectAcceptableMIME(ct string) string {
return mimeRe.FindString(ct)
}
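
A sketch of calling MakeRemoteContext with only the "" fallback handler, mirroring what the detect code does for plain URLs; remoteTar is illustrative.

package buildexample

import (
	"io"

	"github.com/docker/docker/builder"
	"github.com/docker/docker/builder/remotecontext"
)

// remoteTar downloads a remote context and treats whatever comes back as a
// (possibly compressed) tar stream via the fallback "" handler.
func remoteTar(url string) (builder.Source, error) {
	handlers := map[string]func(io.ReadCloser) (io.ReadCloser, error){
		"": func(rc io.ReadCloser) (io.ReadCloser, error) { return rc, nil },
	}
	return remotecontext.MakeRemoteContext(url, handlers)
}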


@@ -1,157 +0,0 @@
package remotecontext
import (
"os"
"sync"
"github.com/docker/docker/pkg/containerfs"
iradix "github.com/hashicorp/go-immutable-radix"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)
type hashed interface {
Digest() digest.Digest
}
// CachableSource is a source that contains cache records for its contents
type CachableSource struct {
mu sync.Mutex
root containerfs.ContainerFS
tree *iradix.Tree
txn *iradix.Txn
}
// NewCachableSource creates a new CachableSource
func NewCachableSource(root string) *CachableSource {
ts := &CachableSource{
tree: iradix.New(),
root: containerfs.NewLocalContainerFS(root),
}
return ts
}
// MarshalBinary marshals the current cache information to a byte array
func (cs *CachableSource) MarshalBinary() ([]byte, error) {
b := TarsumBackup{Hashes: make(map[string]string)}
root := cs.getRoot()
root.Walk(func(k []byte, v interface{}) bool {
b.Hashes[string(k)] = v.(*fileInfo).sum
return false
})
return b.Marshal()
}
// UnmarshalBinary decodes cache information from the presented byte array
func (cs *CachableSource) UnmarshalBinary(data []byte) error {
var b TarsumBackup
if err := b.Unmarshal(data); err != nil {
return err
}
txn := iradix.New().Txn()
for p, v := range b.Hashes {
txn.Insert([]byte(p), &fileInfo{sum: v})
}
cs.mu.Lock()
defer cs.mu.Unlock()
cs.tree = txn.Commit()
return nil
}
// Scan rescans the cache information from the file system
func (cs *CachableSource) Scan() error {
lc, err := NewLazySource(cs.root)
if err != nil {
return err
}
txn := iradix.New().Txn()
err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return errors.Wrapf(err, "failed to walk %s", path)
}
rel, err := Rel(cs.root, path)
if err != nil {
return err
}
h, err := lc.Hash(rel)
if err != nil {
return err
}
txn.Insert([]byte(rel), &fileInfo{sum: h})
return nil
})
if err != nil {
return err
}
cs.mu.Lock()
defer cs.mu.Unlock()
cs.tree = txn.Commit()
return nil
}
// HandleChange notifies the source about a modification operation
func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
cs.mu.Lock()
if cs.txn == nil {
cs.txn = cs.tree.Txn()
}
if kind == fsutil.ChangeKindDelete {
cs.txn.Delete([]byte(p))
cs.mu.Unlock()
return
}
h, ok := fi.(hashed)
if !ok {
cs.mu.Unlock()
return errors.Errorf("invalid fileinfo: %s", p)
}
hfi := &fileInfo{
sum: h.Digest().Hex(),
}
cs.txn.Insert([]byte(p), hfi)
cs.mu.Unlock()
return nil
}
func (cs *CachableSource) getRoot() *iradix.Node {
cs.mu.Lock()
if cs.txn != nil {
cs.tree = cs.txn.Commit()
cs.txn = nil
}
t := cs.tree
cs.mu.Unlock()
return t.Root()
}
// Close closes the source
func (cs *CachableSource) Close() error {
return nil
}
// Hash returns a hash for a single file in the source
func (cs *CachableSource) Hash(path string) (string, error) {
n := cs.getRoot()
// TODO: check this for symlinks
v, ok := n.Get([]byte(path))
if !ok {
return path, nil
}
return v.(*fileInfo).sum, nil
}
// Root returns a root directory for the source
func (cs *CachableSource) Root() containerfs.ContainerFS {
return cs.root
}
type fileInfo struct {
sum string
}
func (fi *fileInfo) Hash() string {
return fi.sum
}
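
A sketch of the CachableSource round trip: scan a context directory, serialize the per-file hash cache, and restore it later; snapshotAndRestore and its argument are illustrative.

package buildexample

import "github.com/docker/docker/builder/remotecontext"

// snapshotAndRestore scans contextDir, serializes the resulting hash tree
// with MarshalBinary, and loads it back into a fresh CachableSource.
func snapshotAndRestore(contextDir string) error {
	cs := remotecontext.NewCachableSource(contextDir)
	if err := cs.Scan(); err != nil {
		return err
	}
	data, err := cs.MarshalBinary()
	if err != nil {
		return err
	}
	restored := remotecontext.NewCachableSource(contextDir)
	return restored.UnmarshalBinary(data)
}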


@@ -1,525 +0,0 @@
// Code generated by protoc-gen-gogo.
// source: tarsum.proto
// DO NOT EDIT!
/*
Package remotecontext is a generated protocol buffer package.
It is generated from these files:
tarsum.proto
It has these top-level messages:
TarsumBackup
*/
package remotecontext
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import strings "strings"
import reflect "reflect"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type TarsumBackup struct {
Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *TarsumBackup) Reset() { *m = TarsumBackup{} }
func (*TarsumBackup) ProtoMessage() {}
func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} }
func (m *TarsumBackup) GetHashes() map[string]string {
if m != nil {
return m.Hashes
}
return nil
}
func init() {
proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup")
}
func (this *TarsumBackup) Equal(that interface{}) bool {
if that == nil {
if this == nil {
return true
}
return false
}
that1, ok := that.(*TarsumBackup)
if !ok {
that2, ok := that.(TarsumBackup)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
if this == nil {
return true
}
return false
} else if this == nil {
return false
}
if len(this.Hashes) != len(that1.Hashes) {
return false
}
for i := range this.Hashes {
if this.Hashes[i] != that1.Hashes[i] {
return false
}
}
return true
}
func (this *TarsumBackup) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&remotecontext.TarsumBackup{")
keysForHashes := make([]string, 0, len(this.Hashes))
for k := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
mapStringForHashes := "map[string]string{"
for _, k := range keysForHashes {
mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k])
}
mapStringForHashes += "}"
if this.Hashes != nil {
s = append(s, "Hashes: "+mapStringForHashes+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringTarsum(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *TarsumBackup) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Hashes) > 0 {
for k := range m.Hashes {
dAtA[i] = 0xa
i++
v := m.Hashes[k]
mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
i = encodeVarintTarsum(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintTarsum(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintTarsum(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *TarsumBackup) Size() (n int) {
var l int
_ = l
if len(m.Hashes) > 0 {
for k, v := range m.Hashes {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize))
}
}
return n
}
func sovTarsum(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozTarsum(x uint64) (n int) {
return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *TarsumBackup) String() string {
if this == nil {
return "nil"
}
keysForHashes := make([]string, 0, len(this.Hashes))
for k := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
mapStringForHashes := "map[string]string{"
for _, k := range keysForHashes {
mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k])
}
mapStringForHashes += "}"
s := strings.Join([]string{`&TarsumBackup{`,
`Hashes:` + mapStringForHashes + `,`,
`}`,
}, "")
return s
}
func valueToStringTarsum(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *TarsumBackup) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTarsum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthTarsum
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Hashes == nil {
m.Hashes = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthTarsum
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Hashes[mapkey] = mapvalue
} else {
var mapvalue string
m.Hashes[mapkey] = mapvalue
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTarsum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTarsum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTarsum(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthTarsum
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipTarsum(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) }
var fileDescriptorTarsum = []byte{
// 196 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a,
0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49,
0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b,
0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b,
0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51,
0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24,
0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89,
0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b,
0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1,
0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7,
0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c,
0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f,
0xe0, 0x00, 0x00, 0x00,
}