1
0
Fork 0

Update traefik dependencies (docker/docker and related) (#1823)

Update traefik dependencies (docker/docker and related)

- Update dependencies
- Fix compilation problems
- Remove vdemeester/docker-events (in docker api now)
- Remove `integration/vendor`
- Use `testImport`
- update some deps.
- regenerate the lock from scratch (after a `glide cc`)
This commit is contained in:
Vincent Demeester 2017-07-06 16:28:13 +02:00 committed by Ludovic Fernandez
parent 7d178f49b4
commit b7daa2f3a4
1301 changed files with 21476 additions and 150099 deletions

View file

@ -0,0 +1,41 @@
package auth
import (
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/docker/api/types"
"github.com/docker/docker/registry"
)
// Lookup defines a method for looking up authentication information
type Lookup interface {
	// All returns every known auth configuration, keyed by registry address.
	All() map[string]types.AuthConfig
	// Lookup returns the auth configuration matching the given repository,
	// or a zero-value AuthConfig when none is configured.
	Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig
}
// ConfigLookup implements Lookup by reading a Docker config file
type ConfigLookup struct {
	// Embedded docker CLI config file; may be nil, in which case both
	// Lookup and All return empty results.
	*configfile.ConfigFile
}
// NewConfigLookup creates a new ConfigLookup for a given context.
// The parameter was renamed from "configfile", which shadowed the imported
// configfile package inside the function body.
func NewConfigLookup(configFile *configfile.ConfigFile) *ConfigLookup {
	return &ConfigLookup{
		ConfigFile: configFile,
	}
}
// Lookup resolves authentication information for the given repository from
// the underlying Docker config file. A zero AuthConfig is returned when no
// config file is loaded or the repository information is incomplete.
func (c *ConfigLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig {
	if c.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil {
		return registry.ResolveAuthConfig(c.ConfigFile.AuthConfigs, repoInfo.Index)
	}
	return types.AuthConfig{}
}
// All returns every authentication entry from the Docker config file, keyed
// by registry address. An empty map is returned when no config file is loaded.
func (c *ConfigLookup) All() map[string]types.AuthConfig {
	if c.ConfigFile != nil {
		return c.ConfigFile.AuthConfigs
	}
	return map[string]types.AuthConfig{}
}

View file

@ -0,0 +1,207 @@
package builder
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/cli/cli/command/image/build"
"github.com/docker/docker/api/types"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/term"
"github.com/docker/libcompose/logger"
"golang.org/x/net/context"
)
// DefaultDockerfileName is the default name of a Dockerfile
const DefaultDockerfileName = "Dockerfile"
// Builder defines methods to provide a docker builder. This makes libcompose
// not tied up to the docker daemon builder.
type Builder interface {
Build(imageName string) error
}
// DaemonBuilder is the daemon "docker build" Builder implementation.
type DaemonBuilder struct {
	// Client is the docker API client used to trigger the build.
	Client client.ImageAPIClient
	// ContextDirectory is the root of the build context sent to the daemon.
	ContextDirectory string
	// Dockerfile is the Dockerfile path relative to ContextDirectory;
	// empty selects the default name (with a lowercase fallback).
	Dockerfile string
	// AuthConfigs carries registry credentials for pulling base images.
	AuthConfigs map[string]types.AuthConfig
	NoCache bool
	ForceRemove bool
	Pull bool
	BuildArgs map[string]*string
	// LoggerFactory creates the build logger; a NullLogger is substituted
	// when nil (see Build).
	LoggerFactory logger.Factory
}
// Build implements Builder. It consumes the docker build API endpoint and sends
// a tar of the specified service build context.
func (d *DaemonBuilder) Build(ctx context.Context, imageName string) error {
	buildCtx, err := CreateTar(d.ContextDirectory, d.Dockerfile)
	if err != nil {
		return err
	}
	defer buildCtx.Close()
	// Default to a no-op logger so the wrappers below never see a nil factory.
	if d.LoggerFactory == nil {
		d.LoggerFactory = &logger.NullLogger{}
	}
	l := d.LoggerFactory.CreateBuildLogger(imageName)
	// progBuff receives upload progress, buildBuff the daemon's build output,
	// errBuff build errors; all fan into the same build logger.
	progBuff := &logger.Wrapper{
		Err:    false,
		Logger: l,
	}
	buildBuff := &logger.Wrapper{
		Err:    false,
		Logger: l,
	}
	errBuff := &logger.Wrapper{
		Err:    true,
		Logger: l,
	}
	// Setup an upload progress bar
	progressOutput := streamformatter.NewProgressOutput(progBuff)
	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
	logrus.Infof("Building %s...", imageName)
	// Detect whether output goes to a terminal so the JSON message stream can
	// render progress in place; prefer the logger's writer when it exposes one.
	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)
	w := l.OutWriter()
	if w != nil {
		outFd, isTerminalOut = term.GetFdInfo(w)
	}
	response, err := d.Client.ImageBuild(ctx, body, types.ImageBuildOptions{
		Tags:        []string{imageName},
		NoCache:     d.NoCache,
		Remove:      true,
		ForceRemove: d.ForceRemove,
		PullParent:  d.Pull,
		Dockerfile:  d.Dockerfile,
		AuthConfigs: d.AuthConfigs,
		BuildArgs:   d.BuildArgs,
	})
	if err != nil {
		return err
	}
	// Stream the daemon's build output to the logger, surfacing any build
	// error embedded in the JSON message stream as a formatted error.
	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			errBuff.Write([]byte(jerr.Error()))
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
// CreateTar create a build context tar for the specified project and service name.
// It resolves the Dockerfile (default name, lowercase fallback), applies
// .dockerignore exclusions, and returns an uncompressed tar stream of the
// context directory.
func CreateTar(contextDirectory, dockerfile string) (io.ReadCloser, error) {
	// This code was ripped off from docker/api/client/build.go
	dockerfileName := filepath.Join(contextDirectory, dockerfile)
	absContextDirectory, err := filepath.Abs(contextDirectory)
	if err != nil {
		return nil, err
	}
	filename := dockerfileName
	if dockerfile == "" {
		// No -f/--file was specified so use the default
		dockerfileName = DefaultDockerfileName
		filename = filepath.Join(absContextDirectory, dockerfileName)
		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absContextDirectory, strings.ToLower(dockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				dockerfileName = strings.ToLower(dockerfileName)
				filename = tmpFN
			}
		}
	}
	origDockerfile := dockerfileName // used for error msg
	if filename, err = filepath.Abs(filename); err != nil {
		return nil, err
	}
	// Now reset the dockerfileName to be relative to the build context
	dockerfileName, err = filepath.Rel(absContextDirectory, filename)
	if err != nil {
		return nil, err
	}
	// And canonicalize dockerfile name to a platform-independent one
	dockerfileName, err = archive.CanonicalTarNameForPath(dockerfileName)
	if err != nil {
		return nil, fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err)
	}
	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return nil, fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}
	var includes = []string{"."}
	var excludes []string
	dockerIgnorePath := path.Join(contextDirectory, ".dockerignore")
	dockerIgnore, err := os.Open(dockerIgnorePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// NOTE(review): this warning only fires when .dockerignore simply
		// does not exist (other errors returned above) — looks unintended,
		// preserved as-is; confirm against upstream before changing.
		logrus.Warnf("Error while reading .dockerignore (%s) : %s", dockerIgnorePath, err.Error())
		excludes = make([]string, 0)
	} else {
		excludes, err = dockerignore.ReadAll(dockerIgnore)
		if err != nil {
			return nil, err
		}
	}
	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(dockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", dockerfileName)
	}
	if err := build.ValidateContextDirectory(contextDirectory, excludes); err != nil {
		return nil, fmt.Errorf("error checking context is accessible: '%s', please check permissions and try again", err)
	}
	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}
	return archive.TarWithOptions(contextDirectory, options)
}

View file

@ -0,0 +1,115 @@
package client
import (
"fmt"
"net/http"
"os"
"path/filepath"
"runtime"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/libcompose/version"
)
const (
	// DefaultAPIVersion is the default docker API version set by libcompose
	DefaultAPIVersion = "v1.20"
	// defaultTrustKeyFile is the trust key filename under ~/.docker.
	defaultTrustKeyFile = "key.json"
	// Default TLS material filenames, resolved inside dockerCertPath.
	defaultCaFile = "ca.pem"
	defaultKeyFile = "key.pem"
	defaultCertFile = "cert.pem"
)

var (
	// dockerCertPath is where TLS certificates are looked up; seeded from
	// the DOCKER_CERT_PATH environment variable at process start.
	dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
)

func init() {
	// Fall back to the standard docker CLI config directory when
	// DOCKER_CERT_PATH is not set.
	if dockerCertPath == "" {
		dockerCertPath = cliconfig.Dir()
	}
}
// Options holds docker client options (host, tls, ..)
type Options struct {
	// TLS enables TLS for the connection; implied by TLSVerify (see Create).
	TLS bool
	// TLSVerify enables verification of the server certificate.
	TLSVerify bool
	// TLSOptions carries CA/cert/key paths; empty fields are defaulted from
	// dockerCertPath by Create.
	TLSOptions tlsconfig.Options
	// TrustKey path; defaults to ~/.docker/key.json when empty.
	TrustKey string
	// Host is the docker daemon address. Empty means configure entirely from
	// the environment (DOCKER_HOST et al.).
	Host string
	// APIVersion overrides DefaultAPIVersion when non-empty.
	APIVersion string
}
// Create creates a docker client based on the specified options.
func Create(c Options) (client.APIClient, error) {
	// Without an explicit host, defer entirely to the environment
	// (DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH, DOCKER_API_VERSION),
	// only pinning the API version when the environment leaves it unset.
	if c.Host == "" {
		if os.Getenv("DOCKER_API_VERSION") == "" {
			os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion)
		}
		client, err := client.NewEnvClient()
		if err != nil {
			return nil, err
		}
		return client, nil
	}
	apiVersion := c.APIVersion
	if apiVersion == "" {
		apiVersion = DefaultAPIVersion
	}
	// Fill in default locations for any TLS material left unset.
	if c.TLSOptions.CAFile == "" {
		c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile)
	}
	if c.TLSOptions.CertFile == "" {
		c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile)
	}
	if c.TLSOptions.KeyFile == "" {
		c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile)
	}
	if c.TrustKey == "" {
		c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	}
	// Verification implies TLS; TLS without verification skips cert checks.
	if c.TLSVerify {
		c.TLS = true
	}
	if c.TLS {
		c.TLSOptions.InsecureSkipVerify = !c.TLSVerify
	}
	// Build a TLS-aware HTTP client when needed; otherwise httpClient stays
	// nil and the docker client uses its default transport.
	var httpClient *http.Client
	if c.TLS {
		config, err := tlsconfig.Client(c.TLSOptions)
		if err != nil {
			return nil, err
		}
		tr := &http.Transport{
			TLSClientConfig: config,
		}
		proto, addr, _, err := client.ParseHost(c.Host)
		if err != nil {
			return nil, err
		}
		if err := sockets.ConfigureTransport(tr, proto, addr); err != nil {
			return nil, err
		}
		httpClient = &http.Client{
			Transport: tr,
		}
	}
	customHeaders := map[string]string{}
	customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS)
	client, err := client.NewClient(c.Host, apiVersion, httpClient, customHeaders)
	if err != nil {
		return nil, err
	}
	return client, nil
}

View file

@ -0,0 +1,35 @@
package client
import (
"github.com/docker/docker/client"
"github.com/docker/libcompose/project"
)
// Factory is a factory to create docker clients.
type Factory interface {
	// Create constructs a Docker client for the given service. The passed in
	// config may be nil in which case a generic client for the project should
	// be returned.
	Create(service project.Service) client.APIClient
}

// defaultFactory hands out one shared API client for every service.
type defaultFactory struct {
	client client.APIClient
}
// NewDefaultFactory creates and returns the default client factory that uses
// github.com/docker/docker client.
func NewDefaultFactory(opts Options) (Factory, error) {
	apiClient, err := Create(opts)
	if err != nil {
		return nil, err
	}
	factory := &defaultFactory{client: apiClient}
	return factory, nil
}
// Create returns the factory's single shared client, regardless of the
// service passed in.
func (f *defaultFactory) Create(service project.Service) client.APIClient {
	return f.client
}

View file

@ -0,0 +1,397 @@
package container
import (
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/term"
"github.com/docker/go-connections/nat"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/logger"
"github.com/docker/libcompose/project"
)
// Container holds information about a docker container and the service it is tied on.
type Container struct {
	// client is the docker API client used for all container operations.
	client client.ContainerAPIClient
	// id is the container ID this struct was created with.
	id string
	// container caches the latest inspect result; refreshed by
	// updateInnerContainer after state-changing calls.
	container *types.ContainerJSON
}
// Create creates a container and return a Container struct (and an error if any)
func Create(ctx context.Context, client client.ContainerAPIClient, name string, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig) (*Container, error) {
	created, err := client.ContainerCreate(ctx, config, hostConfig, networkingConfig, name)
	if err != nil {
		return nil, err
	}
	return New(ctx, client, created.ID)
}
// New creates a container struct with the specified client, id and name
func New(ctx context.Context, client client.ContainerAPIClient, id string) (*Container, error) {
	inspected, err := Get(ctx, client, id)
	if err != nil {
		return nil, err
	}
	c := &Container{
		client:    client,
		id:        id,
		container: inspected,
	}
	return c, nil
}
// NewInspected creates a container struct from an inspected container
func NewInspected(client client.ContainerAPIClient, container *types.ContainerJSON) *Container {
	c := &Container{
		client:    client,
		id:        container.ID,
		container: container,
	}
	return c
}
// Info returns info about the container, like name, command, state or ports.
func (c *Container) Info(ctx context.Context) (project.Info, error) {
	filter := map[string][]string{"name": {c.container.Name}}
	infos, err := ListByFilter(ctx, c.client, filter)
	if err != nil || len(infos) == 0 {
		return nil, err
	}
	first := infos[0]
	return project.Info{
		"Id":      c.container.ID,
		"Name":    name(first.Names),
		"Command": first.Command,
		"State":   first.Status,
		"Ports":   portString(first.Ports),
	}, nil
}
// portString renders a container's port list as a comma-separated summary,
// using "ip:public->private/proto" for published ports and "private/proto"
// for unpublished ones.
func portString(ports []types.Port) string {
	entries := make([]string, 0, len(ports))
	for _, p := range ports {
		if p.PublicPort > 0 {
			entries = append(entries, fmt.Sprintf("%s:%d->%d/%s", p.IP, p.PublicPort, p.PrivatePort, p.Type))
			continue
		}
		entries = append(entries, fmt.Sprintf("%d/%s", p.PrivatePort, p.Type))
	}
	return strings.Join(entries, ", ")
}
// name returns the canonical container name from the list of names reported
// by the docker engine, stripped of its leading slash. The engine may report
// several names (link aliases such as "/other/alias"); the shortest one is
// the container's own name.
// Bug fix: the previous implementation evaluated current[1:] even when names
// was empty (or contained an empty string), panicking with an out-of-range
// slice; it now returns "" in those cases.
func name(names []string) string {
	var shortest string
	for i, v := range names {
		if i == 0 || len(v) < len(shortest) {
			shortest = v
		}
	}
	if shortest == "" {
		return ""
	}
	return shortest[1:]
}
// Rename rename the container.
func (c *Container) Rename(ctx context.Context, newName string) error {
	id := c.container.ID
	return c.client.ContainerRename(ctx, id, newName)
}
// Remove force-removes the container, optionally deleting its anonymous
// volumes as well.
func (c *Container) Remove(ctx context.Context, removeVolume bool) error {
	opts := types.ContainerRemoveOptions{
		Force:         true,
		RemoveVolumes: removeVolume,
	}
	return c.client.ContainerRemove(ctx, c.container.ID, opts)
}
// Stop stops the container, allowing it the given number of seconds to shut
// down gracefully before being killed.
func (c *Container) Stop(ctx context.Context, timeout int) error {
	grace := time.Duration(timeout) * time.Second
	return c.client.ContainerStop(ctx, c.container.ID, &grace)
}
// Pause pauses the container. If the containers are already paused, don't fail.
func (c *Container) Pause(ctx context.Context) error {
	if c.container.State.Paused {
		return nil
	}
	if err := c.client.ContainerPause(ctx, c.container.ID); err != nil {
		return err
	}
	return c.updateInnerContainer(ctx)
}
// Unpause unpauses the container. If the containers are not paused, don't fail.
func (c *Container) Unpause(ctx context.Context) error {
	if !c.container.State.Paused {
		return nil
	}
	if err := c.client.ContainerUnpause(ctx, c.container.ID); err != nil {
		return err
	}
	return c.updateInnerContainer(ctx)
}
// updateInnerContainer refreshes the cached inspect data from the engine.
func (c *Container) updateInnerContainer(ctx context.Context) error {
	refreshed, err := Get(ctx, c.client, c.container.ID)
	if err == nil {
		c.container = refreshed
	}
	return err
}
// Kill sends the given signal to the container.
func (c *Container) Kill(ctx context.Context, signal string) error {
	id := c.container.ID
	return c.client.ContainerKill(ctx, id, signal)
}
// IsRunning reports whether the container was running at the time it was
// last inspected. The ctx parameter is currently unused.
func (c *Container) IsRunning(ctx context.Context) bool {
	state := c.container.State
	return state.Running
}
// Run creates, start and attach to the container based on the image name,
// the specified configuration.
// It will always create a new container.
// Returns the container's exit code once it terminates.
func (c *Container) Run(ctx context.Context, configOverride *config.ServiceConfig) (int, error) {
	var (
		errCh chan error
		out, stderr io.Writer
		in io.ReadCloser
	)
	// Wire local stdio through only when the service config asks for it:
	// stdin_open forwards our stdin, tty forwards stdout/stderr.
	if configOverride.StdinOpen {
		in = os.Stdin
	}
	if configOverride.Tty {
		out = os.Stdout
		stderr = os.Stderr
	}
	options := types.ContainerAttachOptions{
		Stream: true,
		Stdin:  configOverride.StdinOpen,
		Stdout: configOverride.Tty,
		Stderr: configOverride.Tty,
	}
	// Attach before starting so no early output is lost.
	resp, err := c.client.ContainerAttach(ctx, c.container.ID, options)
	if err != nil {
		return -1, err
	}
	// set raw terminal
	inFd, _ := term.GetFdInfo(in)
	state, err := term.SetRawTerminal(inFd)
	if err != nil {
		return -1, err
	}
	// restore raw terminal
	defer term.RestoreTerminal(inFd, state)
	// holdHijackedConnection (in goroutine)
	errCh = promise.Go(func() error {
		return holdHijackedConnection(configOverride.Tty, in, out, stderr, resp)
	})
	if err := c.client.ContainerStart(ctx, c.container.ID, types.ContainerStartOptions{}); err != nil {
		return -1, err
	}
	// Match the container's TTY size to the local terminal.
	if configOverride.Tty {
		ws, err := term.GetWinsize(inFd)
		if err != nil {
			return -1, err
		}
		resizeOpts := types.ResizeOptions{
			Height: uint(ws.Height),
			Width:  uint(ws.Width),
		}
		if err := c.client.ContainerResize(ctx, c.container.ID, resizeOpts); err != nil {
			return -1, err
		}
	}
	// Block until the attach stream is torn down (container exit or I/O error).
	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return -1, err
	}
	// Re-inspect to pick up the exit code.
	exitedContainer, err := c.client.ContainerInspect(ctx, c.container.ID)
	if err != nil {
		return -1, err
	}
	return exitedContainer.State.ExitCode, nil
}
// holdHijackedConnection pumps data between the local stdio streams and the
// hijacked attach connection until the output pump finishes (or, with no
// output streams, until stdin is drained). tty selects plain copy versus
// stdcopy demultiplexing of the engine's combined stream.
func holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {
	var err error
	receiveStdout := make(chan error, 1)
	if outputStream != nil || errorStream != nil {
		go func() {
			// When TTY is ON, use regular copy
			if tty && outputStream != nil {
				_, err = io.Copy(outputStream, resp.Reader)
			} else {
				_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
			}
			logrus.Debugf("[hijack] End of stdout")
			receiveStdout <- err
		}()
	}
	stdinDone := make(chan struct{})
	go func() {
		if inputStream != nil {
			io.Copy(resp.Conn, inputStream)
			logrus.Debugf("[hijack] End of stdin")
		}
		// Signal EOF to the container's stdin; failure here is non-fatal.
		if err := resp.CloseWrite(); err != nil {
			logrus.Debugf("Couldn't send EOF: %s", err)
		}
		close(stdinDone)
	}()
	// Wait for whichever side finishes first; when stdin ends first, still
	// wait for the output pump so trailing output is not dropped.
	select {
	case err := <-receiveStdout:
		if err != nil {
			logrus.Debugf("Error receiveStdout: %s", err)
			return err
		}
	case <-stdinDone:
		if outputStream != nil || errorStream != nil {
			if err := <-receiveStdout; err != nil {
				logrus.Debugf("Error receiveStdout: %s", err)
				return err
			}
		}
	}
	return nil
}
// Start the specified container with the specified host config
func (c *Container) Start(ctx context.Context) error {
	fields := logrus.Fields{"container.ID": c.container.ID, "container.Name": c.container.Name}
	logrus.WithFields(fields).Debug("Starting container")
	err := c.client.ContainerStart(ctx, c.container.ID, types.ContainerStartOptions{})
	if err != nil {
		logrus.WithFields(fields).Debug("Failed to start container")
	}
	return err
}
// Restart restarts the container if existing, does nothing otherwise.
func (c *Container) Restart(ctx context.Context, timeout int) error {
	grace := time.Duration(timeout) * time.Second
	return c.client.ContainerRestart(ctx, c.container.ID, &grace)
}
// Log forwards container logs to the project configured logger.
// When follow is true the call streams until the container stops.
func (c *Container) Log(ctx context.Context, l logger.Logger, follow bool) error {
	// Inspect first: the Tty flag decides how the log stream is decoded below.
	info, err := c.client.ContainerInspect(ctx, c.container.ID)
	if err != nil {
		return err
	}
	options := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     follow,
		Tail:       "all",
	}
	responseBody, err := c.client.ContainerLogs(ctx, c.container.ID, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()
	// TTY containers emit a single raw stream; non-TTY streams are
	// multiplexed and need stdcopy to split stdout from stderr.
	if info.Config.Tty {
		_, err = io.Copy(&logger.Wrapper{Logger: l}, responseBody)
	} else {
		_, err = stdcopy.StdCopy(&logger.Wrapper{Logger: l}, &logger.Wrapper{Logger: l, Err: true}, responseBody)
	}
	logrus.WithFields(logrus.Fields{"Logger": l, "err": err}).Debug("c.client.Logs() returned error")
	return err
}
// Port returns the host address(es) the given container port is published
// on, newline-separated, or "" when the port is not published.
func (c *Container) Port(ctx context.Context, port string) (string, error) {
	bindings, ok := c.container.NetworkSettings.Ports[nat.Port(port)]
	if !ok {
		return "", nil
	}
	addrs := make([]string, 0, len(bindings))
	for _, b := range bindings {
		addrs = append(addrs, b.HostIP+":"+b.HostPort)
	}
	return strings.Join(addrs, "\n"), nil
}
// Networks returns the containers network
func (c *Container) Networks() (map[string]*network.EndpointSettings, error) {
	return c.container.NetworkSettings.Networks, nil
}

// ID returns the container Id.
func (c *Container) ID() string {
	return c.container.ID
}

// ShortID return the container Id in its short form
// NOTE(review): slices the first 12 characters unconditionally — panics if
// the ID is shorter; fine for engine-issued 64-hex-char IDs, worth confirming.
func (c *Container) ShortID() string {
	return c.container.ID[:12]
}

// Name returns the container name.
func (c *Container) Name() string {
	return c.container.Name
}

// Image returns the container image. Depending on the engine version its either
// the complete id or the digest reference the image.
func (c *Container) Image() string {
	return c.container.Image
}

// ImageConfig returns the container image stored in the config. It's the
// human-readable name of the image.
func (c *Container) ImageConfig() string {
	return c.container.Config.Image
}

// Hash returns the container hash stored as label.
func (c *Container) Hash() string {
	return c.container.Config.Labels[labels.HASH.Str()]
}

// Number returns the container number stored as label.
// Returns strconv's error when the label is missing or not an integer.
func (c *Container) Number() (int, error) {
	numberStr := c.container.Config.Labels[labels.NUMBER.Str()]
	return strconv.Atoi(numberStr)
}

View file

@ -0,0 +1,41 @@
package container
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"golang.org/x/net/context"
)
// ListByFilter looks up the hosts containers with the specified filters and
// returns a list of container matching it, or an error.
func ListByFilter(ctx context.Context, clientInstance client.ContainerAPIClient, containerFilters ...map[string][]string) ([]types.Container, error) {
	args := filters.NewArgs()
	// FIXME(vdemeester) I don't like 3 for loops >_<
	for _, filterMap := range containerFilters {
		for key, values := range filterMap {
			for _, value := range values {
				args.Add(key, value)
			}
		}
	}
	options := types.ContainerListOptions{All: true, Filters: args}
	return clientInstance.ContainerList(ctx, options)
}
// Get looks up the hosts containers with the specified ID
// or name and returns it, or an error. A nil container with a nil error is
// returned when no container matches.
func Get(ctx context.Context, clientInstance client.ContainerAPIClient, id string) (*types.ContainerJSON, error) {
	inspected, err := clientInstance.ContainerInspect(ctx, id)
	if err == nil {
		return &inspected, nil
	}
	if client.IsErrContainerNotFound(err) {
		return nil, nil
	}
	return nil, err
}

View file

@ -0,0 +1,35 @@
package ctx
import (
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/libcompose/docker/auth"
"github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
)
// Context holds context meta information about a libcompose project and docker
// client information (like configuration file, builder to use, …)
type Context struct {
	project.Context
	// ClientFactory creates the docker API clients used by the project.
	ClientFactory client.Factory
	// ConfigDir is the directory the docker CLI config is loaded from.
	ConfigDir string
	// ConfigFile is the loaded docker CLI configuration; populated lazily by
	// LookupConfig when nil.
	ConfigFile *configfile.ConfigFile
	// AuthLookup resolves registry credentials for pulls and builds.
	AuthLookup auth.Lookup
}
// LookupConfig tries to load the docker configuration files, if any.
// It is a no-op when a ConfigFile has already been set on the context.
func (c *Context) LookupConfig() error {
	if c.ConfigFile != nil {
		return nil
	}
	loaded, err := cliconfig.Load(c.ConfigDir)
	if err == nil {
		c.ConfigFile = loaded
	}
	return err
}

View file

@ -0,0 +1,104 @@
package image
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/registry"
"github.com/docker/libcompose/docker/auth"
"golang.org/x/net/context"
)
// Exists return whether or not the service image already exists
func Exists(ctx context.Context, clt client.ImageAPIClient, image string) (bool, error) {
	switch _, err := InspectImage(ctx, clt, image); {
	case err == nil:
		return true, nil
	case client.IsErrImageNotFound(err):
		return false, nil
	default:
		return false, err
	}
}
// InspectImage inspect the specified image (can be a name, an id or a digest)
// with the specified client. The raw JSON payload is discarded.
func InspectImage(ctx context.Context, client client.ImageAPIClient, image string) (types.ImageInspect, error) {
	inspect, _, err := client.ImageInspectWithRaw(ctx, image)
	return inspect, err
}
// RemoveImage removes the specified image (can be a name, an id or a digest)
// from the daemon store with the specified client, using default options.
func RemoveImage(ctx context.Context, client client.ImageAPIClient, image string) error {
	opts := types.ImageRemoveOptions{}
	_, err := client.ImageRemove(ctx, image, opts)
	return err
}
// PullImage pulls the specified image (can be a name, an id or a digest)
// to the daemon store with the specified client. Progress is rendered on
// stderr; registry credentials are resolved through authLookup.
func PullImage(ctx context.Context, client client.ImageAPIClient, serviceName string, authLookup auth.Lookup, image string) error {
	fmt.Fprintf(os.Stderr, "Pulling %s (%s)...\n", serviceName, image)
	distributionRef, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return err
	}
	repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
	if err != nil {
		return err
	}
	authConfig := authLookup.Lookup(repoInfo)
	// Use ConfigFile.SaveToWriter to not re-define encodeAuthToBase64
	encodedAuth, err := encodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}
	options := types.ImagePullOptions{
		RegistryAuth: encodedAuth,
	}
	responseBody, err := client.ImagePull(ctx, distributionRef.String(), options)
	if err != nil {
		logrus.Errorf("Failed to pull image %s: %v", image, err)
		return err
	}
	defer responseBody.Close()
	var writeBuff io.Writer = os.Stderr
	outFd, isTerminalOut := term.GetFdInfo(os.Stderr)
	err = jsonmessage.DisplayJSONMessagesStream(responseBody, writeBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			// Bug fix: the original executed fmt.Fprintf(os.Stderr, "%s", writeBuff),
			// which formats the io.Writer value itself (printing
			// "%!s(*os.File=...)" garbage) instead of any buffered output —
			// writeBuff is os.Stderr, so the stream was already printed above.
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload
func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
	payload, err := json.Marshal(authConfig)
	if err != nil {
		return "", err
	}
	encoded := base64.URLEncoding.EncodeToString(payload)
	return encoded, nil
}

View file

@ -0,0 +1,19 @@
package network
import (
"github.com/docker/libcompose/config"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
)
// DockerFactory implements project.NetworksFactory
type DockerFactory struct {
	// ClientFactory provides the docker API client used to manage networks.
	ClientFactory composeclient.Factory
}
// Create implements project.NetworksFactory Create method.
// It creates a Networks (that implements project.Networks) from specified configurations.
func (f *DockerFactory) Create(projectName string, networkConfigs map[string]*config.NetworkConfig, serviceConfigs *config.ServiceConfigs, networkEnabled bool) (project.Networks, error) {
	apiClient := f.ClientFactory.Create(nil)
	return NetworksFromServices(apiClient, projectName, networkConfigs, serviceConfigs, networkEnabled)
}

View file

@ -0,0 +1,200 @@
package network
import (
"fmt"
"reflect"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/yaml"
)
// Network holds attributes and method for a network definition in compose
type Network struct {
	client client.NetworkAPIClient
	// name is the network name: the external name for external networks,
	// otherwise prefixed with the project name (see fullName).
	name string
	projectName string
	driver string
	driverOptions map[string]string
	ipam config.Ipam
	// external marks networks managed outside of the project lifecycle;
	// they are never created or removed by libcompose.
	external bool
}
// fullName returns the network name as known by the docker engine: external
// networks keep their own name, project networks are prefixed with
// "<project>_".
func (n *Network) fullName() string {
	if n.external {
		return n.name
	}
	return n.projectName + "_" + n.name
}
// Inspect fetches the current state of the network from the docker engine.
func (n *Network) Inspect(ctx context.Context) (types.NetworkResource, error) {
	resource, err := n.client.NetworkInspect(ctx, n.fullName(), false)
	return resource, err
}
// Remove removes the current network (from docker engine). External networks
// are never removed since they are managed outside the project lifecycle.
func (n *Network) Remove(ctx context.Context) error {
	if n.external {
		// Bug fix: the message previously lacked a trailing newline
		// (unlike the sibling Printf below), running it into later output.
		fmt.Printf("Network %s is external, skipping\n", n.fullName())
		return nil
	}
	fmt.Printf("Removing network %q\n", n.fullName())
	return n.client.NetworkRemove(ctx, n.fullName())
}
// EnsureItExists make sure the network exists and return an error if it does not exists
// and cannot be created.
func (n *Network) EnsureItExists(ctx context.Context) error {
	networkResource, err := n.Inspect(ctx)
	// External networks are never created here: their absence is an error
	// the user must resolve manually.
	if n.external {
		if client.IsErrNetworkNotFound(err) {
			// FIXME(vdemeester) introduce some libcompose error type
			return fmt.Errorf("Network %s declared as external, but could not be found. Please create the network manually using docker network create %s and try again", n.fullName(), n.fullName())
		}
		return err
	}
	if err != nil && client.IsErrNetworkNotFound(err) {
		return n.create(ctx)
	}
	// The network exists: refuse to reuse it when its driver or options no
	// longer match the compose configuration.
	if n.driver != "" && networkResource.Driver != n.driver {
		return fmt.Errorf("Network %q needs to be recreated - driver has changed", n.fullName())
	}
	if len(n.driverOptions) != 0 && !reflect.DeepEqual(networkResource.Options, n.driverOptions) {
		return fmt.Errorf("Network %q needs to be recreated - options have changed", n.fullName())
	}
	return err
}
// create asks the docker engine to create the network with the configured
// driver, driver options and IPAM settings.
func (n *Network) create(ctx context.Context) error {
	fmt.Printf("Creating network %q with driver %q\n", n.fullName(), n.driver)
	spec := types.NetworkCreate{
		Driver:  n.driver,
		Options: n.driverOptions,
		IPAM:    convertToAPIIpam(n.ipam),
	}
	_, err := n.client.NetworkCreate(ctx, n.fullName(), spec)
	return err
}
// convertToAPIIpam translates a compose-file IPAM configuration into the
// docker engine API representation.
func convertToAPIIpam(ipam config.Ipam) *network.IPAM {
	apiConfigs := make([]network.IPAMConfig, 0, len(ipam.Config))
	for _, c := range ipam.Config {
		apiConfigs = append(apiConfigs, network.IPAMConfig{
			Subnet:     c.Subnet,
			IPRange:    c.IPRange,
			Gateway:    c.Gateway,
			AuxAddress: c.AuxAddress,
		})
	}
	return &network.IPAM{
		Driver: ipam.Driver,
		Config: apiConfigs,
	}
}
// NewNetwork creates a new network from the specified name and config.
func NewNetwork(projectName, name string, config *config.NetworkConfig, client client.NetworkAPIClient) *Network {
	actualName := name
	if config.External.External {
		actualName = config.External.Name
	}
	n := &Network{
		client:        client,
		name:          actualName,
		projectName:   projectName,
		driver:        config.Driver,
		driverOptions: config.DriverOpts,
		external:      config.External.External,
		ipam:          config.Ipam,
	}
	return n
}
// Networks holds a list of network
type Networks struct {
	networks []*Network
	// networkEnabled gates Initialize and Remove; when false both are no-ops.
	networkEnabled bool
}
// Initialize make sure network exists if network is enabled
func (n *Networks) Initialize(ctx context.Context) error {
	if !n.networkEnabled {
		return nil
	}
	for _, net := range n.networks {
		if err := net.EnsureItExists(ctx); err != nil {
			return err
		}
	}
	return nil
}
// Remove removes networks (clean-up)
func (n *Networks) Remove(ctx context.Context) error {
	if !n.networkEnabled {
		return nil
	}
	for _, net := range n.networks {
		if err := net.Remove(ctx); err != nil {
			return err
		}
	}
	return nil
}
// NetworksFromServices creates a new Networks struct based on networks configurations and
// services configuration. If a network is defined but not used by any service, it will return
// an error along the Networks.
func NetworksFromServices(cli client.NetworkAPIClient, projectName string, networkConfigs map[string]*config.NetworkConfig, services *config.ServiceConfigs, networkEnabled bool) (*Networks, error) {
	var err error
	networks := make([]*Network, 0, len(networkConfigs))
	// Collect every network referenced by a service, validating that each
	// non-default reference has a matching top-level network definition.
	networkNames := map[string]*yaml.Network{}
	for _, serviceName := range services.Keys() {
		serviceConfig, _ := services.Get(serviceName)
		// Services with an explicit network_mode, or with no networks at
		// all, do not take part in compose-managed networking.
		if serviceConfig.NetworkMode != "" || serviceConfig.Networks == nil || len(serviceConfig.Networks.Networks) == 0 {
			continue
		}
		for _, network := range serviceConfig.Networks.Networks {
			if network.Name != "default" {
				if _, ok := networkConfigs[network.Name]; !ok {
					return nil, fmt.Errorf(`Service "%s" uses an undefined network "%s"`, serviceName, network.Name)
				}
			}
			networkNames[network.Name] = network
		}
	}
	// With no top-level networks defined, compose provides an implicit
	// bridge-driver "default" network.
	if len(networkConfigs) == 0 {
		network := NewNetwork(projectName, "default", &config.NetworkConfig{
			Driver: "bridge",
		}, cli)
		networks = append(networks, network)
	}
	for name, config := range networkConfigs {
		network := NewNetwork(projectName, name, config, cli)
		networks = append(networks, network)
	}
	// Report (without discarding the result) networks that are defined yet
	// unused; the Networks value is returned alongside the error.
	if len(networkNames) != len(networks) {
		unused := []string{}
		for name := range networkConfigs {
			if name == "default" {
				continue
			}
			if _, ok := networkNames[name]; !ok {
				unused = append(unused, name)
			}
		}
		if len(unused) != 0 {
			err = fmt.Errorf("Some networks were defined but are not used by any service: %v", strings.Join(unused, " "))
		}
	}
	return &Networks{
		networks:       networks,
		networkEnabled: networkEnabled,
	}, err
}

106
vendor/github.com/docker/libcompose/docker/project.go generated vendored Normal file
View file

@ -0,0 +1,106 @@
package docker
import (
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker/auth"
"github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/docker/ctx"
"github.com/docker/libcompose/docker/network"
"github.com/docker/libcompose/docker/service"
"github.com/docker/libcompose/docker/volume"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/project"
)
// NewProject creates a Project with the specified context.
func NewProject(context *ctx.Context, parseOptions *config.ParseOptions) (project.APIProject, error) {
	// Best-effort load of the docker CLI configuration; a failure is
	// logged but does not abort project creation.
	if err := context.LookupConfig(); err != nil {
		logrus.Errorf("Failed to load docker config: %v", err)
	}

	// Fill in every factory the caller did not provide.
	if context.AuthLookup == nil {
		context.AuthLookup = auth.NewConfigLookup(context.ConfigFile)
	}
	if context.ServiceFactory == nil {
		context.ServiceFactory = service.NewFactory(context)
	}
	if context.ClientFactory == nil {
		clientFactory, err := client.NewDefaultFactory(client.Options{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = clientFactory
	}
	if context.NetworksFactory == nil {
		context.NetworksFactory = &network.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
	}
	if context.VolumesFactory == nil {
		context.VolumesFactory = &volume.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
	}

	// FIXME(vdemeester) Remove the context duplication ?
	runtime := &Project{
		clientFactory: context.ClientFactory,
	}
	p := project.NewProject(&context.Context, runtime, parseOptions)
	if err := p.Parse(); err != nil {
		return nil, err
	}
	return p, nil
}
// Project implements project.RuntimeProject and define docker runtime specific methods.
type Project struct {
	clientFactory client.Factory // creates docker API clients for runtime operations
}
// RemoveOrphans implements project.RuntimeProject.RemoveOrphans.
// It will remove orphan containers that are part of the project but not to any services.
func (p *Project) RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error {
	cli := p.clientFactory.Create(nil)

	// All containers labeled as belonging to this project.
	projectFilter := filters.NewArgs()
	projectFilter.Add("label", labels.PROJECT.EqString(projectName))
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{
		Filters: projectFilter,
	})
	if err != nil {
		return err
	}

	// Set of service names currently defined by the project.
	currentServices := map[string]struct{}{}
	for _, name := range serviceConfigs.Keys() {
		currentServices[name] = struct{}{}
	}

	// Kill and force-remove every project container whose service label
	// no longer matches a defined service.
	for _, cnt := range containers {
		if _, ok := currentServices[cnt.Labels[labels.SERVICE.Str()]]; ok {
			continue
		}
		if err := cli.ContainerKill(ctx, cnt.ID, "SIGKILL"); err != nil {
			return err
		}
		if err := cli.ContainerRemove(ctx, cnt.ID, types.ContainerRemoveOptions{
			Force: true,
		}); err != nil {
			return err
		}
	}
	return nil
}

View file

@ -0,0 +1,376 @@
package service
import (
"fmt"
"strings"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/runconfig/opts"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/docker/libcompose/config"
composeclient "github.com/docker/libcompose/docker/client"
composecontainer "github.com/docker/libcompose/docker/container"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/utils"
"golang.org/x/net/context"
)
// ConfigWrapper wraps Config, HostConfig and NetworkingConfig for a container.
type ConfigWrapper struct {
	Config           *container.Config         // container configuration (image, cmd, env, ...)
	HostConfig       *container.HostConfig     // host-level configuration (binds, ports, resources, ...)
	NetworkingConfig *network.NetworkingConfig // endpoint/network attachment configuration
}
// Filter returns the elements of vs for which the predicate f reports true,
// preserving their original order.
func Filter(vs []string, f func(string) bool) []string {
	kept := make([]string, 0, len(vs))
	for _, s := range vs {
		if !f(s) {
			continue
		}
		kept = append(kept, s)
	}
	return kept
}
// toMap converts a string slice into a set, skipping empty strings.
func toMap(vs []string) map[string]struct{} {
	set := make(map[string]struct{}, len(vs))
	for _, s := range vs {
		if s == "" {
			continue
		}
		set[s] = struct{}{}
	}
	return set
}
// isBind reports whether the volume specification contains a colon,
// which marks a host:container bind mount.
func isBind(s string) bool {
	return strings.IndexRune(s, ':') >= 0
}

// isVolume reports whether the specification is a plain named volume
// (i.e. not a bind mount).
func isVolume(s string) bool {
	return !isBind(s)
}
// ConvertToAPI converts a service configuration to a docker API container configuration.
func ConvertToAPI(serviceConfig *config.ServiceConfig, ctx project.Context, clientFactory composeclient.Factory) (*ConfigWrapper, error) {
	cfg, hostCfg, err := Convert(serviceConfig, ctx, clientFactory)
	if err != nil {
		return nil, err
	}
	return &ConfigWrapper{
		Config:     cfg,
		HostConfig: hostCfg,
	}, nil
}
// volumes builds the list of volume specifications for the service,
// resolving relative bind-mount sources against the first compose file.
func volumes(c *config.ServiceConfig, ctx project.Context) []string {
	if c.Volumes == nil {
		return []string{}
	}
	// Fix: the slice was previously created with make([]string, len(...))
	// and then appended to, leaving len(...) empty strings at the front of
	// the result. Allocate with zero length and full capacity instead.
	volumes := make([]string, 0, len(c.Volumes.Volumes))
	for _, v := range c.Volumes.Volumes {
		vol := v
		// Resolve relative host paths against the first compose file's
		// directory; named volumes are left untouched.
		if len(ctx.ComposeFiles) > 0 && !project.IsNamedVolume(v.Source) {
			sourceVol := ctx.ResourceLookup.ResolvePath(v.String(), ctx.ComposeFiles[0])
			vol.Source = strings.SplitN(sourceVol, ":", 2)[0]
		}
		volumes = append(volumes, vol.String())
	}
	return volumes
}
// restartPolicy parses the service's restart setting into a docker API
// container.RestartPolicy.
func restartPolicy(c *config.ServiceConfig) (*container.RestartPolicy, error) {
	parsed, err := opts.ParseRestartPolicy(c.Restart)
	if err != nil {
		return nil, err
	}
	policy := &container.RestartPolicy{
		Name:              parsed.Name,
		MaximumRetryCount: parsed.MaximumRetryCount,
	}
	return policy, nil
}
// ports merges the service's published ports and exposed ports into the
// docker API exposed-port set and port-binding map.
func ports(c *config.ServiceConfig) (map[nat.Port]struct{}, nat.PortMap, error) {
	parsed, bindings, err := nat.ParsePortSpecs(c.Ports)
	if err != nil {
		return nil, nil, err
	}
	exposeOnly, _, err := nat.ParsePortSpecs(c.Expose)
	if err != nil {
		return nil, nil, err
	}
	// Expose-only ports join the published ones (their bindings are ignored).
	for port, v := range exposeOnly {
		parsed[port] = v
	}
	exposedPorts := make(map[nat.Port]struct{}, len(parsed))
	for port, v := range parsed {
		exposedPorts[nat.Port(port)] = v
	}
	portBindings := nat.PortMap{}
	for port, specs := range bindings {
		apiBindings := make([]nat.PortBinding, len(specs))
		for i, spec := range specs {
			apiBindings[i] = nat.PortBinding{HostIP: spec.HostIP, HostPort: spec.HostPort}
		}
		portBindings[nat.Port(port)] = apiBindings
	}
	return exposedPorts, portBindings, nil
}
// Convert converts a service configuration to an docker API structures (Config and HostConfig)
func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory composeclient.Factory) (*container.Config, *container.HostConfig, error) {
	restartPolicy, err := restartPolicy(c)
	if err != nil {
		return nil, nil, err
	}
	exposedPorts, portBindings, err := ports(c)
	if err != nil {
		return nil, nil, err
	}
	deviceMappings, err := parseDevices(c.Devices)
	if err != nil {
		return nil, nil, err
	}
	var volumesFrom []string
	if c.VolumesFrom != nil {
		volumesFrom, err = getVolumesFrom(c.VolumesFrom, ctx.Project.ServiceConfigs, ctx.ProjectName)
		if err != nil {
			return nil, nil, err
		}
	}
	// vols is split below: named volumes go to Config.Volumes, bind
	// mounts (containing ':') go to HostConfig.Binds.
	vols := volumes(c, ctx)
	config := &container.Config{
		Entrypoint:   strslice.StrSlice(utils.CopySlice(c.Entrypoint)),
		Hostname:     c.Hostname,
		Domainname:   c.DomainName,
		User:         c.User,
		Env:          utils.CopySlice(c.Environment),
		Cmd:          strslice.StrSlice(utils.CopySlice(c.Command)),
		Image:        c.Image,
		Labels:       utils.CopyMap(c.Labels),
		ExposedPorts: exposedPorts,
		Tty:          c.Tty,
		OpenStdin:    c.StdinOpen,
		WorkingDir:   c.WorkingDir,
		Volumes:      toMap(Filter(vols, isVolume)),
		MacAddress:   c.MacAddress,
		StopSignal:   c.StopSignal,
		StopTimeout:  utils.DurationStrToSecondsInt(c.StopGracePeriod),
	}
	ulimits := []*units.Ulimit{}
	if c.Ulimits.Elements != nil {
		for _, ulimit := range c.Ulimits.Elements {
			ulimits = append(ulimits, &units.Ulimit{
				Name: ulimit.Name,
				Soft: ulimit.Soft,
				Hard: ulimit.Hard,
			})
		}
	}
	memorySwappiness := int64(c.MemSwappiness)
	resources := container.Resources{
		CgroupParent:      c.CgroupParent,
		Memory:            int64(c.MemLimit),
		MemoryReservation: int64(c.MemReservation),
		MemorySwap:        int64(c.MemSwapLimit),
		MemorySwappiness:  &memorySwappiness,
		CPUShares:         int64(c.CPUShares),
		CPUQuota:          int64(c.CPUQuota),
		CpusetCpus:        c.CPUSet,
		Ulimits:           ulimits,
		Devices:           deviceMappings,
		OomKillDisable:    &c.OomKillDisable,
	}
	// Resolve the effective network mode: when unset, default to the
	// first declared network's real name; otherwise translate
	// "service:<name>" / "container:<name>" into "container:<id>" modes.
	networkMode := c.NetworkMode
	if c.NetworkMode == "" {
		if c.Networks != nil && len(c.Networks.Networks) > 0 {
			networkMode = c.Networks.Networks[0].RealName
		}
	} else {
		switch {
		case strings.HasPrefix(c.NetworkMode, "service:"):
			serviceName := c.NetworkMode[8:] // strip the "service:" prefix
			if serviceConfig, ok := ctx.Project.ServiceConfigs.Get(serviceName); ok {
				// FIXME(vdemeester) this is actually not right, should be fixed but not there
				service, err := ctx.ServiceFactory.Create(ctx.Project, serviceName, serviceConfig)
				if err != nil {
					return nil, nil, err
				}
				containers, err := service.Containers(context.Background())
				if err != nil {
					return nil, nil, err
				}
				// Share the network namespace of the service's first container.
				if len(containers) != 0 {
					container := containers[0]
					containerID := container.ID()
					networkMode = "container:" + containerID
				}
				// FIXME(vdemeester) log/warn in case of len(containers) == 0
			}
		case strings.HasPrefix(c.NetworkMode, "container:"):
			containerName := c.NetworkMode[10:] // strip the "container:" prefix
			client := clientFactory.Create(nil)
			container, err := composecontainer.Get(context.Background(), client, containerName)
			if err != nil {
				return nil, nil, err
			}
			networkMode = "container:" + container.ID
		default:
			// do nothing :)
		}
	}
	// tmpfs entries may carry mount options after a colon: "path[:opts]".
	tmpfs := map[string]string{}
	for _, path := range c.Tmpfs {
		split := strings.SplitN(path, ":", 2)
		if len(split) == 1 {
			tmpfs[split[0]] = ""
		} else if len(split) == 2 {
			tmpfs[split[0]] = split[1]
		}
	}
	hostConfig := &container.HostConfig{
		VolumesFrom: volumesFrom,
		CapAdd:      strslice.StrSlice(utils.CopySlice(c.CapAdd)),
		CapDrop:     strslice.StrSlice(utils.CopySlice(c.CapDrop)),
		GroupAdd:    c.GroupAdd,
		ExtraHosts:  utils.CopySlice(c.ExtraHosts),
		Privileged:  c.Privileged,
		Binds:       Filter(vols, isBind),
		DNS:         utils.CopySlice(c.DNS),
		DNSOptions:  utils.CopySlice(c.DNSOpts),
		DNSSearch:   utils.CopySlice(c.DNSSearch),
		Isolation:   container.Isolation(c.Isolation),
		LogConfig: container.LogConfig{
			Type:   c.Logging.Driver,
			Config: utils.CopyMap(c.Logging.Options),
		},
		NetworkMode:    container.NetworkMode(networkMode),
		ReadonlyRootfs: c.ReadOnly,
		OomScoreAdj:    int(c.OomScoreAdj),
		PidMode:        container.PidMode(c.Pid),
		UTSMode:        container.UTSMode(c.Uts),
		IpcMode:        container.IpcMode(c.Ipc),
		PortBindings:   portBindings,
		RestartPolicy:  *restartPolicy,
		ShmSize:        int64(c.ShmSize),
		SecurityOpt:    utils.CopySlice(c.SecurityOpt),
		Tmpfs:          tmpfs,
		VolumeDriver:   c.VolumeDriver,
		Resources:      resources,
	}
	// Guarantee a non-nil label map for downstream consumers.
	if config.Labels == nil {
		config.Labels = map[string]string{}
	}
	return config, hostConfig, nil
}
// getVolumesFrom resolves each volumes_from entry to a container name:
// an entry naming a service maps to that service's first container (or its
// explicit container_name); anything else is passed through unchanged.
func getVolumesFrom(volumesFrom []string, serviceConfigs *config.ServiceConfigs, projectName string) ([]string, error) {
	resolved := make([]string, 0, len(volumesFrom))
	for _, entry := range volumesFrom {
		serviceConfig, isService := serviceConfigs.Get(entry)
		if !isService {
			resolved = append(resolved, entry)
			continue
		}
		// It's a service - use its first container, unless an explicit
		// container name was configured.
		name := serviceConfig.ContainerName
		if name == "" {
			name = fmt.Sprintf("%s_%s_1", projectName, entry)
		}
		resolved = append(resolved, name)
	}
	return resolved, nil
}
// parseDevices converts device specification strings into docker API
// device mappings, failing on the first invalid specification.
func parseDevices(devices []string) ([]container.DeviceMapping, error) {
	deviceMappings := make([]container.DeviceMapping, 0, len(devices))
	for _, spec := range devices {
		mapping, err := parseDevice(spec)
		if err != nil {
			return nil, err
		}
		deviceMappings = append(deviceMappings, container.DeviceMapping{
			PathOnHost:        mapping.PathOnHost,
			PathInContainer:   mapping.PathInContainer,
			CgroupPermissions: mapping.CgroupPermissions,
		})
	}
	return deviceMappings, nil
}
// parseDevice parses a device mapping string to a container.DeviceMapping struct
// Accepted forms are "src", "src:dst" / "src:permissions", and
// "src:dst:permissions" (the fallthroughs intentionally let an earlier
// case reuse the handling of the shorter forms).
// FIXME(vdemeester) de-duplicate this by re-exporting it in docker/docker
func parseDevice(device string) (container.DeviceMapping, error) {
	var src, dst string
	permissions := "rwm"

	parts := strings.Split(device, ":")
	switch len(parts) {
	case 3:
		permissions = parts[2]
		fallthrough
	case 2:
		// The second field is either a permission set or the
		// in-container path.
		if validDeviceMode(parts[1]) {
			permissions = parts[1]
		} else {
			dst = parts[1]
		}
		fallthrough
	case 1:
		src = parts[0]
	default:
		return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device)
	}

	// With no explicit container path, mirror the host path.
	if dst == "" {
		dst = src
	}
	return container.DeviceMapping{
		PathOnHost:        src,
		PathInContainer:   dst,
		CgroupPermissions: permissions,
	}, nil
}
// validDeviceMode checks if the mode for device is valid or not.
// Valid mode is a non-empty composition of r (read), w (write) and
// m (mknod), with no character repeated.
func validDeviceMode(mode string) bool {
	if mode == "" {
		return false
	}
	remaining := map[rune]bool{'r': true, 'w': true, 'm': true}
	for _, c := range mode {
		if !remaining[c] {
			return false // unknown flag, or flag already used
		}
		remaining[c] = false
	}
	return true
}

View file

@ -0,0 +1,92 @@
package service
import (
"fmt"
"strconv"
"golang.org/x/net/context"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/libcompose/labels"
)
// format is the container-name pattern: <project>_<service>_<number>.
const format = "%s_%s_%d"

// Namer defines method to provide container name.
type Namer interface {
	// Next returns the next container name and its sequence number.
	Next() (string, int)
}

// defaultNamer generates sequential names like project_service_1.
type defaultNamer struct {
	project       string
	service       string
	oneOff        bool // one-off containers get a "_run" suffix on the service part
	currentNumber int  // next sequence number to hand out
}

// singleNamer always returns the same fixed name.
type singleNamer struct {
	name string
}
// NewSingleNamer returns a namer that only allows a single name.
func NewSingleNamer(name string) Namer {
	return &singleNamer{name: name}
}
// NewNamer returns a namer that returns names based on the specified project and
// service name and an inner counter, e.g. project_service_1, project_service_2…
// The counter starts just above the highest number among the service's
// existing containers.
func NewNamer(ctx context.Context, client client.ContainerAPIClient, project, service string, oneOff bool) (Namer, error) {
	oneOffValue := "False"
	if oneOff {
		oneOffValue = "True"
	}
	filter := filters.NewArgs()
	filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT.Str(), project))
	filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE.Str(), service))
	filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), oneOffValue))

	containers, err := client.ContainerList(ctx, types.ContainerListOptions{
		All:     true,
		Filters: filter,
	})
	if err != nil {
		return nil, err
	}

	// Continue numbering after the highest existing container number.
	maxNumber := 0
	for _, c := range containers {
		number, err := strconv.Atoi(c.Labels[labels.NUMBER.Str()])
		if err != nil {
			return nil, err
		}
		if number > maxNumber {
			maxNumber = number
		}
	}

	return &defaultNamer{
		project:       project,
		service:       service,
		oneOff:        oneOff,
		currentNumber: maxNumber + 1,
	}, nil
}
// Next builds the next container name from project, service and counter,
// returns it with the number used, and advances the counter. One-off
// containers carry a "_run" suffix on the service part.
func (i *defaultNamer) Next() (string, int) {
	svc := i.service
	if i.oneOff {
		svc += "_run"
	}
	number := i.currentNumber
	i.currentNumber++
	return fmt.Sprintf(format, i.project, svc, number), number
}
// Next always returns the fixed configured name; the container number is
// always 1.
func (s *singleNamer) Next() (string, int) {
	return s.name, 1
}

View file

@ -0,0 +1,749 @@
package service
import (
"fmt"
"strings"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker/auth"
"github.com/docker/libcompose/docker/builder"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/docker/container"
"github.com/docker/libcompose/docker/ctx"
"github.com/docker/libcompose/docker/image"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
"github.com/docker/libcompose/project/options"
"github.com/docker/libcompose/utils"
"github.com/docker/libcompose/yaml"
)
// Service is a project.Service implementations.
type Service struct {
	name          string                // service name as declared in the compose file
	project       *project.Project      // owning project
	serviceConfig *config.ServiceConfig // parsed service configuration
	clientFactory composeclient.Factory // creates docker API clients
	authLookup    auth.Lookup           // registry authentication lookup
	// FIXME(vdemeester) remove this at some point
	context *ctx.Context
}
// NewService creates a service bound to the given compose context.
func NewService(name string, serviceConfig *config.ServiceConfig, context *ctx.Context) *Service {
	svc := &Service{
		name:          name,
		serviceConfig: serviceConfig,
		project:       context.Project,
		clientFactory: context.ClientFactory,
		authLookup:    context.AuthLookup,
		context:       context,
	}
	return svc
}
// Name returns the service name as declared in the compose file.
func (s *Service) Name() string {
	return s.name
}
// Config returns the configuration of the service (config.ServiceConfig).
// The returned pointer is shared with the service, not a copy.
func (s *Service) Config() *config.ServiceConfig {
	return s.serviceConfig
}
// DependentServices returns the dependent services (as an array of ServiceRelationship) of the service.
// It delegates to the package-level DefaultDependentServices helper.
func (s *Service) DependentServices() []project.ServiceRelationship {
	return DefaultDependentServices(s.project, s)
}
// Create implements Service.Create. It ensures the image exists or build it
// if it can and then create a container.
func (s *Service) Create(ctx context.Context, options options.Create) error {
	existing, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	if err = s.ensureImageExists(ctx, options.NoBuild, options.ForceBuild); err != nil {
		return err
	}
	// When containers already exist, recreate them as needed instead of
	// creating new ones.
	if len(existing) > 0 {
		return s.eachContainer(ctx, existing, func(c *container.Container) error {
			_, recreateErr := s.recreateIfNeeded(ctx, c, options.NoRecreate, options.ForceRecreate)
			return recreateErr
		})
	}
	namer, err := s.namer(ctx, 1)
	if err != nil {
		return err
	}
	_, err = s.createContainer(ctx, namer, "", nil, false)
	return err
}
// namer picks the naming strategy for this service's containers: a fixed
// name when container_name is configured (warning when count > 1, since a
// fixed name cannot scale), sequential names otherwise.
func (s *Service) namer(ctx context.Context, count int) (Namer, error) {
	if s.serviceConfig.ContainerName == "" {
		return NewNamer(ctx, s.clientFactory.Create(s), s.project.Name, s.name, false)
	}
	if count > 1 {
		logrus.Warnf(`The "%s" service is using the custom container name "%s". Docker requires each container to have a unique name. Remove the custom name to scale the service.`, s.name, s.serviceConfig.ContainerName)
	}
	return NewSingleNamer(s.serviceConfig.ContainerName), nil
}
// collectContainers returns all existing containers labeled as belonging
// to this service within the project.
func (s *Service) collectContainers(ctx context.Context) ([]*container.Container, error) {
	client := s.clientFactory.Create(s)
	found, err := container.ListByFilter(ctx, client, labels.SERVICE.Eq(s.name), labels.PROJECT.Eq(s.project.Name))
	if err != nil {
		return nil, err
	}
	result := make([]*container.Container, 0, len(found))
	for _, c := range found {
		wrapped, err := container.New(ctx, client, c.ID)
		if err != nil {
			return nil, err
		}
		result = append(result, wrapped)
	}
	return result, nil
}
// ensureImageExists makes the service image available: forceBuild always
// builds; an image already present is used as-is; a service with a build
// section is built (unless noBuild forbids it); otherwise the image is pulled.
func (s *Service) ensureImageExists(ctx context.Context, noBuild bool, forceBuild bool) error {
	if forceBuild {
		return s.build(ctx, options.Build{})
	}
	exists, err := image.Exists(ctx, s.clientFactory.Create(s), s.imageName())
	switch {
	case err != nil:
		return err
	case exists:
		return nil
	case s.Config().Build.Context != "":
		if noBuild {
			return fmt.Errorf("Service %q needs to be built, but no-build was specified", s.name)
		}
		return s.build(ctx, options.Build{})
	default:
		return s.Pull(ctx)
	}
}
// imageName returns the configured image, falling back to the
// project-derived default name (<project>_<service>) for built services.
func (s *Service) imageName() string {
	if name := s.Config().Image; name != "" {
		return name
	}
	return fmt.Sprintf("%s_%s", s.project.Name, s.Name())
}
// Build implements Service.Build. It will try to build the image and returns an error if any.
// It delegates to the unexported build helper.
func (s *Service) Build(ctx context.Context, buildOptions options.Build) error {
	return s.build(ctx, buildOptions)
}
// build runs a daemon-side image build for the service's build section,
// tagging the result with the service image name.
func (s *Service) build(ctx context.Context, buildOptions options.Build) error {
	buildCfg := s.Config().Build
	if buildCfg.Context == "" {
		return fmt.Errorf("Specified service does not have a build section")
	}
	daemonBuilder := &builder.DaemonBuilder{
		Client:           s.clientFactory.Create(s),
		ContextDirectory: buildCfg.Context,
		Dockerfile:       buildCfg.Dockerfile,
		BuildArgs:        buildCfg.Args,
		AuthConfigs:      s.authLookup.All(),
		NoCache:          buildOptions.NoCache,
		ForceRemove:      buildOptions.ForceRemove,
		Pull:             buildOptions.Pull,
		LoggerFactory:    s.context.LoggerFactory,
	}
	return daemonBuilder.Build(ctx, s.imageName())
}
// constructContainers ensures at least count containers exist for the
// service, creating new ones as needed, and returns all of them.
func (s *Service) constructContainers(ctx context.Context, count int) ([]*container.Container, error) {
	result, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	// Consistency fix: reuse the shared Service.namer helper instead of
	// duplicating its logic here (fixed name when container_name is set —
	// with the same scale warning — sequential names otherwise).
	namer, err := s.namer(ctx, count)
	if err != nil {
		return nil, err
	}
	for i := len(result); i < count; i++ {
		c, err := s.createContainer(ctx, namer, "", nil, false)
		if err != nil {
			return nil, err
		}
		logrus.Debugf("Created container %s: %v", c.ID(), c.Name())
		result = append(result, c)
	}
	return result, nil
}
// Up implements Service.Up. It builds the image if needed, creates a container
// and start it.
func (s *Service) Up(ctx context.Context, options options.Up) error {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	imageName := s.imageName()
	// Only ensure the image when containers may be (re)created.
	if len(containers) == 0 || !options.NoRecreate {
		if err := s.ensureImageExists(ctx, options.NoBuild, options.ForceBuild); err != nil {
			return err
		}
	}
	return s.up(ctx, imageName, true, options)
}
// Run implements Service.Run. It runs a one of command within the service container.
// It always create a new container.
func (s *Service) Run(ctx context.Context, commandParts []string, options options.Run) (int, error) {
	if err := s.ensureImageExists(ctx, false, false); err != nil {
		return -1, err
	}
	apiClient := s.clientFactory.Create(s)
	namer, err := NewNamer(ctx, apiClient, s.project.Name, s.name, true)
	if err != nil {
		return -1, err
	}
	// One-off containers run the given command interactively.
	configOverride := &config.ServiceConfig{Command: commandParts, Tty: true, StdinOpen: true}
	c, err := s.createContainer(ctx, namer, "", configOverride, true)
	if err != nil {
		return -1, err
	}
	if err := s.connectContainerToNetworks(ctx, c, true); err != nil {
		return -1, err
	}
	// Detached: print the name, start it, and return immediately.
	if options.Detached {
		logrus.Infof("%s", c.Name())
		return 0, c.Start(ctx)
	}
	return c.Run(ctx, configOverride)
}
// Info implements Service.Info. It returns an project.InfoSet with the containers
// related to this service (can be multiple if using the scale command).
func (s *Service) Info(ctx context.Context) (project.InfoSet, error) {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	result := project.InfoSet{}
	for _, c := range containers {
		info, infoErr := c.Info(ctx)
		if infoErr != nil {
			return nil, infoErr
		}
		result = append(result, info)
	}
	return result, nil
}
// Start implements Service.Start. It tries to start a container without creating it.
func (s *Service) Start(ctx context.Context) error {
	startOne := func(c *container.Container) error {
		// Attach the container to its networks before starting it.
		err := s.connectContainerToNetworks(ctx, c, false)
		if err == nil {
			err = c.Start(ctx)
		}
		return err
	}
	return s.collectContainersAndDo(ctx, startOne)
}
// up starts (and, when create is true, creates/recreates) the service
// containers, notifying the project of each container started.
// NOTE(review): imageName is currently unused in this body — kept for
// signature compatibility with callers; confirm before removing.
func (s *Service) up(ctx context.Context, imageName string, create bool, options options.Up) error {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	logrus.Debugf("Found %d existing containers for service %s", len(containers), s.name)
	// No containers yet: create the first one when allowed.
	if len(containers) == 0 && create {
		namer, err := s.namer(ctx, 1)
		if err != nil {
			return err
		}
		c, err := s.createContainer(ctx, namer, "", nil, false)
		if err != nil {
			return err
		}
		containers = []*container.Container{c}
	}
	return s.eachContainer(ctx, containers, func(c *container.Container) error {
		var err error
		if create {
			// Recreate the container first if its definition changed.
			c, err = s.recreateIfNeeded(ctx, c, options.NoRecreate, options.ForceRecreate)
			if err != nil {
				return err
			}
		}
		if err := s.connectContainerToNetworks(ctx, c, false); err != nil {
			return err
		}
		err = c.Start(ctx)
		if err == nil {
			// Let project listeners know the container is up.
			s.project.Notify(events.ContainerStarted, s.name, map[string]string{
				"name": c.Name(),
			})
		}
		return err
	})
}
// connectContainerToNetworks attaches the container to every network the
// service declares, disconnecting and reconnecting when the expected alias
// (the short container ID) is missing from an existing attachment.
func (s *Service) connectContainerToNetworks(ctx context.Context, c *container.Container, oneOff bool) error {
	connectedNetworks, err := c.Networks()
	if err != nil {
		// Fix: this error was previously swallowed (returned as nil),
		// which silently skipped network attachment.
		return err
	}
	if s.serviceConfig.Networks != nil {
		for _, network := range s.serviceConfig.Networks.Networks {
			existingNetwork, ok := connectedNetworks[network.Name]
			if ok {
				// FIXME(vdemeester) implement alias checking (to not disconnect/reconnect for nothing)
				aliasPresent := false
				for _, alias := range existingNetwork.Aliases {
					// The short container ID acts as the expected alias.
					ID := c.ShortID()
					if alias == ID {
						aliasPresent = true
					}
				}
				if aliasPresent {
					continue
				}
				// Reconnect below to refresh aliases and links.
				if err := s.NetworkDisconnect(ctx, c, network, oneOff); err != nil {
					return err
				}
			}
			if err := s.NetworkConnect(ctx, c, network, oneOff); err != nil {
				return err
			}
		}
	}
	return nil
}
// NetworkDisconnect disconnects the container from the specified network
func (s *Service) NetworkDisconnect(ctx context.Context, c *container.Container, net *yaml.Network, oneOff bool) error {
	containerID := c.ID()
	client := s.clientFactory.Create(s)
	// force=true so disconnection also succeeds for containers that are
	// not running.
	return client.NetworkDisconnect(ctx, net.RealName, containerID, true)
}
// NetworkConnect connects the container to the specified network
// FIXME(vdemeester) will be refactor with Container refactoring
func (s *Service) NetworkConnect(ctx context.Context, c *container.Container, net *yaml.Network, oneOff bool) error {
	client := s.clientFactory.Create(s)
	internalLinks, err := s.getLinks()
	if err != nil {
		return err
	}
	// Internal links become "target:alias" entries; external links pass
	// through unchanged.
	// TODO(vdemeester) handle link to self (?)
	links := []string{}
	for k, v := range internalLinks {
		links = append(links, strings.Join([]string{v, k}, ":"))
	}
	links = append(links, s.serviceConfig.ExternalLinks...)

	// The service name acts as an implicit alias, except for one-off
	// containers.
	aliases := []string{}
	if !oneOff {
		aliases = []string{s.Name()}
	}
	aliases = append(aliases, net.Aliases...)

	return client.NetworkConnect(ctx, net.RealName, c.ID(), &network.EndpointSettings{
		Aliases:   aliases,
		Links:     links,
		IPAddress: net.IPv4Address,
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: net.IPv4Address,
			IPv6Address: net.IPv6Address,
		},
	})
}
// recreateIfNeeded recreates the container when it is out of sync with the
// service definition or when forceRecreate is set; noRecreate wins over both.
func (s *Service) recreateIfNeeded(ctx context.Context, c *container.Container, noRecreate, forceRecreate bool) (*container.Container, error) {
	if noRecreate {
		return c, nil
	}
	outOfSync, err := s.OutOfSync(ctx, c)
	if err != nil {
		return c, err
	}
	logrus.WithFields(logrus.Fields{
		"outOfSync":     outOfSync,
		"ForceRecreate": forceRecreate,
		"NoRecreate":    noRecreate}).Debug("Going to decide if recreate is needed")
	if !forceRecreate && !outOfSync {
		return c, nil
	}
	logrus.Infof("Recreating %s", s.name)
	newContainer, err := s.recreate(ctx, c)
	if err != nil {
		return c, err
	}
	return newContainer, nil
}
// recreate replaces the container with a freshly-created one. Order is
// critical: the old container is renamed out of the way, the replacement is
// created under the original name, and only then is the old one removed.
func (s *Service) recreate(ctx context.Context, c *container.Container) (*container.Container, error) {
	name := c.Name()
	id := c.ID()
	// Rename the old container so the new one can take its name.
	newName := fmt.Sprintf("%s_%s", name, id[:12])
	logrus.Debugf("Renaming %s => %s", name, newName)
	if err := c.Rename(ctx, newName); err != nil {
		logrus.Errorf("Failed to rename old container %s", c.Name())
		return nil, err
	}
	namer := NewSingleNamer(name)
	// NOTE(review): the old container's ID is forwarded to createContainer
	// — presumably so the new container can take over its state; confirm
	// against createContainer.
	newContainer, err := s.createContainer(ctx, namer, id, nil, false)
	if err != nil {
		return nil, err
	}
	newID := newContainer.ID()
	logrus.Debugf("Created replacement container %s", newID)
	if err := c.Remove(ctx, false); err != nil {
		logrus.Errorf("Failed to remove old container %s", c.Name())
		return nil, err
	}
	logrus.Debugf("Removed old container %s %s", c.Name(), id)
	return newContainer, nil
}
// OutOfSync checks if the container is out of sync with the service definition.
// It looks if the service hash container label is the same as the computed one.
func (s *Service) OutOfSync(ctx context.Context, c *container.Container) (bool, error) {
	// Image name changed in the service definition.
	if c.ImageConfig() != s.serviceConfig.Image {
		logrus.Debugf("Images for %s do not match %s!=%s", c.Name(), c.ImageConfig(), s.serviceConfig.Image)
		return true, nil
	}
	// Service configuration hash changed since the container was created.
	expectedHash := config.GetServiceHash(s.name, s.Config())
	if c.Hash() != expectedHash {
		logrus.Debugf("Hashes for %s do not match %s!=%s", c.Name(), c.Hash(), expectedHash)
		return true, nil
	}
	image, err := image.InspectImage(ctx, s.clientFactory.Create(s), c.ImageConfig())
	if err != nil {
		if client.IsErrImageNotFound(err) {
			// Without the image we cannot tell; treat as in sync.
			logrus.Debugf("Image %s do not exist, do not know if it's out of sync", c.Image())
			return false, nil
		}
		return false, err
	}
	// Out of sync when the image was re-pulled/re-built, i.e. its ID
	// differs from the one the container was created from.
	logrus.Debugf("Checking existing image name vs id: %s == %s", image.ID, c.Image())
	return image.ID != c.Image(), err
}
// collectContainersAndDo collects every container of the service and runs
// action on each of them (in parallel, via eachContainer).
func (s *Service) collectContainersAndDo(ctx context.Context, action func(*container.Container) error) error {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	return s.eachContainer(ctx, containers, action)
}
// eachContainer runs action on every container in parallel and waits for
// all of them to finish.
func (s *Service) eachContainer(ctx context.Context, containers []*container.Container, action func(*container.Container) error) error {
	tasks := utils.InParallel{}
	for _, cont := range containers {
		cont := cont // capture a per-iteration value for the closure
		tasks.Add(func() error {
			return action(cont)
		})
	}
	return tasks.Wait()
}
// Stop implements Service.Stop. It stops any containers related to the service.
func (s *Service) Stop(ctx context.Context, timeout int) error {
	effectiveTimeout := s.stopTimeout(timeout)
	stopOne := func(c *container.Container) error {
		return c.Stop(ctx, effectiveTimeout)
	}
	return s.collectContainersAndDo(ctx, stopOne)
}
// Restart implements Service.Restart. It restarts any containers related to the service.
func (s *Service) Restart(ctx context.Context, timeout int) error {
	effectiveTimeout := s.stopTimeout(timeout)
	restartOne := func(c *container.Container) error {
		return c.Restart(ctx, effectiveTimeout)
	}
	return s.collectContainersAndDo(ctx, restartOne)
}
// Kill implements Service.Kill. It kills any containers related to the service.
func (s *Service) Kill(ctx context.Context, signal string) error {
	killOne := func(c *container.Container) error {
		return c.Kill(ctx, signal)
	}
	return s.collectContainersAndDo(ctx, killOne)
}
// Delete implements Service.Delete. It removes any containers related to the service.
func (s *Service) Delete(ctx context.Context, options options.Delete) error {
	return s.collectContainersAndDo(ctx, func(c *container.Container) error {
		// Running containers are only removed when explicitly requested.
		if c.IsRunning(ctx) && !options.RemoveRunning {
			return nil
		}
		return c.Remove(ctx, options.RemoveVolume)
	})
}
// Log implements Service.Log. It returns the docker logs for each container related to the service.
func (s *Service) Log(ctx context.Context, follow bool) error {
	return s.collectContainersAndDo(ctx, func(c *container.Container) error {
		containerNumber, err := c.Number()
		if err != nil {
			return err
		}
		// Default logger name is <service>_<number>; an explicit
		// container_name wins.
		name := fmt.Sprintf("%s_%d", s.name, containerNumber)
		if customName := s.Config().ContainerName; customName != "" {
			name = customName
		}
		return c.Log(ctx, s.context.LoggerFactory.CreateContainerLogger(name), follow)
	})
}
// Scale implements Service.Scale. It creates or removes containers to have the specified number
// of related container to the service to run.
func (s *Service) Scale(ctx context.Context, scale int, timeout int) error {
	if s.specificiesHostPort() {
		logrus.Warnf("The \"%s\" service specifies a port on the host. If multiple containers for this service are created on a single host, the port will clash.", s.Name())
	}
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	// Too many containers: stop and remove the surplus ones.
	if len(containers) > scale {
		foundCount := 0
		for _, c := range containers {
			foundCount++
			if foundCount > scale {
				timeout = s.stopTimeout(timeout)
				if err := c.Stop(ctx, timeout); err != nil {
					return err
				}
				// FIXME(vdemeester) remove volume in scale by default ?
				if err := c.Remove(ctx, false); err != nil {
					return err
				}
			}
		}
	}
	// NOTE(review): err is always nil here (checked above and never
	// reassigned in the loop's scope); this check is dead code.
	if err != nil {
		return err
	}
	// Too few containers: make the image available and create the missing ones.
	if len(containers) < scale {
		err := s.ensureImageExists(ctx, false, false)
		if err != nil {
			return err
		}
		if _, err = s.constructContainers(ctx, scale); err != nil {
			return err
		}
	}
	return s.up(ctx, "", false, options.Up{})
}
// Pull implements Service.Pull. It pulls the image of the service and skips
// services that would need to be built (no image configured).
func (s *Service) Pull(ctx context.Context) error {
	imageName := s.Config().Image
	if imageName == "" {
		// Build-only service: nothing to pull.
		return nil
	}
	return image.PullImage(ctx, s.clientFactory.Create(s), s.name, s.authLookup, imageName)
}
// Pause implements Service.Pause. It puts into pause the container(s) related
// to the service.
func (s *Service) Pause(ctx context.Context) error {
	pause := func(c *container.Container) error {
		return c.Pause(ctx)
	}
	return s.collectContainersAndDo(ctx, pause)
}
// Unpause implements Service.Unpause. It brings back from pause the
// container(s) related to the service.
func (s *Service) Unpause(ctx context.Context) error {
	return s.collectContainersAndDo(ctx, func(c *container.Container) error {
		return c.Unpause(ctx)
	})
}
// RemoveImage implements Service.RemoveImage. It removes images used for the
// service depending on the specified type: "local" only removes images built
// by compose (i.e. services without an explicit image), "all" always removes
// the service image. Any other type is a no-op (validated up-front).
func (s *Service) RemoveImage(ctx context.Context, imageType options.ImageType) error {
	if imageType != "local" && imageType != "all" {
		// Unknown types should have been rejected before reaching this point.
		return nil
	}
	if imageType == "local" && s.Config().Image != "" {
		// "local" skips images pulled from a registry.
		return nil
	}
	return image.RemoveImage(ctx, s.clientFactory.Create(s), s.imageName())
}
// eventAttributes lists the engine event attributes copied into each
// forwarded ContainerEvent.
var eventAttributes = []string{"image", "name"}

// Events implements Service.Events. It listens to all real-time events
// happening for the service and forwards them to the specified channel.
// It blocks until the engine event stream reports an error.
func (s *Service) Events(ctx context.Context, evts chan events.ContainerEvent) error {
	filter := filters.NewArgs()
	filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT, s.project.Name))
	filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE, s.name))
	client := s.clientFactory.Create(s)
	eventq, errq := client.Events(ctx, types.EventsOptions{
		Filters: filter,
	})
	go func() {
		for {
			select {
			case event, ok := <-eventq:
				if !ok {
					// Event stream closed: stop forwarding instead of
					// busy-spinning on the closed channel.
					return
				}
				service := event.Actor.Attributes[labels.SERVICE.Str()]
				attributes := make(map[string]string, len(eventAttributes))
				for _, attr := range eventAttributes {
					attributes[attr] = event.Actor.Attributes[attr]
				}
				evts <- events.ContainerEvent{
					Service:    service,
					Event:      event.Action,
					Type:       event.Type,
					ID:         event.Actor.ID,
					Time:       time.Unix(event.Time, 0),
					Attributes: attributes,
				}
			case <-ctx.Done():
				// Caller cancelled: terminate the forwarding goroutine so it
				// does not leak.
				return
			}
		}
	}()
	return <-errq
}
// Containers implements Service.Containers. It returns the list of containers
// that are related to the service.
func (s *Service) Containers(ctx context.Context) ([]project.Container, error) {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	// Re-wrap the concrete containers as the project.Container interface.
	result := make([]project.Container, 0, len(containers))
	for _, c := range containers {
		result = append(result, c)
	}
	return result, nil
}
// specificiesHostPort reports whether any of the service's port mappings pins
// a specific host port. Port-spec parse errors are printed (best-effort) and
// treated as "no host port".
func (s *Service) specificiesHostPort() bool {
	_, bindings, err := nat.ParsePortSpecs(s.Config().Ports)
	if err != nil {
		fmt.Println(err)
	}
	for _, perPort := range bindings {
		for _, binding := range perPort {
			if binding.HostPort != "" {
				return true
			}
		}
	}
	return false
}
// stopTimeout resolves the effective stop timeout in seconds:
// the CLI-provided timeout if non-zero, else the service's
// stop_grace_period if configured, else a default of 10 seconds.
func (s *Service) stopTimeout(timeout int) int {
	// Idiomatic constant naming (was an ALL_CAPS local variable).
	const defaultStopTimeout = 10
	if timeout != 0 {
		return timeout
	}
	if configTimeout := utils.DurationStrToSecondsInt(s.Config().StopGracePeriod); configTimeout != nil {
		return *configTimeout
	}
	return defaultStopTimeout
}

View file

@ -0,0 +1,181 @@
package service
import (
"fmt"
"strconv"
"strings"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/libcompose/config"
composecontainer "github.com/docker/libcompose/docker/container"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
util "github.com/docker/libcompose/utils"
)
// createContainer builds the engine-side configuration for one container of
// the service and creates it. namer supplies the container name and number;
// oldContainer (an ID/name, may be empty) is used to carry volume binds over
// from a previous container; configOverride selectively overrides
// Command/Tty/StdinOpen; oneOff marks run-style (one-off) containers.
func (s *Service) createContainer(ctx context.Context, namer Namer, oldContainer string, configOverride *config.ServiceConfig, oneOff bool) (*composecontainer.Container, error) {
	serviceConfig := s.serviceConfig
	if configOverride != nil {
		// NOTE(review): if s.serviceConfig is a pointer, these assignments
		// mutate the service's stored config, not a copy — confirm intended.
		serviceConfig.Command = configOverride.Command
		serviceConfig.Tty = configOverride.Tty
		serviceConfig.StdinOpen = configOverride.StdinOpen
	}
	configWrapper, err := ConvertToAPI(serviceConfig, s.context.Context, s.clientFactory)
	if err != nil {
		return nil, err
	}
	configWrapper.Config.Image = s.imageName()
	containerName, containerNumber := namer.Next()
	// Label the container so compose can find and identify it later
	// (by service, project, config hash, one-off flag and instance number).
	configWrapper.Config.Labels[labels.SERVICE.Str()] = s.name
	configWrapper.Config.Labels[labels.PROJECT.Str()] = s.project.Name
	configWrapper.Config.Labels[labels.HASH.Str()] = config.GetServiceHash(s.name, serviceConfig)
	configWrapper.Config.Labels[labels.ONEOFF.Str()] = strings.Title(strconv.FormatBool(oneOff))
	configWrapper.Config.Labels[labels.NUMBER.Str()] = fmt.Sprintf("%d", containerNumber)
	configWrapper.Config.Labels[labels.VERSION.Str()] = project.ComposeVersion
	err = s.populateAdditionalHostConfig(configWrapper.HostConfig)
	if err != nil {
		return nil, err
	}
	// FIXME(vdemeester): oldContainer should be a Container instead of a string
	client := s.clientFactory.Create(s)
	if oldContainer != "" {
		// Merge in the volume binds of the previous container so its data
		// survives recreation.
		info, err := client.ContainerInspect(ctx, oldContainer)
		if err != nil {
			return nil, err
		}
		configWrapper.HostConfig.Binds = util.Merge(configWrapper.HostConfig.Binds, volumeBinds(configWrapper.Config.Volumes, &info))
	}
	logrus.Debugf("Creating container %s %#v", containerName, configWrapper)
	// FIXME(vdemeester): long-term will be container.Create(…)
	container, err := composecontainer.Create(ctx, client, containerName, configWrapper.Config, configWrapper.HostConfig, configWrapper.NetworkingConfig)
	if err != nil {
		return nil, err
	}
	// Notify project listeners that a container was created for this service.
	s.project.Notify(events.ContainerCreated, s.name, map[string]string{
		"name": containerName,
	})
	return container, nil
}
// populateAdditionalHostConfig fills in link, IPC-namespace and net-namespace
// settings on hostConfig, based on this service's dependent services.
func (s *Service) populateAdditionalHostConfig(hostConfig *containertypes.HostConfig) error {
	links, err := s.getLinks()
	if err != nil {
		return err
	}
	for _, link := range s.DependentServices() {
		if !s.project.ServiceConfigs.Has(link.Target) {
			// Dependency not defined in this project; skip it.
			continue
		}
		service, err := s.project.CreateService(link.Target)
		if err != nil {
			return err
		}
		containers, err := service.Containers(context.Background())
		if err != nil {
			return err
		}
		// Share the IPC or network namespace with a container of the
		// dependent service, depending on the relationship type. Note the
		// helpers return the (possibly same) hostConfig pointer plus an error.
		if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = addIpc(hostConfig, service, containers, s.serviceConfig.Ipc)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = addNetNs(hostConfig, service, containers, s.serviceConfig.NetworkMode)
		}
		if err != nil {
			return err
		}
	}
	// Rebuild the links list from scratch: "name:alias" entries derived from
	// getLinks, followed by raw external links from the service config.
	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range s.serviceConfig.ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}
	return nil
}
// FIXME(vdemeester) this is temporary
// getLinks maps link aliases (and container names) to container names for
// every linked dependent service that exists in this project.
func (s *Service) getLinks() (map[string]string, error) {
	links := map[string]string{}
	for _, link := range s.DependentServices() {
		if !s.project.ServiceConfigs.Has(link.Target) {
			// Dependency not defined in this project; skip it.
			continue
		}
		service, err := s.project.CreateService(link.Target)
		if err != nil {
			return nil, err
		}
		// FIXME(vdemeester) container should not know service
		containers, err := service.Containers(context.Background())
		if err != nil {
			return nil, err
		}
		if link.Type == project.RelTypeLink {
			addLinks(links, service, link, containers)
		}
		// (A dead re-check of the already-handled err was removed here.)
	}
	return links, nil
}
// addLinks records, for each container of the dependent service, both the
// relationship alias (first container wins) and the container's own name.
// The service parameter is unused; it is kept for signature symmetry with
// the other add* helpers.
func addLinks(links map[string]string, service project.Service, rel project.ServiceRelationship, containers []project.Container) {
	for _, c := range containers {
		name := c.Name()
		if _, seen := links[rel.Alias]; !seen {
			links[rel.Alias] = name
		}
		links[name] = name
	}
}
// addIpc makes the host config share the IPC namespace of the first container
// of the dependent service. It fails when that service has no containers.
func addIpc(config *containertypes.HostConfig, service project.Service, containers []project.Container, ipc string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for IPC %v", ipc)
	}
	config.IpcMode = containertypes.IpcMode("container:" + containers[0].ID())
	return config, nil
}
// addNetNs makes the host config share the network namespace of the first
// container of the dependent service. It fails when that service has no
// containers.
func addNetNs(config *containertypes.HostConfig, service project.Service, containers []project.Container, networkMode string) (*containertypes.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for networks ns %v", networkMode)
	}
	config.NetworkMode = containertypes.NetworkMode("container:" + containers[0].ID())
	return config, nil
}
// volumeBinds returns "source:destination" bind strings for every mount of
// the inspected container whose destination appears in the volumes set.
func volumeBinds(volumes map[string]struct{}, container *types.ContainerJSON) []string {
	binds := make([]string, 0, len(container.Mounts))
	for _, m := range container.Mounts {
		if _, wanted := volumes[m.Destination]; wanted {
			binds = append(binds, m.Source+":"+m.Destination)
		}
	}
	return binds
}

View file

@ -0,0 +1,24 @@
package service
import (
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker/ctx"
"github.com/docker/libcompose/project"
)
// Factory is an implementation of project.ServiceFactory.
type Factory struct {
	context *ctx.Context // shared docker context injected into every created service
}
// NewFactory creates a new service factory for the given context.
func NewFactory(context *ctx.Context) *Factory {
	factory := &Factory{}
	factory.context = context
	return factory
}
// Create creates a Service based on the specified project, name and service configuration.
// Note: the project argument is not used by this implementation; the service
// is built from the name, config and the factory's context only.
func (s *Factory) Create(project *project.Project, name string, serviceConfig *config.ServiceConfig) (project.Service, error) {
	return NewService(name, serviceConfig, s.context), nil
}

View file

@ -0,0 +1,45 @@
package service
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/libcompose/project"
)
// DefaultDependentServices return the dependent services (as an array of ServiceRelationship)
// for the specified project and service. It looks for : links, volumesFrom, net and ipc configuration.
// It uses default project implementation and append some docker specific ones.
func DefaultDependentServices(p *project.Project, s project.Service) []project.ServiceRelationship {
	rels := project.DefaultDependentServices(p, s)
	rels = appendNs(p, rels, s.Config().NetworkMode, project.RelTypeNetNamespace)
	return appendNs(p, rels, s.Config().Ipc, project.RelTypeIpcNamespace)
}
// appendNs appends a namespace-sharing relationship when conf refers to a
// service of the project (container:<service> syntax); otherwise it returns
// rels unchanged.
func appendNs(p *project.Project, rels []project.ServiceRelationship, conf string, relType project.ServiceRelationshipType) []project.ServiceRelationship {
	if svc := GetContainerFromIpcLikeConfig(p, conf); svc != "" {
		rels = append(rels, project.NewServiceRelationship(svc, relType))
	}
	return rels
}
// GetContainerFromIpcLikeConfig returns name of the service that shares the IPC
// namespace with the specified service. It returns "" when conf is not a
// "container:<name>" reference or the name is not a service of the project.
func GetContainerFromIpcLikeConfig(p *project.Project, conf string) string {
	mode := container.IpcMode(conf)
	if !mode.IsContainer() {
		return ""
	}
	name := mode.Container()
	if name != "" && p.ServiceConfigs.Has(name) {
		return name
	}
	return ""
}

View file

@ -0,0 +1,157 @@
package volume
import (
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/docker/libcompose/config"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
"golang.org/x/net/context"
)
// Volume holds attributes and method for a volume definition in compose
type Volume struct {
	client        client.VolumeAPIClient // engine API client used for all volume operations
	projectName   string                 // compose project name; prefixes non-external volume names
	name          string                 // volume name as declared in the compose file
	driver        string                 // volume driver (empty means engine default)
	driverOptions map[string]string      // driver-specific options passed at create time
	external      bool                   // true when the volume is managed outside of compose
	// TODO (shouze) missing labels
}
// fullName returns the volume name as seen by the docker engine: external
// volumes keep their declared name, project volumes get the project prefix.
func (v *Volume) fullName() string {
	if v.external {
		return v.name
	}
	return v.projectName + "_" + v.name
}
// Inspect inspects the current volume, returning the engine-side description
// looked up by its full (possibly project-prefixed) name.
func (v *Volume) Inspect(ctx context.Context) (types.Volume, error) {
	return v.client.VolumeInspect(ctx, v.fullName())
}
// Remove removes the current volume (from docker engine). External volumes
// are never removed; they are only reported as skipped.
func (v *Volume) Remove(ctx context.Context) error {
	if v.external {
		// Fix: add the missing trailing newline (consistent with the
		// "Removing volume" message below).
		fmt.Printf("Volume %s is external, skipping\n", v.fullName())
		return nil
	}
	fmt.Printf("Removing volume %q\n", v.fullName())
	return v.client.VolumeRemove(ctx, v.fullName(), true)
}
// EnsureItExists makes sure the volume exists and returns an error if it does
// not exist and cannot be created. External volumes are required to already
// exist; project volumes are created on demand. A driver mismatch on an
// existing volume is reported as an error.
func (v *Volume) EnsureItExists(ctx context.Context) error {
	volumeResource, err := v.Inspect(ctx)
	if v.external {
		if client.IsErrVolumeNotFound(err) {
			// FIXME(shouze) introduce some libcompose error type
			return fmt.Errorf("Volume %s declared as external, but could not be found. Please create the volume manually using docker volume create %s and try again", v.name, v.name)
		}
		return err
	}
	if err != nil {
		if client.IsErrVolumeNotFound(err) {
			return v.create(ctx)
		}
		// Bug fix: previously a non-NotFound inspect error fell through to
		// the driver comparison against a zero-valued volume, which could
		// mask the real error behind a bogus "driver has changed" message.
		return err
	}
	if volumeResource.Driver != v.driver {
		return fmt.Errorf("Volume %q needs to be recreated - driver has changed", v.name)
	}
	return nil
}
// create asks the engine to create the volume with its configured driver and
// driver options.
func (v *Volume) create(ctx context.Context) error {
	fmt.Printf("Creating volume %q with driver %q\n", v.fullName(), v.driver)
	body := volume.VolumesCreateBody{
		Name:       v.fullName(),
		Driver:     v.driver,
		DriverOpts: v.driverOptions,
		// TODO (shouze) missing labels
	}
	_, err := v.client.VolumeCreate(ctx, body)
	return err
}
// NewVolume creates a new volume from the specified name and config.
// A nil config yields a volume with default driver and no options.
func NewVolume(projectName, name string, config *config.VolumeConfig, client client.VolumeAPIClient) *Volume {
	v := &Volume{
		client:      client,
		projectName: projectName,
		name:        name,
	}
	if config == nil {
		return v
	}
	v.driver = config.Driver
	v.driverOptions = config.DriverOpts
	v.external = config.External.External
	return v
}
// Volumes holds a list of volume
type Volumes struct {
	volumes       []*Volume // volumes managed for the project
	volumeEnabled bool      // when false, Initialize and Remove are no-ops
}
// Initialize makes sure every volume exists (creating missing ones) when
// volume support is enabled; otherwise it is a no-op.
func (v *Volumes) Initialize(ctx context.Context) error {
	if !v.volumeEnabled {
		return nil
	}
	// Loop variable renamed from "volume" to avoid shadowing the imported
	// volume package.
	for _, vol := range v.volumes {
		if err := vol.EnsureItExists(ctx); err != nil {
			return err
		}
	}
	return nil
}
// Remove removes all managed volumes (clean-up) when volume support is
// enabled; otherwise it is a no-op.
func (v *Volumes) Remove(ctx context.Context) error {
	if !v.volumeEnabled {
		return nil
	}
	// Loop variable renamed from "volume" to avoid shadowing the imported
	// volume package.
	for _, vol := range v.volumes {
		if err := vol.Remove(ctx); err != nil {
			return err
		}
	}
	return nil
}
// VolumesFromServices creates a new Volumes struct based on volume
// configurations and services configuration.
// NOTE(review): the original comment claimed an error is returned when a
// volume is defined but unused by any service; no such check is implemented
// (services is currently unused) and the returned error is always nil.
func VolumesFromServices(cli client.VolumeAPIClient, projectName string, volumeConfigs map[string]*config.VolumeConfig, services *config.ServiceConfigs, volumeEnabled bool) (*Volumes, error) {
	volumes := make([]*Volume, 0, len(volumeConfigs))
	// Range variable renamed from "config" to avoid shadowing the imported
	// config package; the unused always-nil err variable was removed.
	for name, volumeConfig := range volumeConfigs {
		volumes = append(volumes, NewVolume(projectName, name, volumeConfig, cli))
	}
	return &Volumes{
		volumes:       volumes,
		volumeEnabled: volumeEnabled,
	}, nil
}
// DockerFactory implements project.VolumesFactory
type DockerFactory struct {
	// ClientFactory provides the engine API client used for volume operations.
	ClientFactory composeclient.Factory
}
// Create implements project.VolumesFactory Create method.
// It creates a Volumes (that implements project.Volumes) from specified configurations.
func (f *DockerFactory) Create(projectName string, volumeConfigs map[string]*config.VolumeConfig, serviceConfigs *config.ServiceConfigs, volumeEnabled bool) (project.Volumes, error) {
	// The client is created with a nil service: volume operations are
	// project-level, not tied to a particular service.
	cli := f.ClientFactory.Create(nil)
	return VolumesFromServices(cli, projectName, volumeConfigs, serviceConfigs, volumeEnabled)
}