1
0
Fork 0

Vendor integration dependencies.

This commit is contained in:
Timo Reimann 2017-02-07 22:33:23 +01:00
parent dd5e3fba01
commit 55b57c736b
2451 changed files with 731611 additions and 0 deletions

View file

@ -0,0 +1,40 @@
package docker
import (
"github.com/docker/docker/registry"
"github.com/docker/engine-api/types"
)
// AuthLookup defines a method for looking up authentication information
type AuthLookup interface {
	// All returns every known auth configuration, keyed as in the docker
	// config file's auths section.
	All() map[string]types.AuthConfig
	// Lookup resolves the auth configuration for the given repository,
	// returning the zero AuthConfig when none applies.
	Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig
}
// ConfigAuthLookup implements AuthLookup by reading a Docker config file
type ConfigAuthLookup struct {
	context *Context // supplies the loaded ConfigFile (may be nil if never loaded)
}
// NewConfigAuthLookup creates a new ConfigAuthLookup for a given context
func NewConfigAuthLookup(context *Context) *ConfigAuthLookup {
	lookup := &ConfigAuthLookup{}
	lookup.context = context
	return lookup
}
// Lookup uses a Docker config file to lookup authentication information
// for the given repository; it yields the zero AuthConfig when no config
// file is loaded or the repository info is incomplete.
func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig {
	if c.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil {
		return registry.ResolveAuthConfig(c.context.ConfigFile.AuthConfigs, repoInfo.Index)
	}
	return types.AuthConfig{}
}
// All uses a Docker config file to get all authentication information;
// it yields an empty (non-nil) map when no config file is loaded.
func (c *ConfigAuthLookup) All() map[string]types.AuthConfig {
	if cf := c.context.ConfigFile; cf != nil {
		return cf.AuthConfigs
	}
	return map[string]types.AuthConfig{}
}

View file

@ -0,0 +1,184 @@
package builder
import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"strings"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/term"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
)
// DefaultDockerfileName is the default name of a Dockerfile, used by
// createTar when no explicit Dockerfile path is given.
const DefaultDockerfileName = "Dockerfile"
// Builder defines methods to provide a docker builder. This makes libcompose
// not tied up to the docker daemon builder.
type Builder interface {
	// Build builds the image under the given name, honoring ctx for
	// cancellation. Note: the signature includes ctx so that
	// DaemonBuilder.Build — the only implementation in this package —
	// actually satisfies the interface; the previous ctx-less signature
	// was satisfied by nothing.
	Build(ctx context.Context, imageName string) error
}
// DaemonBuilder is the daemon "docker build" Builder implementation.
type DaemonBuilder struct {
	Client           client.ImageAPIClient       // engine API client used to submit the build
	ContextDirectory string                      // root of the build context sent to the daemon
	Dockerfile       string                      // Dockerfile path relative to the context; "" triggers default lookup
	AuthConfigs      map[string]types.AuthConfig // registry credentials forwarded to the build
	NoCache          bool                        // forwarded to ImageBuildOptions.NoCache
	ForceRemove      bool                        // forwarded to ImageBuildOptions.ForceRemove
	Pull             bool                        // forwarded to ImageBuildOptions.PullParent
	BuildArgs        map[string]string           // forwarded to ImageBuildOptions.BuildArgs
}
// Build implements Builder. It consumes the docker build API endpoint and sends
// a tar of the specified service build context.
//
// Progress and build output are both written to stdout. JSON build errors
// reported by the daemon are converted to "Status: <msg>, Code: <n>" errors,
// with a zero error code defaulting to 1.
func (d *DaemonBuilder) Build(ctx context.Context, imageName string) error {
	buildCtx, err := createTar(d.ContextDirectory, d.Dockerfile)
	if err != nil {
		return err
	}
	defer buildCtx.Close()

	var progBuff io.Writer = os.Stdout
	var buildBuff io.Writer = os.Stdout

	// Setup an upload progress bar
	progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true)

	var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")

	logrus.Infof("Building %s...", imageName)

	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)

	response, err := d.Client.ImageBuild(ctx, body, types.ImageBuildOptions{
		Tags:        []string{imageName},
		NoCache:     d.NoCache,
		Remove:      true,
		ForceRemove: d.ForceRemove,
		PullParent:  d.Pull,
		Dockerfile:  d.Dockerfile,
		AuthConfigs: d.AuthConfigs,
		BuildArgs:   d.BuildArgs,
	})
	if err != nil {
		return err
	}

	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
// createTar creates a build context tar stream for the given context
// directory and Dockerfile path; the caller must close the returned
// ReadCloser. It mirrors the docker CLI: resolves the Dockerfile (falling
// back to "Dockerfile", then lowercase "dockerfile"), applies .dockerignore
// exclusions, and always keeps the Dockerfile and .dockerignore in the
// archive even when the ignore file excludes them.
func createTar(contextDirectory, dockerfile string) (io.ReadCloser, error) {
	// This code was ripped off from docker/api/client/build.go
	dockerfileName := filepath.Join(contextDirectory, dockerfile)

	absContextDirectory, err := filepath.Abs(contextDirectory)
	if err != nil {
		return nil, err
	}

	filename := dockerfileName

	if dockerfile == "" {
		// No -f/--file was specified so use the default
		dockerfileName = DefaultDockerfileName
		filename = filepath.Join(absContextDirectory, dockerfileName)

		// Just to be nice ;-) look for 'dockerfile' too but only
		// use it if we found it, otherwise ignore this check
		if _, err = os.Lstat(filename); os.IsNotExist(err) {
			tmpFN := path.Join(absContextDirectory, strings.ToLower(dockerfileName))
			if _, err = os.Lstat(tmpFN); err == nil {
				dockerfileName = strings.ToLower(dockerfileName)
				filename = tmpFN
			}
		}
	}

	origDockerfile := dockerfileName // used for error msg

	if filename, err = filepath.Abs(filename); err != nil {
		return nil, err
	}

	// Now reset the dockerfileName to be relative to the build context
	dockerfileName, err = filepath.Rel(absContextDirectory, filename)
	if err != nil {
		return nil, err
	}

	// And canonicalize dockerfile name to a platform-independent one
	dockerfileName, err = archive.CanonicalTarNameForPath(dockerfileName)
	if err != nil {
		return nil, fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err)
	}

	if _, err = os.Lstat(filename); os.IsNotExist(err) {
		return nil, fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile)
	}

	var includes = []string{"."}
	var excludes []string

	dockerIgnorePath := path.Join(contextDirectory, ".dockerignore")
	dockerIgnore, err := os.Open(dockerIgnorePath)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
		// NOTE(review): this warning also fires for the common "no
		// .dockerignore present" case (err here is the not-exist error).
		logrus.Warnf("Error while reading .dockerignore (%s) : %s", dockerIgnorePath, err.Error())
		excludes = make([]string, 0)
	} else {
		excludes, err = dockerignore.ReadAll(dockerIgnore)
		if err != nil {
			return nil, err
		}
	}

	// If .dockerignore mentions .dockerignore or the Dockerfile
	// then make sure we send both files over to the daemon
	// because Dockerfile is, obviously, needed no matter what, and
	// .dockerignore is needed to know if either one needs to be
	// removed. The daemon will remove them for us, if needed, after it
	// parses the Dockerfile.
	keepThem1, _ := fileutils.Matches(".dockerignore", excludes)
	keepThem2, _ := fileutils.Matches(dockerfileName, excludes)
	if keepThem1 || keepThem2 {
		includes = append(includes, ".dockerignore", dockerfileName)
	}

	if err := builder.ValidateContextDirectory(contextDirectory, excludes); err != nil {
		return nil, fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
	}

	options := &archive.TarOptions{
		Compression:     archive.Uncompressed,
		ExcludePatterns: excludes,
		IncludeFiles:    includes,
	}

	return archive.TarWithOptions(contextDirectory, options)
}

View file

@ -0,0 +1,115 @@
package client
import (
"fmt"
"net/http"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/engine-api/client"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/docker/libcompose/version"
)
const (
	// DefaultAPIVersion is the default docker API version set by libcompose
	DefaultAPIVersion = "v1.20"

	// Conventional file names looked up under the docker cert/config dirs.
	defaultTrustKeyFile = "key.json"
	defaultCaFile       = "ca.pem"
	defaultKeyFile      = "key.pem"
	defaultCertFile     = "cert.pem"
)
var (
	// dockerCertPath is seeded from DOCKER_CERT_PATH at package load time.
	dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
)

// init falls back to the docker CLI config directory when DOCKER_CERT_PATH
// is unset. NOTE: because this runs once at package init, later changes to
// the environment are not picked up.
func init() {
	if dockerCertPath == "" {
		dockerCertPath = cliconfig.ConfigDir()
	}
}
// Options holds docker client options (host, tls, ..)
type Options struct {
	TLS        bool              // enable TLS; implied by TLSVerify (see Create)
	TLSVerify  bool              // verify the daemon's certificate
	TLSOptions tlsconfig.Options // CA/cert/key locations; blanks filled in by Create
	TrustKey   string            // trust key path; Create defaults it to ~/.docker/key.json
	Host       string            // daemon host; empty means configure from environment
	APIVersion string            // API version; empty means DefaultAPIVersion
}
// Create creates a docker client based on the specified options.
//
// With an empty Host the client is configured purely from the standard
// DOCKER_* environment variables (defaulting DOCKER_API_VERSION to
// DefaultAPIVersion). Otherwise the missing TLS settings are filled in from
// the conventional files under the docker cert path and, when TLS is
// enabled, an explicit HTTP client is built for the connection.
func Create(c Options) (client.APIClient, error) {
	if c.Host == "" {
		if os.Getenv("DOCKER_API_VERSION") == "" {
			os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion)
		}
		client, err := client.NewEnvClient()
		if err != nil {
			return nil, err
		}
		return client, nil
	}

	apiVersion := c.APIVersion
	if apiVersion == "" {
		apiVersion = DefaultAPIVersion
	}

	// Fill in any TLS file locations left blank by the caller.
	if c.TLSOptions.CAFile == "" {
		c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile)
	}
	if c.TLSOptions.CertFile == "" {
		c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile)
	}
	if c.TLSOptions.KeyFile == "" {
		c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile)
	}
	if c.TrustKey == "" {
		c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	}
	// TLSVerify implies TLS; TLS without verify skips certificate checks.
	if c.TLSVerify {
		c.TLS = true
	}
	if c.TLS {
		c.TLSOptions.InsecureSkipVerify = !c.TLSVerify
	}

	// httpClient stays nil (engine-api's default) unless TLS is requested.
	var httpClient *http.Client
	if c.TLS {
		config, err := tlsconfig.Client(c.TLSOptions)
		if err != nil {
			return nil, err
		}
		tr := &http.Transport{
			TLSClientConfig: config,
		}
		proto, addr, _, err := client.ParseHost(c.Host)
		if err != nil {
			return nil, err
		}

		if err := sockets.ConfigureTransport(tr, proto, addr); err != nil {
			return nil, err
		}

		httpClient = &http.Client{
			Transport: tr,
		}
	}

	customHeaders := map[string]string{}
	customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS)

	client, err := client.NewClient(c.Host, apiVersion, httpClient, customHeaders)
	if err != nil {
		return nil, err
	}
	return client, nil
}

View file

@ -0,0 +1,35 @@
package client
import (
"github.com/docker/engine-api/client"
"github.com/docker/libcompose/project"
)
// Factory is a factory to create docker clients.
type Factory interface {
	// Create constructs a Docker client for the given service. The passed in
	// config may be nil in which case a generic client for the project should
	// be returned.
	Create(service project.Service) client.APIClient
}
// defaultFactory hands out one shared API client regardless of service.
type defaultFactory struct {
	client client.APIClient // shared client returned for every service
}
// NewDefaultFactory creates and returns the default client factory that uses
// github.com/docker/engine-api client. The single client built from opts is
// shared by every service.
func NewDefaultFactory(opts Options) (Factory, error) {
	apiClient, err := Create(opts)
	if err != nil {
		return nil, err
	}
	factory := &defaultFactory{client: apiClient}
	return factory, nil
}
// Create implements Factory; the service argument is ignored and the
// factory's single shared client is returned.
func (f *defaultFactory) Create(service project.Service) client.APIClient {
	return f.client
}

View file

@ -0,0 +1,766 @@
package docker
import (
"fmt"
"io"
"math"
"os"
"strings"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/stdcopy"
"github.com/docker/docker/pkg/term"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"github.com/docker/go-connections/nat"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/logger"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
util "github.com/docker/libcompose/utils"
"github.com/docker/libcompose/yaml"
)
// Container holds information about a docker container and the service it is tied on.
type Container struct {
	name            string          // container name used for lookup and creation
	serviceName     string          // compose service this container belongs to
	projectName     string          // compose project name
	containerNumber int             // per-service instance number (stamped into labels)
	oneOff          bool            // true for one-off containers (see NewOneOffContainer)
	eventNotifier   events.Notifier // receives lifecycle events (created, started, …)
	loggerFactory   logger.Factory  // builds per-container log writers
	client          client.APIClient
	// FIXME(vdemeester) Remove this dependency
	service *Service
}
// NewContainer creates a container struct with the specified docker client, name and service.
func NewContainer(client client.APIClient, name string, containerNumber int, service *Service) *Container {
	c := &Container{client: client, name: name, containerNumber: containerNumber}
	// TODO(vdemeester) Move these to arguments
	c.serviceName = service.name
	c.projectName = service.project.Name
	c.eventNotifier = service.project
	c.loggerFactory = service.context.LoggerFactory
	// TODO(vdemeester) Remove this dependency
	c.service = service
	return c
}
// NewOneOffContainer creates a "oneoff" container struct with the specified
// docker client, name and service; it is a regular container with the
// oneOff flag raised.
func NewOneOffContainer(client client.APIClient, name string, containerNumber int, service *Service) *Container {
	container := NewContainer(client, name, containerNumber, service)
	container.oneOff = true
	return container
}
// findExisting looks up this container by name via GetContainer. Callers
// throughout this file treat a nil result (with nil error) as "not found".
func (c *Container) findExisting(ctx context.Context) (*types.ContainerJSON, error) {
	return GetContainer(ctx, c.client, c.name)
}
// Info returns info about the container, like name, command, state or ports.
// With qFlag set only the container ID is reported. A missing container
// yields a nil Info and nil error.
func (c *Container) Info(ctx context.Context, qFlag bool) (project.Info, error) {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return nil, err
	}

	// Re-query via the list endpoint: Command/Status/Ports come in the
	// list-style shape, not the inspect payload.
	infos, err := GetContainersByFilter(ctx, c.client, map[string][]string{
		"name": {container.Name},
	})
	if err != nil || len(infos) == 0 {
		return nil, err
	}
	info := infos[0]

	result := project.Info{}
	if qFlag {
		result = append(result, project.InfoPart{Key: "Id", Value: container.ID})
	} else {
		result = append(result, project.InfoPart{Key: "Name", Value: name(info.Names)})
		result = append(result, project.InfoPart{Key: "Command", Value: info.Command})
		result = append(result, project.InfoPart{Key: "State", Value: info.Status})
		result = append(result, project.InfoPart{Key: "Ports", Value: portString(info.Ports)})
	}
	return result, nil
}
// portString renders port mappings as a comma-separated list, using
// "ip:public->private/proto" for published ports and "private/proto"
// for unpublished ones.
func portString(ports []types.Port) string {
	formatted := make([]string, 0, len(ports))
	for _, p := range ports {
		desc := fmt.Sprintf("%d/%s", p.PrivatePort, p.Type)
		if p.PublicPort > 0 {
			desc = fmt.Sprintf("%s:%d->%s", p.IP, p.PublicPort, desc)
		}
		formatted = append(formatted, desc)
	}
	return strings.Join(formatted, ", ")
}
// name picks the shortest of the docker-reported names (the primary name,
// as opposed to longer link-alias paths) and strips its leading "/".
// Returns "" for an empty or nil slice — the original indexed current[1:]
// unconditionally and panicked on empty input.
func name(names []string) string {
	current := ""
	max := math.MaxInt32
	for _, v := range names {
		if len(v) < max {
			max = len(v)
			current = v
		}
	}
	if current == "" {
		return ""
	}
	return current[1:]
}
// Recreate will not refresh the container by means of relaxation and enjoyment,
// just delete it and create a new one with the current configuration.
//
// The old container is renamed (suffixed with its short ID) so the new one
// can take its name, its identity is passed to createContainer so volume
// binds carry over, and it is then force-removed with volumes preserved.
func (c *Container) Recreate(ctx context.Context, imageName string) (*types.ContainerJSON, error) {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return nil, err
	}

	// Refuse containers without the config-hash label: they were not
	// created by libcompose and cannot be safely recreated.
	hash := container.Config.Labels[labels.HASH.Str()]
	if hash == "" {
		return nil, fmt.Errorf("Failed to find hash on old container: %s", container.Name)
	}

	name := container.Name[1:] // inspect names carry a leading "/"
	newName := fmt.Sprintf("%s_%s", name, container.ID[:12])
	logrus.Debugf("Renaming %s => %s", name, newName)
	if err := c.client.ContainerRename(ctx, container.ID, newName); err != nil {
		logrus.Errorf("Failed to rename old container %s", c.name)
		return nil, err
	}

	newContainer, err := c.createContainer(ctx, imageName, container.ID, nil)
	if err != nil {
		return nil, err
	}
	logrus.Debugf("Created replacement container %s", newContainer.ID)

	// RemoveVolumes stays false so data owned by the old container
	// survives into the replacement.
	if err := c.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
		Force:         true,
		RemoveVolumes: false,
	}); err != nil {
		logrus.Errorf("Failed to remove old container %s", c.name)
		return nil, err
	}
	logrus.Debugf("Removed old container %s %s", c.name, container.ID)

	return newContainer, nil
}
// Create creates the container based on the specified image name and send an event
// to notify the container has been created. If the container already exists, does
// nothing. It is CreateWithOverride with a nil override.
func (c *Container) Create(ctx context.Context, imageName string) (*types.ContainerJSON, error) {
	return c.CreateWithOverride(ctx, imageName, nil)
}
// CreateWithOverride create container and override parts of the config to
// allow special situations to override the config generated from the compose
// file. When the container already exists it is returned unchanged and no
// ContainerCreated event is emitted.
func (c *Container) CreateWithOverride(ctx context.Context, imageName string, configOverride *config.ServiceConfig) (*types.ContainerJSON, error) {
	container, err := c.findExisting(ctx)
	if err != nil {
		return nil, err
	}

	if container == nil {
		container, err = c.createContainer(ctx, imageName, "", configOverride)
		if err != nil {
			return nil, err
		}
		c.eventNotifier.Notify(events.ContainerCreated, c.serviceName, map[string]string{
			"name": c.Name(),
		})
	}
	return container, err
}
// Stop stops the container, giving the daemon up to timeout seconds before
// it resorts to killing the process. A missing container is a no-op.
func (c *Container) Stop(ctx context.Context, timeout int) error {
	wait := time.Duration(timeout) * time.Second
	return c.withContainer(ctx, func(ctr *types.ContainerJSON) error {
		return c.client.ContainerStop(ctx, ctr.ID, &wait)
	})
}
// Pause pauses the container. If the containers are already paused, don't fail.
func (c *Container) Pause(ctx context.Context) error {
	return c.withContainer(ctx, func(ctr *types.ContainerJSON) error {
		if ctr.State.Paused {
			// Already paused: nothing to do.
			return nil
		}
		return c.client.ContainerPause(ctx, ctr.ID)
	})
}
// Unpause unpauses the container. If the containers are not paused, don't fail.
func (c *Container) Unpause(ctx context.Context) error {
	return c.withContainer(ctx, func(ctr *types.ContainerJSON) error {
		if !ctr.State.Paused {
			// Not paused: nothing to do.
			return nil
		}
		return c.client.ContainerUnpause(ctx, ctr.ID)
	})
}
// Kill sends the given signal to the container. A missing container is a
// no-op.
func (c *Container) Kill(ctx context.Context, signal string) error {
	return c.withContainer(ctx, func(ctr *types.ContainerJSON) error {
		return c.client.ContainerKill(ctx, ctr.ID, signal)
	})
}
// Delete removes the container if it exists and is not running; a running
// container is left untouched and nil is returned.
// NOTE(review): the original comment claimed a running container would be
// stopped first — the code below never stops it; confirm intended behavior.
func (c *Container) Delete(ctx context.Context, removeVolume bool) error {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return err
	}

	info, err := c.client.ContainerInspect(ctx, container.ID)
	if err != nil {
		return err
	}

	if !info.State.Running {
		return c.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
			Force:         true,
			RemoveVolumes: removeVolume,
		})
	}
	return nil
}
// IsRunning returns the running state of the container; a missing container
// reports (false, nil).
func (c *Container) IsRunning(ctx context.Context) (bool, error) {
	ctr, err := c.findExisting(ctx)
	if err != nil || ctr == nil {
		return false, err
	}

	info, err := c.client.ContainerInspect(ctx, ctr.ID)
	if err != nil {
		return false, err
	}
	return info.State.Running, nil
}
// Run creates, start and attach to the container based on the image name,
// the specified configuration.
// It will always create a new container.
//
// configOverride must be non-nil (its fields are dereferenced below):
// StdinOpen/Tty select which standard streams are attached. The call
// blocks until the hijacked streams finish, then reports the container's
// exit code.
func (c *Container) Run(ctx context.Context, configOverride *config.ServiceConfig) (int, error) {
	var (
		errCh       chan error
		out, stderr io.Writer
		in          io.ReadCloser
	)

	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return -1, err
	}

	if configOverride.StdinOpen {
		in = os.Stdin
	}
	if configOverride.Tty {
		out = os.Stdout
	}
	if configOverride.Tty {
		stderr = os.Stderr
	}

	// NOTE(review): Stdout/Stderr attachment is gated on Tty, so a
	// non-tty run attaches no output streams — confirm this is intended.
	options := types.ContainerAttachOptions{
		Stream: true,
		Stdin:  configOverride.StdinOpen,
		Stdout: configOverride.Tty,
		Stderr: configOverride.Tty,
	}

	resp, err := c.client.ContainerAttach(ctx, container.ID, options)
	if err != nil {
		return -1, err
	}

	// set raw terminal
	inFd, _ := term.GetFdInfo(in)
	state, err := term.SetRawTerminal(inFd)
	if err != nil {
		return -1, err
	}
	// restore raw terminal
	defer term.RestoreTerminal(inFd, state)
	// holdHijackedConnection (in goroutine)
	errCh = promise.Go(func() error {
		return holdHijackedConnection(configOverride.Tty, in, out, stderr, resp)
	})

	if err := c.client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}); err != nil {
		return -1, err
	}

	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return -1, err
	}

	exitedContainer, err := c.client.ContainerInspect(ctx, container.ID)
	if err != nil {
		return -1, err
	}

	return exitedContainer.State.ExitCode, nil
}
// holdHijackedConnection pumps the hijacked attach streams: container
// output from resp.Reader into outputStream/errorStream (raw copy when tty,
// stdcopy-demultiplexed otherwise) and inputStream into resp.Conn. It
// returns once output completes, or — when no output stream is attached —
// once stdin has been fully sent and the write side closed.
func holdHijackedConnection(tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error {
	var err error
	receiveStdout := make(chan error, 1)
	if outputStream != nil || errorStream != nil {
		go func() {
			// When TTY is ON, use regular copy
			if tty && outputStream != nil {
				_, err = io.Copy(outputStream, resp.Reader)
			} else {
				_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
			}
			logrus.Debugf("[hijack] End of stdout")
			receiveStdout <- err
		}()
	}

	stdinDone := make(chan struct{})
	go func() {
		if inputStream != nil {
			io.Copy(resp.Conn, inputStream)
			logrus.Debugf("[hijack] End of stdin")
		}

		// Half-close the write side so the daemon sees EOF on stdin.
		if err := resp.CloseWrite(); err != nil {
			logrus.Debugf("Couldn't send EOF: %s", err)
		}
		close(stdinDone)
	}()

	select {
	case err := <-receiveStdout:
		if err != nil {
			logrus.Debugf("Error receiveStdout: %s", err)
			return err
		}
	case <-stdinDone:
		// Stdin finished first: still wait for the output pump (if any)
		// so container output is not truncated.
		if outputStream != nil || errorStream != nil {
			if err := <-receiveStdout; err != nil {
				logrus.Debugf("Error receiveStdout: %s", err)
				return err
			}
		}
	}

	return nil
}
// Start starts the container if it exists (a missing container is a no-op)
// and emits a ContainerStarted event on success.
func (c *Container) Start(ctx context.Context) error {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return err
	}
	logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Starting container")
	// Use the caller's ctx here: the original called
	// context.Background(), silently discarding cancellation and
	// deadlines for the start request.
	if err := c.client.ContainerStart(ctx, container.ID, types.ContainerStartOptions{}); err != nil {
		logrus.WithFields(logrus.Fields{"container.ID": container.ID, "c.name": c.name}).Debug("Failed to start container")
		return err
	}
	c.eventNotifier.Notify(events.ContainerStarted, c.serviceName, map[string]string{
		"name": c.Name(),
	})
	return nil
}
// OutOfSync checks if the container is out of sync with the service definition.
// It looks if the service hash container label is the same as the computed one.
// The container is also out of sync when its configured image name or its
// image ID no longer matches.
func (c *Container) OutOfSync(ctx context.Context, imageName string) (bool, error) {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return false, err
	}

	if container.Config.Image != imageName {
		logrus.Debugf("Images for %s do not match %s!=%s", c.name, container.Config.Image, imageName)
		return true, nil
	}

	if container.Config.Labels[labels.HASH.Str()] != c.getHash() {
		logrus.Debugf("Hashes for %s do not match %s!=%s", c.name, container.Config.Labels[labels.HASH.Str()], c.getHash())
		return true, nil
	}

	image, _, err := c.client.ImageInspectWithRaw(ctx, container.Config.Image, false)
	if err != nil {
		if client.IsErrImageNotFound(err) {
			// Image missing locally: cannot tell, report in sync.
			logrus.Debugf("Image %s do not exist, do not know if it's out of sync", container.Config.Image)
			return false, nil
		}
		return false, err
	}

	logrus.Debugf("Checking existing image name vs id: %s == %s", image.ID, container.Image)
	return image.ID != container.Image, err
}
// getHash computes the configuration hash for this container's service;
// it is stamped into the labels.HASH label at create time and compared in
// OutOfSync to detect configuration drift.
func (c *Container) getHash() string {
	return config.GetServiceHash(c.serviceName, c.service.Config())
}
// volumeBinds returns "source:destination" bind strings for every mount of
// the container whose destination appears in the volumes set.
func volumeBinds(volumes map[string]struct{}, container *types.ContainerJSON) []string {
	binds := make([]string, 0, len(container.Mounts))
	for _, m := range container.Mounts {
		if _, wanted := volumes[m.Destination]; !wanted {
			continue
		}
		binds = append(binds, m.Source+":"+m.Destination)
	}
	return binds
}
// createContainer converts the service definition to engine API configs and
// creates the container under c.name. oldContainer, when non-empty, names an
// existing container whose matching volume binds are merged in so data
// survives a recreate. configOverride selectively replaces Command, Tty and
// StdinOpen before conversion.
func (c *Container) createContainer(ctx context.Context, imageName, oldContainer string, configOverride *config.ServiceConfig) (*types.ContainerJSON, error) {
	serviceConfig := c.service.serviceConfig
	if configOverride != nil {
		// NOTE(review): if serviceConfig aliases the service's own config
		// (pointer semantics), these writes mutate it permanently — confirm.
		serviceConfig.Command = configOverride.Command
		serviceConfig.Tty = configOverride.Tty
		serviceConfig.StdinOpen = configOverride.StdinOpen
	}
	configWrapper, err := ConvertToAPI(c.service)
	if err != nil {
		return nil, err
	}

	configWrapper.Config.Image = imageName

	if configWrapper.Config.Labels == nil {
		configWrapper.Config.Labels = map[string]string{}
	}

	oneOffString := "False"
	if c.oneOff {
		oneOffString = "True"
	}

	// Stamp the libcompose bookkeeping labels used for lookups and
	// out-of-sync detection.
	configWrapper.Config.Labels[labels.SERVICE.Str()] = c.serviceName
	configWrapper.Config.Labels[labels.PROJECT.Str()] = c.projectName
	configWrapper.Config.Labels[labels.HASH.Str()] = c.getHash()
	configWrapper.Config.Labels[labels.ONEOFF.Str()] = oneOffString
	configWrapper.Config.Labels[labels.NUMBER.Str()] = fmt.Sprint(c.containerNumber)
	configWrapper.Config.Labels[labels.VERSION.Str()] = ComposeVersion

	err = c.populateAdditionalHostConfig(configWrapper.HostConfig)
	if err != nil {
		return nil, err
	}

	if oldContainer != "" {
		info, err := c.client.ContainerInspect(ctx, oldContainer)
		if err != nil {
			return nil, err
		}
		configWrapper.HostConfig.Binds = util.Merge(configWrapper.HostConfig.Binds, volumeBinds(configWrapper.Config.Volumes, &info))
	}

	logrus.Debugf("Creating container %s %#v", c.name, configWrapper)

	container, err := c.client.ContainerCreate(ctx, configWrapper.Config, configWrapper.HostConfig, configWrapper.NetworkingConfig, c.name)
	if err != nil {
		logrus.Debugf("Failed to create container %s: %v", c.name, err)
		return nil, err
	}

	return GetContainer(ctx, c.client, container.ID)
}
// populateAdditionalHostConfig fills in relationship-derived host config:
// container links (internal and external) plus shared IPC/network
// namespaces taken from dependent services' containers.
func (c *Container) populateAdditionalHostConfig(hostConfig *container.HostConfig) error {
	links, err := c.getLinks()
	if err != nil {
		return err
	}

	for _, link := range c.service.DependentServices() {
		if !c.service.project.ServiceConfigs.Has(link.Target) {
			continue
		}

		service, err := c.service.project.CreateService(link.Target)
		if err != nil {
			return err
		}

		// FIXME(vdemeester) container should not know service
		containers, err := service.Containers(context.Background())
		if err != nil {
			return err
		}

		if link.Type == project.RelTypeIpcNamespace {
			hostConfig, err = c.addIpc(hostConfig, service, containers)
		} else if link.Type == project.RelTypeNetNamespace {
			hostConfig, err = c.addNetNs(hostConfig, service, containers)
		}

		if err != nil {
			return err
		}
	}

	// Rebuild the Links list as "name:alias" pairs, then append the
	// externally-declared links verbatim.
	hostConfig.Links = []string{}
	for k, v := range links {
		hostConfig.Links = append(hostConfig.Links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range c.service.Config().ExternalLinks {
		hostConfig.Links = append(hostConfig.Links, v)
	}
	return nil
}
// FIXME(vdemeester) this is temporary
// getLinks maps link aliases (and container names) to container names for
// every dependent service with a RelTypeLink relationship.
func (c *Container) getLinks() (map[string]string, error) {
	links := map[string]string{}
	for _, link := range c.service.DependentServices() {
		if !c.service.project.ServiceConfigs.Has(link.Target) {
			continue
		}

		service, err := c.service.project.CreateService(link.Target)
		if err != nil {
			return nil, err
		}

		// FIXME(vdemeester) container should not know service
		containers, err := service.Containers(context.Background())
		if err != nil {
			return nil, err
		}

		if link.Type == project.RelTypeLink {
			c.addLinks(links, service, link, containers)
		}
		// (A dead trailing `if err != nil` check was removed here: err
		// was already known to be nil at that point.)
	}
	return links, nil
}
// addLinks records each container under its own name and, for the first
// one encountered, under the relationship alias as well.
func (c *Container) addLinks(links map[string]string, service project.Service, rel project.ServiceRelationship, containers []project.Container) {
	for _, ctr := range containers {
		ctrName := ctr.Name()
		if _, seen := links[rel.Alias]; !seen {
			links[rel.Alias] = ctrName
		}
		links[ctrName] = ctrName
	}
}
// addIpc points the host config's IPC mode at the first container of the
// target service ("container:<id>"); errors when none exist.
func (c *Container) addIpc(config *container.HostConfig, service project.Service, containers []project.Container) (*container.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for IPC %v", c.service.Config().Ipc)
	}

	first := containers[0]
	id, err := first.ID()
	if err != nil {
		return nil, err
	}

	config.IpcMode = container.IpcMode("container:" + id)
	return config, nil
}
// addNetNs points the host config's network mode at the first container of
// the target service ("container:<id>"); errors when none exist.
func (c *Container) addNetNs(config *container.HostConfig, service project.Service, containers []project.Container) (*container.HostConfig, error) {
	if len(containers) == 0 {
		return nil, fmt.Errorf("Failed to find container for networks ns %v", c.service.Config().NetworkMode)
	}

	first := containers[0]
	id, err := first.ID()
	if err != nil {
		return nil, err
	}

	config.NetworkMode = container.NetworkMode("container:" + id)
	return config, nil
}
// ID returns the container Id. A missing container yields an empty string
// together with whatever error the lookup produced (possibly nil).
func (c *Container) ID() (string, error) {
	// FIXME(vdemeester) container should not ask for his ID..
	container, err := c.findExisting(context.Background())
	if container == nil {
		return "", err
	}
	return container.ID, err
}
// Name returns the container name as supplied at construction time.
func (c *Container) Name() string {
	return c.name
}
// Restart restarts the container if existing, does nothing otherwise.
// timeout is how many seconds the daemon waits before killing the process.
func (c *Container) Restart(ctx context.Context, timeout int) error {
	ctr, err := c.findExisting(ctx)
	if err != nil || ctr == nil {
		return err
	}

	wait := time.Duration(timeout) * time.Second
	return c.client.ContainerRestart(ctx, ctr.ID, &wait)
}
// Log forwards container logs to the project configured logger. With follow
// set the call streams until the log stream closes.
func (c *Container) Log(ctx context.Context, follow bool) error {
	container, err := c.findExisting(ctx)
	if container == nil || err != nil {
		return err
	}

	info, err := c.client.ContainerInspect(ctx, container.ID)
	if err != nil {
		return err
	}

	// FIXME(vdemeester) update container struct to do less API calls
	name := fmt.Sprintf("%s_%d", c.service.name, c.containerNumber)
	l := c.loggerFactory.Create(name)

	options := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     follow,
		Tail:       "all",
	}
	responseBody, err := c.client.ContainerLogs(ctx, c.name, options)
	if err != nil {
		return err
	}
	defer responseBody.Close()

	if info.Config.Tty {
		// tty: the stream is a single raw stream, copy it as-is.
		_, err = io.Copy(&logger.Wrapper{Logger: l}, responseBody)
	} else {
		// non-tty: demultiplex the stdout/stderr frames.
		_, err = stdcopy.StdCopy(&logger.Wrapper{Logger: l}, &logger.Wrapper{Logger: l, Err: true}, responseBody)
	}
	// NOTE(review): this debug line fires unconditionally, even with err nil.
	logrus.WithFields(logrus.Fields{"Logger": l, "err": err}).Debug("c.client.Logs() returned error")

	return err
}
// withContainer runs action against this container when it exists; lookup
// errors are propagated and a missing container is a silent no-op.
func (c *Container) withContainer(ctx context.Context, action func(*types.ContainerJSON) error) error {
	ctr, err := c.findExisting(ctx)
	switch {
	case err != nil:
		return err
	case ctr == nil:
		return nil
	default:
		return action(ctr)
	}
}
// Port returns the host bindings ("ip:hostPort", newline-separated) the
// specified container port is mapped on, or "" when unmapped. A missing
// container yields ("", nil) — the original dereferenced the nil inspect
// result and panicked, unlike every sibling method which guards it.
func (c *Container) Port(ctx context.Context, port string) (string, error) {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return "", err
	}
	if bindings, ok := container.NetworkSettings.Ports[nat.Port(port)]; ok {
		result := []string{}
		for _, binding := range bindings {
			result = append(result, binding.HostIP+":"+binding.HostPort)
		}
		return strings.Join(result, "\n"), nil
	}
	return "", nil
}
// Networks returns the containers network
// FIXME(vdemeester) should not need ctx or calling the API, will take care of it
// when refactoring Container.
func (c *Container) Networks(ctx context.Context) (map[string]*network.EndpointSettings, error) {
	ctr, err := c.findExisting(ctx)
	if err != nil {
		return nil, err
	}
	if ctr == nil {
		// No container yet: report an empty, non-nil network map.
		return map[string]*network.EndpointSettings{}, nil
	}
	return ctr.NetworkSettings.Networks, nil
}
// NetworkDisconnect disconnects the container from the specified network
// FIXME(vdemeester) will be refactor with Container refactoring
func (c *Container) NetworkDisconnect(ctx context.Context, net *yaml.Network) error {
	ctr, err := c.findExisting(ctx)
	if err != nil {
		return err
	}
	if ctr == nil {
		return nil
	}
	// The final `true` is the engine API's force flag.
	return c.client.NetworkDisconnect(ctx, net.RealName, ctr.ID, true)
}
// NetworkConnect connects the container to the specified network
// FIXME(vdemeester) will be refactor with Container refactoring
//
// Internal and external links are recomputed and passed along, and the
// service name is added as a network alias unless this is a one-off
// container. A missing container is a no-op.
func (c *Container) NetworkConnect(ctx context.Context, net *yaml.Network) error {
	container, err := c.findExisting(ctx)
	if err != nil || container == nil {
		return err
	}
	internalLinks, err := c.getLinks()
	if err != nil {
		return err
	}
	links := []string{}
	// TODO(vdemeester) handle link to self (?)
	for k, v := range internalLinks {
		links = append(links, strings.Join([]string{v, k}, ":"))
	}
	for _, v := range c.service.Config().ExternalLinks {
		links = append(links, v)
	}
	aliases := []string{}
	if !c.oneOff {
		aliases = []string{c.serviceName}
	}
	aliases = append(aliases, net.Aliases...)
	return c.client.NetworkConnect(ctx, net.RealName, container.ID, &network.EndpointSettings{
		Aliases:   aliases,
		Links:     links,
		IPAddress: net.IPv4Address,
		IPAMConfig: &network.EndpointIPAMConfig{
			IPv4Address: net.IPv4Address,
			IPv6Address: net.IPv6Address,
		},
	})
}

View file

@ -0,0 +1,38 @@
package docker
import (
"github.com/docker/docker/cliconfig"
"github.com/docker/docker/cliconfig/configfile"
"github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
)
// Context holds context meta information about a libcompose project and docker
// client information (like configuration file, builder to use, …)
type Context struct {
	project.Context
	ClientFactory client.Factory         // builds docker API clients for services
	ConfigDir     string                 // directory passed to cliconfig.Load
	ConfigFile    *configfile.ConfigFile // lazily loaded by LookupConfig
	AuthLookup    AuthLookup             // resolves registry credentials
}
// open lazily loads the docker configuration backing this context.
func (c *Context) open() error {
	return c.LookupConfig()
}
// LookupConfig tries to load the docker configuration files, if any.
// The load happens at most once: an already-populated ConfigFile is kept.
func (c *Context) LookupConfig() error {
	if c.ConfigFile != nil {
		return nil
	}

	loaded, err := cliconfig.Load(c.ConfigDir)
	if err != nil {
		return err
	}
	c.ConfigFile = loaded
	return nil
}

View file

@ -0,0 +1,288 @@
package docker
import (
"fmt"
"strings"
"golang.org/x/net/context"
"github.com/docker/docker/runconfig/opts"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/network"
"github.com/docker/engine-api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
"github.com/docker/libcompose/config"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/utils"
)
// ConfigWrapper wraps Config, HostConfig and NetworkingConfig for a container.
type ConfigWrapper struct {
	// Config is the engine-api container configuration (image, env, cmd, …).
	Config *container.Config
	// HostConfig carries host-level settings (binds, ports, resources, …).
	HostConfig *container.HostConfig
	// NetworkingConfig holds network attachments; note ConvertToAPI leaves it nil.
	NetworkingConfig *network.NetworkingConfig
}
// Filter returns the elements of vs for which the predicate f reports true,
// preserving their original order.
func Filter(vs []string, f func(string) bool) []string {
	kept := make([]string, 0, len(vs))
	for _, candidate := range vs {
		if !f(candidate) {
			continue
		}
		kept = append(kept, candidate)
	}
	return kept
}
// isBind reports whether the volume specification is a bind mount, i.e. it
// contains a "host:container" separator.
func isBind(s string) bool {
	return strings.Contains(s, ":")
}
// isVolume reports whether the volume specification is a plain (named or
// anonymous) volume rather than a bind mount.
func isVolume(s string) bool {
	return !strings.Contains(s, ":")
}
// ConvertToAPI converts a service configuration to a docker API container configuration.
func ConvertToAPI(s *Service) (*ConfigWrapper, error) {
	cfg, hostCfg, err := Convert(s.serviceConfig, s.context.Context, s.clientFactory)
	if err != nil {
		return nil, err
	}
	return &ConfigWrapper{
		Config:     cfg,
		HostConfig: hostCfg,
	}, nil
}
// isNamedVolume reports whether the volume reference is a named volume, i.e.
// not a relative ("." prefix), absolute ("/") or home-relative ("~") path.
func isNamedVolume(volume string) bool {
	for _, prefix := range []string{".", "/", "~"} {
		if strings.HasPrefix(volume, prefix) {
			return false
		}
	}
	return true
}
// volumes returns the set of container-side volume entries (non-bind entries)
// for the service. As a side effect it rewrites relative bind paths in
// c.Volumes to resolved ones (against the first compose file).
func volumes(c *config.ServiceConfig, ctx project.Context) map[string]struct{} {
	volumes := make(map[string]struct{}, len(c.Volumes))
	for k, v := range c.Volumes {
		if len(ctx.ComposeFiles) > 0 && !isNamedVolume(v) {
			v = ctx.ResourceLookup.ResolvePath(v, ctx.ComposeFiles[0])
		}
		// NOTE(review): the resolved path is written back into the service
		// config, so later readers (e.g. the Binds filter) observe it.
		c.Volumes[k] = v
		if isVolume(v) {
			volumes[v] = struct{}{}
		}
	}
	return volumes
}
// restartPolicy converts the compose restart setting into an engine-api
// container.RestartPolicy.
func restartPolicy(c *config.ServiceConfig) (*container.RestartPolicy, error) {
	parsed, err := opts.ParseRestartPolicy(c.Restart)
	if err != nil {
		return nil, err
	}
	policy := container.RestartPolicy{
		Name:              parsed.Name,
		MaximumRetryCount: parsed.MaximumRetryCount,
	}
	return &policy, nil
}
// ports computes the exposed ports and host port bindings of a service from
// its "ports" and "expose" settings. Entries from "expose" are merged into the
// exposed set but contribute no host bindings.
func ports(c *config.ServiceConfig) (map[nat.Port]struct{}, nat.PortMap, error) {
	published, bindings, err := nat.ParsePortSpecs(c.Ports)
	if err != nil {
		return nil, nil, err
	}
	onlyExposed, _, err := nat.ParsePortSpecs(c.Expose)
	if err != nil {
		return nil, nil, err
	}
	exposedPorts := map[nat.Port]struct{}{}
	for port, v := range published {
		exposedPorts[port] = v
	}
	for port, v := range onlyExposed {
		exposedPorts[port] = v
	}
	portBindings := nat.PortMap{}
	for port, specs := range bindings {
		converted := make([]nat.PortBinding, len(specs))
		for i, spec := range specs {
			converted[i] = nat.PortBinding{HostIP: spec.HostIP, HostPort: spec.HostPort}
		}
		portBindings[port] = converted
	}
	return exposedPorts, portBindings, nil
}
// Convert converts a service configuration to an docker API structures (Config and HostConfig)
func Convert(c *config.ServiceConfig, ctx project.Context, clientFactory composeclient.Factory) (*container.Config, *container.HostConfig, error) {
	// Parse the restart, port, device and volumes_from settings first so any
	// malformed value aborts before the config structs are assembled.
	restartPolicy, err := restartPolicy(c)
	if err != nil {
		return nil, nil, err
	}
	exposedPorts, portBindings, err := ports(c)
	if err != nil {
		return nil, nil, err
	}
	deviceMappings, err := parseDevices(c.Devices)
	if err != nil {
		return nil, nil, err
	}
	var volumesFrom []string
	if c.VolumesFrom != nil {
		volumesFrom, err = getVolumesFrom(c.VolumesFrom, ctx.Project.ServiceConfigs, ctx.ProjectName)
		if err != nil {
			return nil, nil, err
		}
	}
	// Container-level configuration (what runs inside the container).
	// Slices/maps are copied so callers can't mutate the service config through it.
	config := &container.Config{
	Entrypoint: strslice.StrSlice(utils.CopySlice(c.Entrypoint)),
	Hostname: c.Hostname,
	Domainname: c.DomainName,
	User: c.User,
	Env: utils.CopySlice(c.Environment),
	Cmd: strslice.StrSlice(utils.CopySlice(c.Command)),
	Image: c.Image,
	Labels: utils.CopyMap(c.Labels),
	ExposedPorts: exposedPorts,
	Tty: c.Tty,
	OpenStdin: c.StdinOpen,
	WorkingDir: c.WorkingDir,
	Volumes: volumes(c, ctx),
	MacAddress: c.MacAddress,
	}
	ulimits := []*units.Ulimit{}
	if c.Ulimits.Elements != nil {
		for _, ulimit := range c.Ulimits.Elements {
			ulimits = append(ulimits, &units.Ulimit{
				Name: ulimit.Name,
				Soft: ulimit.Soft,
				Hard: ulimit.Hard,
			})
		}
	}
	resources := container.Resources{
		CgroupParent: c.CgroupParent,
		Memory: c.MemLimit,
		MemorySwap: c.MemSwapLimit,
		CPUShares: c.CPUShares,
		CPUQuota: c.CPUQuota,
		CpusetCpus: c.CPUSet,
		Ulimits: ulimits,
		Devices: deviceMappings,
	}
	// Resolve the network mode: default to the first declared compose network,
	// and translate "service:<name>" / "container:<name>" references into
	// "container:<id>" by looking up the target container.
	networkMode := c.NetworkMode
	if c.NetworkMode == "" {
		if c.Networks != nil && len(c.Networks.Networks) > 0 {
			networkMode = c.Networks.Networks[0].RealName
		}
	} else {
		switch {
		case strings.HasPrefix(c.NetworkMode, "service:"):
			serviceName := c.NetworkMode[8:]
			if serviceConfig, ok := ctx.Project.ServiceConfigs.Get(serviceName); ok {
				// FIXME(vdemeester) this is actually not right, should be fixed but not there
				service, err := ctx.ServiceFactory.Create(ctx.Project, serviceName, serviceConfig)
				if err != nil {
					return nil, nil, err
				}
				containers, err := service.Containers(context.Background())
				if err != nil {
					return nil, nil, err
				}
				if len(containers) != 0 {
					container := containers[0]
					containerID, err := container.ID()
					if err != nil {
						return nil, nil, err
					}
					networkMode = "container:" + containerID
				}
				// FIXME(vdemeester) log/warn in case of len(containers) == 0
			}
		case strings.HasPrefix(c.NetworkMode, "container:"):
			containerName := c.NetworkMode[10:]
			client := clientFactory.Create(nil)
			container, err := GetContainer(context.Background(), client, containerName)
			if err != nil {
				return nil, nil, err
			}
			networkMode = "container:" + container.ID
		default:
			// do nothing :)
		}
	}
	// Host-level configuration (how the engine runs the container).
	hostConfig := &container.HostConfig{
		VolumesFrom: volumesFrom,
		CapAdd: strslice.StrSlice(utils.CopySlice(c.CapAdd)),
		CapDrop: strslice.StrSlice(utils.CopySlice(c.CapDrop)),
		ExtraHosts: utils.CopySlice(c.ExtraHosts),
		Privileged: c.Privileged,
		// Only "host:container" entries go to Binds; plain volumes are in Config.Volumes.
		Binds: Filter(c.Volumes, isBind),
		DNS: utils.CopySlice(c.DNS),
		DNSSearch: utils.CopySlice(c.DNSSearch),
		LogConfig: container.LogConfig{
			Type: c.Logging.Driver,
			Config: utils.CopyMap(c.Logging.Options),
		},
		NetworkMode: container.NetworkMode(networkMode),
		ReadonlyRootfs: c.ReadOnly,
		PidMode: container.PidMode(c.Pid),
		UTSMode: container.UTSMode(c.Uts),
		IpcMode: container.IpcMode(c.Ipc),
		PortBindings: portBindings,
		RestartPolicy: *restartPolicy,
		ShmSize: c.ShmSize,
		SecurityOpt: utils.CopySlice(c.SecurityOpt),
		VolumeDriver: c.VolumeDriver,
		Resources: resources,
	}
	return config, hostConfig, nil
}
// getVolumesFrom resolves volumes_from entries: an entry naming a service of
// the project maps to that service's first container name (or its fixed
// container_name when set); anything else is passed through as a raw
// container reference.
func getVolumesFrom(volumesFrom []string, serviceConfigs *config.ServiceConfigs, projectName string) ([]string, error) {
	resolved := make([]string, 0, len(volumesFrom))
	for _, entry := range volumesFrom {
		serviceConfig, isService := serviceConfigs.Get(entry)
		if !isService {
			// Not a service of this project: treat it as a container name/id.
			resolved = append(resolved, entry)
			continue
		}
		if serviceConfig.ContainerName != "" {
			resolved = append(resolved, serviceConfig.ContainerName)
			continue
		}
		// Default to the conventional name of the service's first container.
		resolved = append(resolved, fmt.Sprintf("%s_%s_1", projectName, entry))
	}
	return resolved, nil
}
// parseDevices converts device strings (e.g. "/dev/x:/dev/y:rwm") into
// engine-api device mappings.
func parseDevices(devices []string) ([]container.DeviceMapping, error) {
	mappings := make([]container.DeviceMapping, 0, len(devices))
	for _, spec := range devices {
		parsed, err := opts.ParseDevice(spec)
		if err != nil {
			return nil, err
		}
		mappings = append(mappings, container.DeviceMapping{
			PathOnHost:        parsed.PathOnHost,
			PathInContainer:   parsed.PathInContainer,
			CgroupPermissions: parsed.CgroupPermissions,
		})
	}
	return mappings, nil
}

View file

@ -0,0 +1,42 @@
package docker
import (
"golang.org/x/net/context"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
)
// GetContainersByFilter looks up the hosts containers with the specified filters and
// returns a list of container matching it, or an error.
func GetContainersByFilter(ctx context.Context, clientInstance client.APIClient, containerFilters ...map[string][]string) ([]types.Container, error) {
	filterArgs := filters.NewArgs()
	// Flatten every filter map into the single filters.Args value.
	// FIXME(vdemeester) I don't like 3 for loops >_<
	for _, filterMap := range containerFilters {
		for key, values := range filterMap {
			for _, value := range values {
				filterArgs.Add(key, value)
			}
		}
	}
	options := types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	}
	return clientInstance.ContainerList(ctx, options)
}
// GetContainer looks up the hosts containers with the specified ID
// or name and returns it, or an error. A missing container yields (nil, nil),
// not an error.
func GetContainer(ctx context.Context, clientInstance client.APIClient, id string) (*types.ContainerJSON, error) {
	inspected, err := clientInstance.ContainerInspect(ctx, id)
	if err == nil {
		return &inspected, nil
	}
	if client.IsErrContainerNotFound(err) {
		// Callers distinguish "not found" by checking for a nil result.
		return nil, nil
	}
	return nil, err
}

View file

@ -0,0 +1,80 @@
package docker
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
)
// removeImage removes the given image from the docker host using default
// (non-forced) removal options; the untagged/deleted report is discarded.
func removeImage(ctx context.Context, client client.APIClient, image string) error {
	_, err := client.ImageRemove(ctx, image, types.ImageRemoveOptions{})
	return err
}
// pullImage pulls the given image for a service, resolving registry
// credentials through the service's auth lookup and streaming docker's
// progress output to stderr.
func pullImage(ctx context.Context, client client.APIClient, service *Service, image string) error {
	fmt.Fprintf(os.Stderr, "Pulling %s (%s)...\n", service.name, image)
	distributionRef, err := reference.ParseNamed(image)
	if err != nil {
		return err
	}
	repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
	if err != nil {
		return err
	}
	// Encode the matching credentials for the X-Registry-Auth header.
	authConfig := service.authLookup.Lookup(repoInfo)
	encodedAuth, err := encodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}
	options := types.ImagePullOptions{
		RegistryAuth: encodedAuth,
	}
	responseBody, err := client.ImagePull(ctx, distributionRef.String(), options)
	if err != nil {
		logrus.Errorf("Failed to pull image %s: %v", image, err)
		return err
	}
	defer responseBody.Close()
	var writeBuff io.Writer = os.Stderr
	outFd, isTerminalOut := term.GetFdInfo(os.Stderr)
	err = jsonmessage.DisplayJSONMessagesStream(responseBody, writeBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			// Fix: the previous code did fmt.Fprintf(os.Stderr, "%s", writeBuff),
			// which formats the *os.File writer value itself (garbage), not any
			// captured output — the stream already went to stderr above.
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload
// (URL-safe alphabet), as expected by the engine's X-Registry-Auth header.
func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
	payload, err := json.Marshal(authConfig)
	if err != nil {
		return "", err
	}
	encoded := base64.URLEncoding.EncodeToString(payload)
	return encoded, nil
}

View file

@ -0,0 +1,92 @@
package docker
import (
"fmt"
"strconv"
"golang.org/x/net/context"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/libcompose/labels"
)
// format is the container naming pattern: <project>_<service>_<number>.
const format = "%s_%s_%d"
// Namer defines method to provide container name.
type Namer interface {
	// Next returns the next container name and its sequence number.
	Next() (string, int)
}
// defaultNamer generates sequential names for a project/service pair.
type defaultNamer struct {
	project string
	service string
	oneOff bool
	// currentNumber is the next sequence number to hand out.
	currentNumber int
}
// singleNamer always returns one fixed name (used for container_name).
type singleNamer struct {
	name string
}
// NewSingleNamer returns a namer that only allows a single name.
func NewSingleNamer(name string) Namer {
	return &singleNamer{name}
}
// NewNamer returns a namer that returns names based on the specified project and
// service name and an inner counter, e.g. project_service_1, project_service_2…
// The counter starts just past the highest number already used by an existing
// container of the same project/service (and one-off flavor).
func NewNamer(ctx context.Context, client client.ContainerAPIClient, project, service string, oneOff bool) (Namer, error) {
	oneOffValue := "False"
	if oneOff {
		oneOffValue = "True"
	}
	labelFilter := filters.NewArgs()
	labelFilter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT.Str(), project))
	labelFilter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE.Str(), service))
	labelFilter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), oneOffValue))
	containers, err := client.ContainerList(ctx, types.ContainerListOptions{
		All:    true,
		Filter: labelFilter,
	})
	if err != nil {
		return nil, err
	}
	// Find the highest container number currently in use; every matched
	// container is expected to carry a numeric NUMBER label.
	maxNumber := 0
	for _, container := range containers {
		number, err := strconv.Atoi(container.Labels[labels.NUMBER.Str()])
		if err != nil {
			return nil, err
		}
		if number > maxNumber {
			maxNumber = number
		}
	}
	return &defaultNamer{
		project:       project,
		service:       service,
		oneOff:        oneOff,
		currentNumber: maxNumber + 1,
	}, nil
}
// Next returns the next container name and sequence number for this service,
// then advances the internal counter. One-off containers get a "_run" suffix
// on the service part of the name.
func (i *defaultNamer) Next() (string, int) {
	number := i.currentNumber
	i.currentNumber++
	service := i.service
	if i.oneOff {
		service += "_run"
	}
	return fmt.Sprintf(format, i.project, service, number), number
}
// Next always returns the fixed name with sequence number 1.
func (s *singleNamer) Next() (string, int) {
	return s.name, 1
}

View file

@ -0,0 +1,19 @@
package network
import (
"github.com/docker/libcompose/config"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
)
// DockerFactory implements project.NetworksFactory
type DockerFactory struct {
	// ClientFactory creates the engine API client used for network operations.
	ClientFactory composeclient.Factory
}
// Create implements project.NetworksFactory Create method.
// It creates a Networks (that implements project.Networks) from specified configurations.
func (f *DockerFactory) Create(projectName string, networkConfigs map[string]*config.NetworkConfig, serviceConfigs *config.ServiceConfigs, networkEnabled bool) (project.Networks, error) {
	// NOTE(review): nil appears to select a default/unscoped client — other
	// call sites also pass nil; confirm against the Factory implementation.
	cli := f.ClientFactory.Create(nil)
	return NetworksFromServices(cli, projectName, networkConfigs, serviceConfigs, networkEnabled)
}

View file

@ -0,0 +1,194 @@
package network
import (
"fmt"
"reflect"
"strings"
"golang.org/x/net/context"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/network"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/yaml"
)
// Network holds attributes and method for a network definition in compose
type Network struct {
	client client.NetworkAPIClient
	// name is the compose network name (or the external name when external).
	name string
	// projectName prefixes the network name for non-external networks.
	projectName string
	driver string
	driverOptions map[string]string
	ipam config.Ipam
	// external marks pre-existing networks that libcompose must not create or remove.
	external bool
}
// fullName returns the engine-side name of the network: the bare name for
// external networks, otherwise "<project>_<name>".
func (n *Network) fullName() string {
	if n.external {
		return n.name
	}
	return n.projectName + "_" + n.name
}
// Inspect inspects the current network on the engine, using its full
// (project-scoped) name.
func (n *Network) Inspect(ctx context.Context) (types.NetworkResource, error) {
	return n.client.NetworkInspect(ctx, n.fullName())
}
// Remove removes the current network (from docker engine). External networks
// are never removed; they are only reported as skipped.
func (n *Network) Remove(ctx context.Context) error {
	if n.external {
		// Fix: the original message had no trailing newline, gluing it to the
		// next line of output.
		fmt.Printf("Network %s is external, skipping\n", n.fullName())
		return nil
	}
	fmt.Printf("Removing network %q\n", n.fullName())
	return n.client.NetworkRemove(ctx, n.fullName())
}
// EnsureItExists make sure the network exists and return an error if it does not exists
// and cannot be created.
func (n *Network) EnsureItExists(ctx context.Context) error {
	networkResource, err := n.Inspect(ctx)
	if n.external {
		// External networks are never created here: they must pre-exist.
		if client.IsErrNetworkNotFound(err) {
			// FIXME(vdemeester) introduce some libcompose error type
			return fmt.Errorf("Network %s declared as external, but could not be found. Please create the network manually using docker network create %s and try again", n.fullName(), n.fullName())
		}
		return err
	}
	if err != nil && client.IsErrNetworkNotFound(err) {
		// Missing non-external network: create it on the fly.
		return n.create(ctx)
	}
	// Network exists: verify driver and options still match the configuration.
	if n.driver != "" && networkResource.Driver != n.driver {
		return fmt.Errorf("Network %q needs to be recreated - driver has changed", n.fullName())
	}
	if len(n.driverOptions) != 0 && !reflect.DeepEqual(networkResource.Options, n.driverOptions) {
		return fmt.Errorf("Network %q needs to be recreated - options have changed", n.fullName())
	}
	// Any other inspect error falls through and is returned here.
	return err
}
// create asks the docker engine to create this network with its configured
// driver, driver options and IPAM settings.
func (n *Network) create(ctx context.Context) error {
	name := n.fullName()
	fmt.Printf("Creating network %q with driver %q\n", name, n.driver)
	createOptions := types.NetworkCreate{
		Driver:  n.driver,
		Options: n.driverOptions,
		IPAM:    convertToAPIIpam(n.ipam),
	}
	_, err := n.client.NetworkCreate(ctx, name, createOptions)
	return err
}
// convertToAPIIpam maps the compose IPAM configuration onto the engine-api
// network.IPAM type.
func convertToAPIIpam(ipam config.Ipam) network.IPAM {
	configs := make([]network.IPAMConfig, 0, len(ipam.Config))
	for _, entry := range ipam.Config {
		configs = append(configs, network.IPAMConfig{
			Subnet:     entry.Subnet,
			IPRange:    entry.IPRange,
			Gateway:    entry.Gateway,
			AuxAddress: entry.AuxAddress,
		})
	}
	return network.IPAM{
		Driver: ipam.Driver,
		Config: configs,
	}
}
// NewNetwork creates a new network from the specified name and config. For
// external networks the configured external name overrides the compose name.
func NewNetwork(projectName, name string, config *config.NetworkConfig, client client.NetworkAPIClient) *Network {
	isExternal := config.External.External
	networkName := name
	if isExternal {
		networkName = config.External.Name
	}
	return &Network{
		client:        client,
		name:          networkName,
		projectName:   projectName,
		driver:        config.Driver,
		driverOptions: config.DriverOpts,
		external:      isExternal,
		ipam:          config.Ipam,
	}
}
// Networks holds a list of network
type Networks struct {
	networks []*Network
	// networkEnabled gates Initialize/Remove; when false both are no-ops.
	networkEnabled bool
}
// Initialize makes sure every network exists (creating it when needed); it is
// a no-op when networking is disabled.
func (n *Networks) Initialize(ctx context.Context) error {
	if !n.networkEnabled {
		return nil
	}
	for _, current := range n.networks {
		if err := current.EnsureItExists(ctx); err != nil {
			return err
		}
	}
	return nil
}
// Remove removes every network (clean-up); it is a no-op when networking is
// disabled, and stops at the first removal error.
func (n *Networks) Remove(ctx context.Context) error {
	if !n.networkEnabled {
		return nil
	}
	for _, current := range n.networks {
		if err := current.Remove(ctx); err != nil {
			return err
		}
	}
	return nil
}
// NetworksFromServices creates a new Networks struct based on networks configurations and
// services configuration. If a network is defined but not used by any service, it will return
// an error along the Networks.
func NetworksFromServices(cli client.NetworkAPIClient, projectName string, networkConfigs map[string]*config.NetworkConfig, services *config.ServiceConfigs, networkEnabled bool) (*Networks, error) {
	var err error
	networks := make([]*Network, 0, len(networkConfigs))
	networkNames := map[string]*yaml.Network{}
	// Collect every network referenced by a service; a non-"default" reference
	// without a matching top-level network definition is a hard error.
	for _, serviceName := range services.Keys() {
		serviceConfig, _ := services.Get(serviceName)
		if serviceConfig.NetworkMode != "" || serviceConfig.Networks == nil || len(serviceConfig.Networks.Networks) == 0 {
			continue
		}
		for _, network := range serviceConfig.Networks.Networks {
			if network.Name != "default" {
				if _, ok := networkConfigs[network.Name]; !ok {
					return nil, fmt.Errorf(`Service "%s" uses an undefined network "%s"`, serviceName, network.Name)
				}
			}
			networkNames[network.Name] = network
		}
	}
	for name, config := range networkConfigs {
		network := NewNetwork(projectName, name, config, cli)
		networks = append(networks, network)
	}
	// Defined-but-unreferenced networks produce a soft error that is returned
	// alongside the (still usable) Networks value.
	if len(networkNames) != len(networks) {
		unused := []string{}
		for name := range networkConfigs {
			if name == "default" {
				continue
			}
			if _, ok := networkNames[name]; !ok {
				unused = append(unused, name)
			}
		}
		if len(unused) != 0 {
			err = fmt.Errorf("Some networks were defined but are not used by any service: %v", strings.Join(unused, " "))
		}
	}
	return &Networks{
		networks: networks,
		networkEnabled: networkEnabled,
	}, err
}

View file

@ -0,0 +1,123 @@
package docker
import (
"os"
"path/filepath"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/filters"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/docker/network"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/lookup"
"github.com/docker/libcompose/project"
)
// ComposeVersion is the name of the docker-compose.yml file syntax version
// supported by this package.
const ComposeVersion = "1.5.0"
// NewProject creates a Project with the specified context.
// Any factory/lookup left nil on the context is filled in with a sensible
// default before the compose files are parsed.
func NewProject(context *Context, parseOptions *config.ParseOptions) (project.APIProject, error) {
	if context.ResourceLookup == nil {
		context.ResourceLookup = &lookup.FileConfigLookup{}
	}
	if context.EnvironmentLookup == nil {
		// Default env resolution: .env file in the working directory first,
		// then the process environment.
		cwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		context.EnvironmentLookup = &lookup.ComposableEnvLookup{
			Lookups: []config.EnvironmentLookup{
				&lookup.EnvfileLookup{
					Path: filepath.Join(cwd, ".env"),
				},
				&lookup.OsEnvLookup{},
			},
		}
	}
	if context.AuthLookup == nil {
		context.AuthLookup = NewConfigAuthLookup(context)
	}
	if context.ServiceFactory == nil {
		context.ServiceFactory = &ServiceFactory{
			context: context,
		}
	}
	if context.ClientFactory == nil {
		factory, err := client.NewDefaultFactory(client.Options{})
		if err != nil {
			return nil, err
		}
		context.ClientFactory = factory
	}
	if context.NetworksFactory == nil {
		networksFactory := &network.DockerFactory{
			ClientFactory: context.ClientFactory,
		}
		context.NetworksFactory = networksFactory
	}
	// FIXME(vdemeester) Remove the context duplication ?
	runtime := &Project{
		clientFactory: context.ClientFactory,
	}
	p := project.NewProject(&context.Context, runtime, parseOptions)
	// Parse the compose files, then load the docker CLI configuration.
	err := p.Parse()
	if err != nil {
		return nil, err
	}
	if err = context.open(); err != nil {
		logrus.Errorf("Failed to open project %s: %v", p.Name, err)
		return nil, err
	}
	return p, err
}
// Project implements project.RuntimeProject and define docker runtime specific methods.
type Project struct {
	// clientFactory creates the engine API client used for runtime operations.
	clientFactory client.Factory
}
// RemoveOrphans implements project.RuntimeProject.RemoveOrphans.
// It will remove orphan containers that are part of the project but not to any services.
func (p *Project) RemoveOrphans(ctx context.Context, projectName string, serviceConfigs *config.ServiceConfigs) error {
	client := p.clientFactory.Create(nil)
	filter := filters.NewArgs()
	filter.Add("label", labels.PROJECT.EqString(projectName))
	// NOTE(review): ContainerList is called without All: true here, so this
	// presumably only sees running containers — confirm whether stopped
	// orphans are intentionally left alone.
	containers, err := client.ContainerList(ctx, types.ContainerListOptions{
		Filter: filter,
	})
	if err != nil {
		return err
	}
	// Build the set of service names currently declared in the project.
	currentServices := map[string]struct{}{}
	for _, serviceName := range serviceConfigs.Keys() {
		currentServices[serviceName] = struct{}{}
	}
	// Any project container whose service label is not declared anymore is an
	// orphan: kill it, then force-remove it.
	for _, container := range containers {
		serviceLabel := container.Labels[labels.SERVICE.Str()]
		if _, ok := currentServices[serviceLabel]; !ok {
			if err := client.ContainerKill(ctx, container.ID, "SIGKILL"); err != nil {
				return err
			}
			if err := client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
				Force: true,
			}); err != nil {
				return err
			}
		}
	}
	return nil
}

View file

@ -0,0 +1,596 @@
package docker
import (
"fmt"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
eventtypes "github.com/docker/engine-api/types/events"
"github.com/docker/engine-api/types/filters"
"github.com/docker/go-connections/nat"
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker/builder"
composeclient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/labels"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
"github.com/docker/libcompose/project/options"
"github.com/docker/libcompose/utils"
dockerevents "github.com/vdemeester/docker-events"
)
// Service is a project.Service implementations.
type Service struct {
	// name is the compose service name.
	name string
	project *project.Project
	serviceConfig *config.ServiceConfig
	// clientFactory creates the engine API client used by all operations.
	clientFactory composeclient.Factory
	// authLookup resolves registry credentials for pulls and builds.
	authLookup AuthLookup
	// FIXME(vdemeester) remove this at some point
	context *Context
}
// NewService creates a service bound to the given docker context, copying the
// client factory and auth lookup out of it.
func NewService(name string, serviceConfig *config.ServiceConfig, context *Context) *Service {
	s := &Service{
		name:          name,
		project:       context.Project,
		serviceConfig: serviceConfig,
		clientFactory: context.ClientFactory,
		authLookup:    context.AuthLookup,
		// FIXME(vdemeester) remove this at some point
		context: context,
	}
	return s
}
// Name returns the service name.
func (s *Service) Name() string {
	return s.name
}
// Config returns the configuration of the service (config.ServiceConfig).
// The returned pointer is the live config, not a copy.
func (s *Service) Config() *config.ServiceConfig {
	return s.serviceConfig
}
// DependentServices returns the dependent services (as an array of ServiceRelationship) of the service.
func (s *Service) DependentServices() []project.ServiceRelationship {
	return DefaultDependentServices(s.project, s)
}
// Create implements Service.Create. It ensures the image exists or build it
// if it can and then create a container.
func (s *Service) Create(ctx context.Context, options options.Create) error {
	existing, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	imageName, err := s.ensureImageExists(ctx, options.NoBuild)
	if err != nil {
		return err
	}
	if len(existing) == 0 {
		// No containers yet: create the first one.
		_, err = s.createOne(ctx, imageName)
		return err
	}
	// Containers already exist: recreate each one if needed.
	return s.eachContainer(ctx, existing, func(c *Container) error {
		return s.recreateIfNeeded(ctx, imageName, c, options.NoRecreate, options.ForceRecreate)
	})
}
// collectContainers lists all existing containers of this service, matched by
// project and service labels, as Container values.
func (s *Service) collectContainers(ctx context.Context) ([]*Container, error) {
	client := s.clientFactory.Create(s)
	found, err := GetContainersByFilter(ctx, client, labels.SERVICE.Eq(s.name), labels.PROJECT.Eq(s.project.Name))
	if err != nil {
		return nil, err
	}
	result := []*Container{}
	for _, container := range found {
		containerNumber, err := strconv.Atoi(container.Labels[labels.NUMBER.Str()])
		if err != nil {
			return nil, err
		}
		// The engine prefixes names with "/", so the actual name is the
		// second piece after splitting on it.
		nameParts := strings.SplitAfter(container.Names[0], "/")
		result = append(result, NewContainer(client, nameParts[1], containerNumber, s))
	}
	return result, nil
}
// createOne creates a single container for the service and returns it.
func (s *Service) createOne(ctx context.Context, imageName string) (*Container, error) {
	created, err := s.constructContainers(ctx, imageName, 1)
	if err != nil {
		return nil, err
	}
	return created[0], nil
}
// ensureImageExists returns the service image name, building or pulling the
// image first when it is not present locally. noBuild turns a required build
// into an error.
func (s *Service) ensureImageExists(ctx context.Context, noBuild bool) (string, error) {
	exists, err := s.ImageExists(ctx)
	if err != nil {
		return "", err
	}
	imageName := s.imageName()
	if exists {
		return imageName, nil
	}
	if s.Config().Build.Context == "" {
		// No build section: the image has to come from a registry.
		return imageName, s.Pull(ctx)
	}
	if noBuild {
		return "", fmt.Errorf("Service %q needs to be built, but no-build was specified", s.name)
	}
	return imageName, s.build(ctx, options.Build{})
}
// ImageExists returns whether or not the service image already exists on the
// docker host. A "not found" inspection result is not an error.
func (s *Service) ImageExists(ctx context.Context) (bool, error) {
	dockerClient := s.clientFactory.Create(s)
	_, _, err := dockerClient.ImageInspectWithRaw(ctx, s.imageName(), false)
	switch {
	case err == nil:
		return true, nil
	case client.IsErrImageNotFound(err):
		return false, nil
	default:
		return false, err
	}
}
// imageName returns the configured image name, falling back to the
// conventional "<project>_<service>" name for locally built images.
func (s *Service) imageName() string {
	if configured := s.Config().Image; configured != "" {
		return configured
	}
	return fmt.Sprintf("%s_%s", s.project.Name, s.Name())
}
// Build implements Service.Build. It will try to build the image and returns an error if any.
// It simply delegates to the unexported build helper.
func (s *Service) Build(ctx context.Context, buildOptions options.Build) error {
	return s.build(ctx, buildOptions)
}
// build runs a docker build for the service's build section and tags the
// result with the service image name.
func (s *Service) build(ctx context.Context, buildOptions options.Build) error {
	buildConfig := s.Config().Build
	if buildConfig.Context == "" {
		return fmt.Errorf("Specified service does not have a build section")
	}
	daemonBuilder := &builder.DaemonBuilder{
		Client:           s.clientFactory.Create(s),
		ContextDirectory: buildConfig.Context,
		Dockerfile:       buildConfig.Dockerfile,
		BuildArgs:        buildConfig.Args,
		AuthConfigs:      s.authLookup.All(),
		NoCache:          buildOptions.NoCache,
		ForceRemove:      buildOptions.ForceRemove,
		Pull:             buildOptions.Pull,
	}
	return daemonBuilder.Build(ctx, s.imageName())
}
// constructContainers creates containers until the service has count of them,
// reusing any that already exist. Names come from a Namer: the fixed
// container_name when configured (which cannot scale past one), otherwise the
// sequential project_service_N scheme.
func (s *Service) constructContainers(ctx context.Context, imageName string, count int) ([]*Container, error) {
	result, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	client := s.clientFactory.Create(s)
	var namer Namer
	if s.serviceConfig.ContainerName != "" {
		if count > 1 {
			logrus.Warnf(`The "%s" service is using the custom container name "%s". Docker requires each container to have a unique name. Remove the custom name to scale the service.`, s.name, s.serviceConfig.ContainerName)
		}
		namer = NewSingleNamer(s.serviceConfig.ContainerName)
	} else {
		namer, err = NewNamer(ctx, client, s.project.Name, s.name, false)
		if err != nil {
			return nil, err
		}
	}
	// Create only the missing containers (len(result) already exist).
	for i := len(result); i < count; i++ {
		containerName, containerNumber := namer.Next()
		c := NewContainer(client, containerName, containerNumber, s)
		dockerContainer, err := c.Create(ctx, imageName)
		if err != nil {
			return nil, err
		}
		logrus.Debugf("Created container %s: %v", dockerContainer.ID, dockerContainer.Name)
		result = append(result, NewContainer(client, containerName, containerNumber, s))
	}
	return result, nil
}
// Up implements Service.Up. It builds the image if needed, creates a container
// and start it.
func (s *Service) Up(ctx context.Context, options options.Up) error {
	existing, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	imageName := s.imageName()
	// Only ensure (build/pull) the image when containers may be (re)created.
	if len(existing) == 0 || !options.NoRecreate {
		if imageName, err = s.ensureImageExists(ctx, options.NoBuild); err != nil {
			return err
		}
	}
	return s.up(ctx, imageName, true, options)
}
// Run implements Service.Run. It runs a one of command within the service container.
// It always create a new container, attaches it to the service networks and
// either starts it detached (returning exit code 0) or runs it interactively.
func (s *Service) Run(ctx context.Context, commandParts []string, options options.Run) (int, error) {
	imageName, err := s.ensureImageExists(ctx, false)
	if err != nil {
		return -1, err
	}
	client := s.clientFactory.Create(s)
	namer, err := NewNamer(ctx, client, s.project.Name, s.name, true)
	if err != nil {
		return -1, err
	}
	containerName, containerNumber := namer.Next()
	c := NewOneOffContainer(client, containerName, containerNumber, s)
	configOverride := &config.ServiceConfig{Command: commandParts, Tty: true, StdinOpen: true}
	// Fix: the creation error was previously discarded, letting Run continue
	// with a container that may never have been created.
	if _, err := c.CreateWithOverride(ctx, imageName, configOverride); err != nil {
		return -1, err
	}
	if err := s.connectContainerToNetworks(ctx, c); err != nil {
		return -1, err
	}
	if options.Detached {
		logrus.Infof("%s", c.Name())
		return 0, c.Start(ctx)
	}
	return c.Run(ctx, configOverride)
}
// Info implements Service.Info. It returns an project.InfoSet with the containers
// related to this service (can be multiple if using the scale command).
func (s *Service) Info(ctx context.Context, qFlag bool) (project.InfoSet, error) {
	existing, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	result := project.InfoSet{}
	for _, c := range existing {
		info, err := c.Info(ctx, qFlag)
		if err != nil {
			return nil, err
		}
		result = append(result, info)
	}
	return result, nil
}
// Start implements Service.Start. It tries to start a container without creating it.
// Each container is (re)connected to the service networks before starting.
func (s *Service) Start(ctx context.Context) error {
	startOne := func(c *Container) error {
		if err := s.connectContainerToNetworks(ctx, c); err != nil {
			return err
		}
		return c.Start(ctx)
	}
	return s.collectContainersAndDo(ctx, startOne)
}
// up starts (and, when create is true, creates/recreates) the service
// containers, connecting each one to the service networks before starting it.
func (s *Service) up(ctx context.Context, imageName string, create bool, options options.Up) error {
	containers, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	logrus.Debugf("Found %d existing containers for service %s", len(containers), s.name)
	if len(containers) == 0 && create {
		// Nothing running yet: create the first container.
		c, err := s.createOne(ctx, imageName)
		if err != nil {
			return err
		}
		containers = []*Container{c}
	}
	return s.eachContainer(ctx, containers, func(c *Container) error {
		if create {
			if err := s.recreateIfNeeded(ctx, imageName, c, options.NoRecreate, options.ForceRecreate); err != nil {
				return err
			}
		}
		if err := s.connectContainerToNetworks(ctx, c); err != nil {
			return err
		}
		return c.Start(ctx)
	})
}
// connectContainerToNetworks (re)attaches the container to every network
// declared for the service. When the container is already attached, it is
// disconnected and reconnected unless its ID is already among the endpoint
// aliases.
func (s *Service) connectContainerToNetworks(ctx context.Context, c *Container) error {
	connectedNetworks, err := c.Networks(ctx)
	if err != nil {
		// Fix: this error was previously swallowed (returned as nil), which
		// hid network-inspection failures from callers.
		return err
	}
	if s.serviceConfig.Networks == nil {
		return nil
	}
	for _, netConfig := range s.serviceConfig.Networks.Networks {
		if existingNetwork, ok := connectedNetworks[netConfig.Name]; ok {
			// FIXME(vdemeester) implement alias checking (to not disconnect/reconnect for nothing)
			// FIXME(vdemeester) use shortID instead of ID
			containerID, _ := c.ID()
			aliasPresent := false
			for _, alias := range existingNetwork.Aliases {
				if alias == containerID {
					aliasPresent = true
				}
			}
			if aliasPresent {
				continue
			}
			if err := c.NetworkDisconnect(ctx, netConfig); err != nil {
				return err
			}
		}
		if err := c.NetworkConnect(ctx, netConfig); err != nil {
			return err
		}
	}
	return nil
}
// recreateIfNeeded recreates the container when it is out of sync with the
// desired image/configuration or when recreation is forced; noRecreate
// disables recreation entirely.
func (s *Service) recreateIfNeeded(ctx context.Context, imageName string, c *Container, noRecreate, forceRecreate bool) error {
	if noRecreate {
		return nil
	}

	outOfSync, err := c.OutOfSync(ctx, imageName)
	if err != nil {
		return err
	}

	logrus.WithFields(logrus.Fields{
		"outOfSync":     outOfSync,
		"ForceRecreate": forceRecreate,
		"NoRecreate":    noRecreate}).Debug("Going to decide if recreate is needed")

	if !forceRecreate && !outOfSync {
		return nil
	}

	logrus.Infof("Recreating %s", s.name)
	_, err = c.Recreate(ctx, imageName)
	return err
}
// collectContainersAndDo gathers the service's containers and applies action
// to each of them in parallel.
func (s *Service) collectContainersAndDo(ctx context.Context, action func(*Container) error) error {
	list, err := s.collectContainers(ctx)
	if err != nil {
		return err
	}
	return s.eachContainer(ctx, list, action)
}
// eachContainer runs action against every container in parallel and waits for
// all tasks to finish, returning any error the tasks produced.
func (s *Service) eachContainer(ctx context.Context, containers []*Container, action func(*Container) error) error {
	tasks := utils.InParallel{}
	for _, c := range containers {
		c := c // capture a per-iteration copy for the closure (pre-Go 1.22 semantics)
		tasks.Add(func() error {
			return action(c)
		})
	}
	return tasks.Wait()
}
// Stop implements Service.Stop. It stops any containers related to the service.
func (s *Service) Stop(ctx context.Context, timeout int) error {
	stopOne := func(c *Container) error {
		return c.Stop(ctx, timeout)
	}
	return s.collectContainersAndDo(ctx, stopOne)
}
// Restart implements Service.Restart. It restarts any containers related to the service.
func (s *Service) Restart(ctx context.Context, timeout int) error {
	restartOne := func(c *Container) error {
		return c.Restart(ctx, timeout)
	}
	return s.collectContainersAndDo(ctx, restartOne)
}
// Kill implements Service.Kill. It kills any containers related to the service.
func (s *Service) Kill(ctx context.Context, signal string) error {
	killOne := func(c *Container) error {
		return c.Kill(ctx, signal)
	}
	return s.collectContainersAndDo(ctx, killOne)
}
// Delete implements Service.Delete. It removes any containers related to the service.
func (s *Service) Delete(ctx context.Context, options options.Delete) error {
	deleteOne := func(c *Container) error {
		return c.Delete(ctx, options.RemoveVolume)
	}
	return s.collectContainersAndDo(ctx, deleteOne)
}
// Log implements Service.Log. It returns the docker logs for each container related to the service.
func (s *Service) Log(ctx context.Context, follow bool) error {
	logOne := func(c *Container) error {
		return c.Log(ctx, follow)
	}
	return s.collectContainersAndDo(ctx, logOne)
}
// Scale implements Service.Scale. It creates or removes containers to have the specified number
// of related container to the service to run.
func (s *Service) Scale(ctx context.Context, scale int, timeout int) error {
	if s.specificiesHostPort() {
		logrus.Warnf("The \"%s\" service specifies a port on the host. If multiple containers for this service are created on a single host, the port will clash.", s.Name())
	}

	// Count existing containers; any container beyond the requested scale is
	// stopped and deleted.
	foundCount := 0
	err := s.collectContainersAndDo(ctx, func(c *Container) error {
		foundCount++
		if foundCount > scale {
			err := c.Stop(ctx, timeout)
			if err != nil {
				return err
			}
			// FIXME(vdemeester) remove volume in scale by default ?
			return c.Delete(ctx, false)
		}
		return nil
	})
	if err != nil {
		return err
	}

	// If fewer containers exist than requested, construct the missing ones
	// (ensureImageExists with pull=false, i.e. build/pull only if absent).
	if foundCount != scale {
		imageName, err := s.ensureImageExists(ctx, false)
		if err != nil {
			return err
		}

		if _, err = s.constructContainers(ctx, imageName, scale); err != nil {
			return err
		}
	}

	// Start everything; create=false because containers were already built above.
	return s.up(ctx, "", false, options.Up{})
}
// Pull implements Service.Pull. It pulls the image of the service and skip the service that
// would need to be built.
func (s *Service) Pull(ctx context.Context) error {
	image := s.Config().Image
	if image == "" {
		// No image name means the service is built, not pulled.
		return nil
	}
	return pullImage(ctx, s.clientFactory.Create(s), s, image)
}
// Pause implements Service.Pause. It puts into pause the container(s) related
// to the service.
func (s *Service) Pause(ctx context.Context) error {
	pauseOne := func(c *Container) error {
		return c.Pause(ctx)
	}
	return s.collectContainersAndDo(ctx, pauseOne)
}
// Unpause implements Service.Unpause. It brings back from pause the container(s)
// related to the service.
func (s *Service) Unpause(ctx context.Context) error {
	unpauseOne := func(c *Container) error {
		return c.Unpause(ctx)
	}
	return s.collectContainersAndDo(ctx, unpauseOne)
}
// RemoveImage implements Service.RemoveImage. It removes images used for the service
// depending on the specified type.
func (s *Service) RemoveImage(ctx context.Context, imageType options.ImageType) error {
	switch imageType {
	case "local":
		// "local" only removes built (unnamed) images; a configured image name
		// means the image is shared and must be kept.
		if s.Config().Image == "" {
			return removeImage(ctx, s.clientFactory.Create(s), s.imageName())
		}
		return nil
	case "all":
		return removeImage(ctx, s.clientFactory.Create(s), s.imageName())
	default:
		// Don't do a thing, should be validated up-front
		return nil
	}
}
// eventAttributes lists the Docker event actor attributes that are copied into
// the ContainerEvent emitted by Events.
var eventAttributes = []string{"image", "name"}
// Events implements Service.Events. It listen to all real-time events happening
// for the service, and put them into the specified chan.
func (s *Service) Events(ctx context.Context, evts chan events.ContainerEvent) error {
	// Restrict the event stream to containers labeled with this project and service.
	filter := filters.NewArgs()
	filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT, s.project.Name))
	filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE, s.name))
	client := s.clientFactory.Create(s)
	// Monitor yields an error channel; receiving here blocks until the monitor
	// stops, so this call does not return while events are streaming.
	return <-dockerevents.Monitor(ctx, client, types.EventsOptions{
		Filters: filter,
	}, func(m eventtypes.Message) {
		service := m.Actor.Attributes[labels.SERVICE.Str()]
		// Copy only the whitelisted actor attributes (see eventAttributes).
		attributes := map[string]string{}
		for _, attr := range eventAttributes {
			attributes[attr] = m.Actor.Attributes[attr]
		}
		e := events.ContainerEvent{
			Service:    service,
			Event:      m.Action,
			Type:       m.Type,
			ID:         m.Actor.ID,
			Time:       time.Unix(m.Time, 0),
			Attributes: attributes,
		}
		evts <- e
	})
}
// Containers implements Service.Containers. It returns the list of containers
// that are related to the service.
func (s *Service) Containers(ctx context.Context) ([]project.Container, error) {
	collected, err := s.collectContainers(ctx)
	if err != nil {
		return nil, err
	}
	// Convert the concrete containers to the project.Container interface.
	result := make([]project.Container, 0, len(collected))
	for _, c := range collected {
		result = append(result, c)
	}
	return result, nil
}
// specificiesHostPort reports whether the service configuration binds at least
// one container port to a fixed host port. (Name typo kept for caller
// compatibility.)
func (s *Service) specificiesHostPort() bool {
	_, bindings, err := nat.ParsePortSpecs(s.Config().Ports)
	if err != nil {
		// Bug fix: errors were printed to stdout with fmt.Println; log through
		// logrus like the rest of this file. Best-effort: fall through and
		// inspect whatever bindings were parsed.
		logrus.Warnf("Invalid port specs for service %s: %v", s.Name(), err)
	}
	for _, portBindings := range bindings {
		for _, portBinding := range portBindings {
			if portBinding.HostPort != "" {
				return true
			}
		}
	}
	return false
}

View file

@ -0,0 +1,16 @@
package docker
import (
"github.com/docker/libcompose/config"
"github.com/docker/libcompose/project"
)
// ServiceFactory is an implementation of project.ServiceFactory.
type ServiceFactory struct {
	context *Context // docker context handed to every service this factory creates
}
// Create creates a Service based on the specified project, name and service configuration.
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *config.ServiceConfig) (project.Service, error) {
	// The project argument is unused here; the factory's context carries
	// everything NewService needs.
	service := NewService(name, serviceConfig, s.context)
	return service, nil
}

View file

@ -0,0 +1,45 @@
package docker
import (
"github.com/docker/engine-api/types/container"
"github.com/docker/libcompose/project"
)
// DefaultDependentServices return the dependent services (as an array of ServiceRelationship)
// for the specified project and service. It looks for : links, volumesFrom, net and ipc configuration.
// It uses default project implementation and append some docker specific ones.
func DefaultDependentServices(p *project.Project, s project.Service) []project.ServiceRelationship {
	rels := project.DefaultDependentServices(p, s)
	// Add docker-specific namespace relationships on top of the defaults.
	rels = appendNs(p, rels, s.Config().NetworkMode, project.RelTypeNetNamespace)
	return appendNs(p, rels, s.Config().Ipc, project.RelTypeIpcNamespace)
}
// appendNs appends a relationship of relType when conf refers to another
// service's namespace (e.g. a "container:<name>" IPC/net mode); otherwise it
// returns rels unchanged.
func appendNs(p *project.Project, rels []project.ServiceRelationship, conf string, relType project.ServiceRelationshipType) []project.ServiceRelationship {
	if service := GetContainerFromIpcLikeConfig(p, conf); service != "" {
		return append(rels, project.NewServiceRelationship(service, relType))
	}
	return rels
}
// GetContainerFromIpcLikeConfig returns name of the service that shares the IPC
// namespace with the specified service.
func GetContainerFromIpcLikeConfig(p *project.Project, conf string) string {
	mode := container.IpcMode(conf)
	if !mode.IsContainer() {
		return ""
	}
	// Only return the target when it actually names a service of this project.
	target := mode.Container()
	if target != "" && p.ServiceConfigs.Has(target) {
		return target
	}
	return ""
}