author     baude <bbaude@redhat.com>    2019-06-24 15:48:34 -0500
committer  baude <bbaude@redhat.com>    2019-06-25 13:51:24 -0500
commit     dd81a44ccfa34585ef62319835c8bb421db9e334 (patch)
tree       7bab006a56d5823ccdf7b8cf6e59502ee954a979 /libpod/container_api.go
parent     a1a4a75abee2c381483a218e1660621ee416ef7c (diff)
remove libpod from main
the compilation demands of having libpod in main are a burden for remote client compilation. to combat this, we should move the use of libpod structs, vars, constants, and functions into the adapter code, where it will only be compiled by the local client. this should result in cleaner code organization and smaller binaries. it should also help if we ever need to compile the remote client on non-Linux operating systems natively (not cross-compiled).

Signed-off-by: baude <bbaude@redhat.com>
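For context on the pattern behind this change: the error sentinels referenced in the diff below (ErrCtrStateInvalid, ErrCtrStopped, ErrCtrRemoved, ErrInternal) live in the small leaf package libpod/define, which clients can import without pulling in the rest of libpod. A minimal sketch of what such a package exports is shown here; the message strings are illustrative assumptions, not copied from the repository:

    // Package define holds libpod's shared sentinel errors. Keeping them in
    // a leaf package with no heavy dependencies lets both the local and
    // remote clients import and compare against them.
    package define

    import "errors"

    var (
        // ErrCtrStateInvalid indicates a container is in the wrong state
        // for the requested operation.
        ErrCtrStateInvalid = errors.New("container state improper")

        // ErrCtrStopped indicates the container is already stopped.
        ErrCtrStopped = errors.New("container is stopped")

        // ErrCtrRemoved indicates the container has been removed.
        ErrCtrRemoved = errors.New("container has been removed")

        // ErrInternal indicates an internal library error.
        ErrInternal = errors.New("internal libpod error")
    )

Because a package like define needs nothing beyond the standard library, importing it costs the remote client almost nothing at compile time.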
Diffstat (limited to 'libpod/container_api.go')
-rw-r--r--    libpod/container_api.go    37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 370e3e5d9..b8c339a39 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -10,6 +10,7 @@ import (
     "sync"
     "time"

+    "github.com/containers/libpod/libpod/define"
     "github.com/containers/libpod/libpod/events"
     "github.com/containers/libpod/pkg/lookup"
     "github.com/containers/storage/pkg/stringid"
@@ -39,7 +40,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
     if !(c.state.State == ContainerStateConfigured ||
         c.state.State == ContainerStateStopped ||
         c.state.State == ContainerStateExited) {
-        return errors.Wrapf(ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
     }

     // don't recursively start
@@ -180,12 +181,12 @@ func (c *Container) StopWithTimeout(timeout uint) error {
     if c.state.State == ContainerStateConfigured ||
         c.state.State == ContainerStateUnknown ||
         c.state.State == ContainerStatePaused {
-        return errors.Wrapf(ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
     }

     if c.state.State == ContainerStateStopped ||
         c.state.State == ContainerStateExited {
-        return ErrCtrStopped
+        return define.ErrCtrStopped
     }
     defer c.newContainerEvent(events.Stop)
     return c.stop(timeout)
@@ -203,7 +204,7 @@ func (c *Container) Kill(signal uint) error {
     }

     if c.state.State != ContainerStateRunning {
-        return errors.Wrapf(ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
     }

     defer c.newContainerEvent(events.Kill)
@@ -241,7 +242,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir

     // TODO can probably relax this once we track exec sessions
     if conState != ContainerStateRunning {
-        return errors.Wrapf(ErrCtrStateInvalid, "cannot exec into container that is not running")
+        return errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
     }
     if privileged || c.config.Privileged {
         capList = caps.GetAllCapabilities()
@@ -401,7 +402,7 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
     if c.state.State != ContainerStateCreated &&
         c.state.State != ContainerStateRunning &&
         c.state.State != ContainerStateExited {
-        return errors.Wrapf(ErrCtrStateInvalid, "can only attach to created or running containers")
+        return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
     }
     defer c.newContainerEvent(events.Attach)
     return c.attach(streams, keys, resize, false, nil)
@@ -440,12 +441,12 @@ func (c *Container) Unmount(force bool) error {
         }
         if mounted == 1 {
             if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
-                return errors.Wrapf(ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
+                return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
             }
             if len(c.state.ExecSessions) != 0 {
-                return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
+                return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
             }
-            return errors.Wrapf(ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
+            return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
         }
     }
     defer c.newContainerEvent(events.Unmount)
@@ -464,10 +465,10 @@ func (c *Container) Pause() error {
     }

     if c.state.State == ContainerStatePaused {
-        return errors.Wrapf(ErrCtrStateInvalid, "%q is already paused", c.ID())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
     }
     if c.state.State != ContainerStateRunning {
-        return errors.Wrapf(ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
+        return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
     }
     defer c.newContainerEvent(events.Pause)
     return c.pause()
@@ -485,7 +486,7 @@ func (c *Container) Unpause() error {
     }

     if c.state.State != ContainerStatePaused {
-        return errors.Wrapf(ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
     }
     defer c.newContainerEvent(events.Unpause)
     return c.unpause()
@@ -509,7 +510,7 @@ func (c *Container) Export(path string) error {
 // AddArtifact creates and writes to an artifact file for the container
 func (c *Container) AddArtifact(name string, data []byte) error {
     if !c.valid {
-        return ErrCtrRemoved
+        return define.ErrCtrRemoved
     }

     return ioutil.WriteFile(c.getArtifactPath(name), data, 0740)
@@ -518,7 +519,7 @@ func (c *Container) AddArtifact(name string, data []byte) error {
 // GetArtifact reads the specified artifact file from the container
 func (c *Container) GetArtifact(name string) ([]byte, error) {
     if !c.valid {
-        return nil, ErrCtrRemoved
+        return nil, define.ErrCtrRemoved
     }

     return ioutil.ReadFile(c.getArtifactPath(name))
@@ -527,7 +528,7 @@ func (c *Container) GetArtifact(name string) ([]byte, error) {
 // RemoveArtifact deletes the specified artifact file
 func (c *Container) RemoveArtifact(name string) error {
     if !c.valid {
-        return ErrCtrRemoved
+        return define.ErrCtrRemoved
     }

     return os.Remove(c.getArtifactPath(name))
@@ -542,7 +543,7 @@ func (c *Container) Wait() (int32, error) {
 // code. The argument is the interval at which it checks the container's status.
 func (c *Container) WaitWithInterval(waitTimeout time.Duration) (int32, error) {
     if !c.valid {
-        return -1, ErrCtrRemoved
+        return -1, define.ErrCtrRemoved
     }
     err := wait.PollImmediateInfinite(waitTimeout,
         func() (bool, error) {
@@ -578,7 +579,7 @@ func (c *Container) Cleanup(ctx context.Context) error {

     // Check if state is good
     if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
-        return errors.Wrapf(ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
     }

     // Handle restart policy.
@@ -596,7 +597,7 @@ func (c *Container) Cleanup(ctx context.Context) error {

     // Check if we have active exec sessions
     if len(c.state.ExecSessions) != 0 {
-        return errors.Wrapf(ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
+        return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
     }
     defer c.newContainerEvent(events.Cleanup)
     return c.cleanup(ctx)
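One point worth noting about the diff above: every errors.Wrapf call wraps a sentinel from define, and github.com/pkg/errors preserves the root cause through wrapping, so callers can still compare against the sentinel after unwrapping with errors.Cause. A hedged caller-side sketch follows; stopContainer is a hypothetical stand-in for a libpod method such as Container.StopWithTimeout, and the container ID is made up:

    package main

    import (
        "fmt"

        "github.com/containers/libpod/libpod/define"
        "github.com/pkg/errors"
    )

    // stopContainer is a hypothetical stand-in for a libpod call that can
    // fail with a wrapped sentinel, e.g. Container.StopWithTimeout.
    func stopContainer(id string) error {
        return errors.Wrapf(define.ErrCtrStopped, "container %s", id)
    }

    func main() {
        if err := stopContainer("abc123"); err != nil {
            // errors.Cause unwraps to the original sentinel, so the
            // comparison works through any number of Wrapf calls.
            if errors.Cause(err) == define.ErrCtrStopped {
                fmt.Println("already stopped; nothing to do")
                return
            }
            fmt.Println("stop failed:", err)
        }
    }

Since this check needs only libpod/define and pkg/errors on the import graph, adapter code in the remote client can handle these errors without compiling libpod itself, which is the win the commit message describes.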