Diffstat (limited to 'libpod')
-rw-r--r--  libpod/boltdb_state.go  23
-rw-r--r--  libpod/boltdb_state_internal.go  4
-rw-r--r--  libpod/boltdb_state_linux.go  2
-rw-r--r--  libpod/container.go  19
-rw-r--r--  libpod/container_api.go  2
-rw-r--r--  libpod/container_commit.go  2
-rw-r--r--  libpod/container_copy_linux.go  2
-rw-r--r--  libpod/container_exec.go  28
-rw-r--r--  libpod/container_inspect.go  6
-rw-r--r--  libpod/container_internal.go  69
-rw-r--r--  libpod/container_internal_linux.go  40
-rw-r--r--  libpod/container_log.go  10
-rw-r--r--  libpod/container_path_resolution.go  8
-rw-r--r--  libpod/define/container_inspect.go  8
-rw-r--r--  libpod/events.go  14
-rw-r--r--  libpod/events/journal_linux.go  2
-rw-r--r--  libpod/info.go  2
-rw-r--r--  libpod/kube.go  41
-rw-r--r--  libpod/lock/file/file_lock.go  2
-rw-r--r--  libpod/lock/shm/shm_lock_nocgo.go  20
-rw-r--r--  libpod/logs/log.go  2
-rw-r--r--  libpod/network/cni/cni_conversion.go  2
-rw-r--r--  libpod/network/cni/cni_exec.go  12
-rw-r--r--  libpod/network/cni/config.go  8
-rw-r--r--  libpod/network/cni/config_test.go  21
-rw-r--r--  libpod/network/cni/run.go  5
-rw-r--r--  libpod/network/cni/run_test.go  45
-rw-r--r--  libpod/network/types/network.go  4
-rw-r--r--  libpod/networking_linux.go  65
-rw-r--r--  libpod/networking_slirp4netns.go  12
-rw-r--r--  libpod/oci_attach_linux.go  8
-rw-r--r--  libpod/oci_conmon_exec_linux.go  4
-rw-r--r--  libpod/oci_conmon_linux.go  32
-rw-r--r--  libpod/oci_util.go  6
-rw-r--r--  libpod/pod.go  6
-rw-r--r--  libpod/pod_api.go  48
-rw-r--r--  libpod/pod_internal.go  2
-rw-r--r--  libpod/reset.go  8
-rw-r--r--  libpod/runtime.go  45
-rw-r--r--  libpod/runtime_cstorage.go  6
-rw-r--r--  libpod/runtime_ctr.go  47
-rw-r--r--  libpod/runtime_img.go  23
-rw-r--r--  libpod/runtime_migrate.go  4
-rw-r--r--  libpod/runtime_pod_linux.go  20
-rw-r--r--  libpod/runtime_volume_linux.go  10
-rw-r--r--  libpod/shutdown/handler.go  2
-rw-r--r--  libpod/storage.go  24
-rw-r--r--  libpod/util.go  8
-rw-r--r--  libpod/util_linux.go  2
49 files changed, 477 insertions, 308 deletions
diff --git a/libpod/boltdb_state.go b/libpod/boltdb_state.go
index 56b4bafd3..1242a8d6b 100644
--- a/libpod/boltdb_state.go
+++ b/libpod/boltdb_state.go
@@ -954,7 +954,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
// not worth erroring over.
// If we do, a single bad container JSON
// could render libpod unusable.
- logrus.Errorf("Error retrieving container %s from the database: %v", string(id), err)
+ logrus.Errorf("Retrieving container %s from the database: %v", string(id), err)
}
} else {
ctrs = append(ctrs, ctr)
@@ -1756,6 +1756,23 @@ func (s *BoltState) SafeRewriteContainerConfig(ctr *Container, oldName, newName
if err := allCtrsBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
return errors.Wrapf(err, "error renaming container %s in all containers bucket in DB", ctr.ID())
}
+ if ctr.config.Pod != "" {
+ podsBkt, err := getPodBucket(tx)
+ if err != nil {
+ return err
+ }
+ podBkt := podsBkt.Bucket([]byte(ctr.config.Pod))
+ if podBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "bucket for pod %s does not exist", ctr.config.Pod)
+ }
+ podCtrBkt := podBkt.Bucket(containersBkt)
+ if podCtrBkt == nil {
+ return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", ctr.config.Pod)
+ }
+ if err := podCtrBkt.Put([]byte(ctr.ID()), []byte(newName)); err != nil {
+ return errors.Wrapf(err, "error renaming container %s in pod %s members bucket", ctr.ID(), ctr.config.Pod)
+ }
+ }
}
}
@@ -2556,7 +2573,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
if err := s.getVolumeFromDB(id, volume, volBucket); err != nil {
if errors.Cause(err) != define.ErrNSMismatch {
- logrus.Errorf("Error retrieving volume %s from the database: %v", string(id), err)
+ logrus.Errorf("Retrieving volume %s from the database: %v", string(id), err)
}
} else {
volumes = append(volumes, volume)
@@ -3352,7 +3369,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
if err := s.getPodFromDB(id, pod, podBucket); err != nil {
if errors.Cause(err) != define.ErrNSMismatch {
- logrus.Errorf("Error retrieving pod %s from the database: %v", string(id), err)
+ logrus.Errorf("Retrieving pod %s from the database: %v", string(id), err)
}
} else {
pods = append(pods, pod)
diff --git a/libpod/boltdb_state_internal.go b/libpod/boltdb_state_internal.go
index f63876c14..3e3c17a9e 100644
--- a/libpod/boltdb_state_internal.go
+++ b/libpod/boltdb_state_internal.go
@@ -259,7 +259,7 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {
// of a defer statement only
func (s *BoltState) deferredCloseDBCon(db *bolt.DB) {
if err := s.closeDBCon(db); err != nil {
- logrus.Errorf("failed to close libpod db: %q", err)
+ logrus.Errorf("Failed to close libpod db: %q", err)
}
}
@@ -875,7 +875,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
podCtrs := podDB.Bucket(containersBkt)
if podCtrs == nil {
// Malformed pod
- logrus.Errorf("pod %s malformed in database, missing containers bucket!", pod.ID())
+ logrus.Errorf("Pod %s malformed in database, missing containers bucket!", pod.ID())
} else {
ctrInPod := podCtrs.Get(ctrID)
if ctrInPod == nil {
diff --git a/libpod/boltdb_state_linux.go b/libpod/boltdb_state_linux.go
index 72243dcc5..4fb3236a0 100644
--- a/libpod/boltdb_state_linux.go
+++ b/libpod/boltdb_state_linux.go
@@ -31,7 +31,7 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er
return errors.Wrapf(err, "error joining network namespace of container %s", ctr.ID())
}
- logrus.Errorf("error joining network namespace for container %s: %v", ctr.ID(), err)
+ logrus.Errorf("Joining network namespace for container %s: %v", ctr.ID(), err)
ctr.state.NetNS = nil
}
}
diff --git a/libpod/container.go b/libpod/container.go
index 5c56ff036..4d15c04c5 100644
--- a/libpod/container.go
+++ b/libpod/container.go
@@ -774,9 +774,9 @@ func (c *Container) ExecSessions() ([]string, error) {
return ids, nil
}
-// ExecSession retrieves detailed information on a single active exec session in
-// a container
-func (c *Container) ExecSession(id string) (*ExecSession, error) {
+// execSessionNoCopy returns the associated exec session to id.
+// Note that the session is not a deep copy.
+func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
@@ -791,6 +791,17 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
}
+ return session, nil
+}
+
+// ExecSession retrieves detailed information on a single active exec session in
+// a container
+func (c *Container) ExecSession(id string) (*ExecSession, error) {
+ session, err := c.execSessionNoCopy(id)
+ if err != nil {
+ return nil, err
+ }
+
returnSession := new(ExecSession)
if err := JSONDeepCopy(session, returnSession); err != nil {
return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
@@ -1095,7 +1106,7 @@ func (c *Container) AutoRemove() bool {
if spec.Annotations == nil {
return false
}
- return c.Spec().Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue
+ return spec.Annotations[define.InspectAnnotationAutoremove] == define.InspectResponseTrue
}
// Timezone returns the timezone configured inside the container.
diff --git a/libpod/container_api.go b/libpod/container_api.go
index 637f5b686..2d5b07a35 100644
--- a/libpod/container_api.go
+++ b/libpod/container_api.go
@@ -53,7 +53,7 @@ func (c *Container) Init(ctx context.Context, recursive bool) error {
if err := c.prepare(); err != nil {
if err2 := c.cleanup(ctx); err2 != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err2)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err2)
}
return err
}
diff --git a/libpod/container_commit.go b/libpod/container_commit.go
index 87e5d511c..6ae225cbc 100644
--- a/libpod/container_commit.go
+++ b/libpod/container_commit.go
@@ -51,7 +51,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
defer func() {
if err := c.unpause(); err != nil {
- logrus.Errorf("error unpausing container %q: %v", c.ID(), err)
+ logrus.Errorf("Unpausing container %q: %v", c.ID(), err)
}
}()
}
diff --git a/libpod/container_copy_linux.go b/libpod/container_copy_linux.go
index a35824289..7d4dd0d46 100644
--- a/libpod/container_copy_linux.go
+++ b/libpod/container_copy_linux.go
@@ -174,7 +174,7 @@ func (c *Container) copyToArchive(ctx context.Context, path string, writer io.Wr
// getContainerUser returns the specs.User and ID mappings of the container.
func getContainerUser(container *Container, mountPoint string) (specs.User, error) {
- userspec := container.Config().User
+ userspec := container.config.User
uid, gid, _, err := chrootuser.GetUser(mountPoint, userspec)
u := specs.User{
diff --git a/libpod/container_exec.go b/libpod/container_exec.go
index 5d4bcb422..f99fb7d3f 100644
--- a/libpod/container_exec.go
+++ b/libpod/container_exec.go
@@ -415,7 +415,7 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w
session.ExitCode = define.ExecErrorCodeGeneric
if err := c.save(); err != nil {
- logrus.Errorf("Error saving container %s exec session %s after failure to prepare: %v", err, c.ID(), session.ID())
+ logrus.Errorf("Saving container %s exec session %s after failure to prepare: %v", err, c.ID(), session.ID())
}
return err
@@ -440,7 +440,7 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w
session.ExitCode = define.TranslateExecErrorToExitCode(define.ExecErrorCodeGeneric, err)
if err := c.save(); err != nil {
- logrus.Errorf("Error saving container %s exec session %s after failure to start: %v", err, c.ID(), session.ID())
+ logrus.Errorf("Saving container %s exec session %s after failure to start: %v", err, c.ID(), session.ID())
}
return err
@@ -549,7 +549,7 @@ func (c *Container) ExecStop(sessionID string, timeout *uint) error {
if err := c.cleanupExecBundle(session.ID()); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Error stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
+ logrus.Errorf("Stopping container %s exec session %s: %v", c.ID(), session.ID(), cleanupErr)
}
cleanupErr = err
}
@@ -695,7 +695,7 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
session.State = define.ExecStateStopped
if err := c.save(); err != nil {
- logrus.Errorf("Error saving state of container %s: %v", c.ID(), err)
+ logrus.Errorf("Saving state of container %s: %v", c.ID(), err)
}
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it has stopped", c.ID(), session.ID())
@@ -747,7 +747,7 @@ func (c *Container) Exec(config *ExecConfig, streams *define.AttachStreams, resi
return -1, err
}
- session, err := c.ExecSession(sessionID)
+ session, err := c.execSessionNoCopy(sessionID)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchExecSession {
// TODO: If a proper Context is ever plumbed in here, we
@@ -825,7 +825,7 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) {
defer func() {
if retErr != nil {
if err := os.RemoveAll(bundlePath); err != nil {
- logrus.Warnf("error removing exec bundle after creation caused another error: %v", err)
+ logrus.Warnf("Error removing exec bundle after creation caused another error: %v", err)
}
}
}()
@@ -911,7 +911,7 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
alive, err := c.ociRuntime.ExecUpdateStatus(c, id)
if err != nil {
if lastErr != nil {
- logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
continue
@@ -926,7 +926,7 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
exitCode, err := c.readExecExitCode(session.ID())
if err != nil {
if lastErr != nil {
- logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
@@ -940,7 +940,7 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
}
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
- logrus.Errorf("Error checking container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Checking container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
@@ -951,7 +951,7 @@ func (c *Container) getActiveExecSessions() ([]string, error) {
if needSave {
if err := c.save(); err != nil {
if lastErr != nil {
- logrus.Errorf("Error reaping exec sessions for container %s: %v", c.ID(), lastErr)
+ logrus.Errorf("Reaping exec sessions for container %s: %v", c.ID(), lastErr)
}
lastErr = err
}
@@ -970,7 +970,7 @@ func (c *Container) removeAllExecSessions() error {
for _, id := range knownSessions {
if err := c.ociRuntime.ExecStopContainer(c, id, c.StopTimeout()); err != nil {
if lastErr != nil {
- logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
continue
@@ -978,7 +978,7 @@ func (c *Container) removeAllExecSessions() error {
if err := c.cleanupExecBundle(id); err != nil {
if lastErr != nil {
- logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
@@ -987,7 +987,7 @@ func (c *Container) removeAllExecSessions() error {
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
if errors.Cause(err) != define.ErrCtrRemoved {
if lastErr != nil {
- logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
@@ -997,7 +997,7 @@ func (c *Container) removeAllExecSessions() error {
if err := c.save(); err != nil {
if errors.Cause(err) != define.ErrCtrRemoved {
if lastErr != nil {
- logrus.Errorf("Error stopping container %s exec sessions: %v", c.ID(), lastErr)
+ logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
lastErr = err
}
diff --git a/libpod/container_inspect.go b/libpod/container_inspect.go
index 42d8da52d..ab79d82d9 100644
--- a/libpod/container_inspect.go
+++ b/libpod/container_inspect.go
@@ -156,7 +156,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
// An error here is not considered fatal; no health state will be displayed
logrus.Error(err)
} else {
- data.State.Healthcheck = healthCheckState
+ data.State.Health = healthCheckState
}
}
@@ -178,13 +178,13 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver
if size {
rootFsSize, err := c.rootFsSize()
if err != nil {
- logrus.Errorf("error getting rootfs size %q: %v", config.ID, err)
+ logrus.Errorf("Getting rootfs size %q: %v", config.ID, err)
}
data.SizeRootFs = rootFsSize
rwSize, err := c.rwSize()
if err != nil {
- logrus.Errorf("error getting rw size %q: %v", config.ID, err)
+ logrus.Errorf("Getting rw size %q: %v", config.ID, err)
}
data.SizeRw = &rwSize
}
diff --git a/libpod/container_internal.go b/libpod/container_internal.go
index 9ac2cd5bd..3f9738411 100644
--- a/libpod/container_internal.go
+++ b/libpod/container_internal.go
@@ -176,7 +176,7 @@ func (c *Container) waitForExitFileAndSync() error {
c.state.State = define.ContainerStateStopped
if err2 := c.save(); err2 != nil {
- logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
+ logrus.Errorf("Saving container %s state: %v", c.ID(), err2)
}
return err
@@ -278,7 +278,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
defer func() {
if retErr != nil {
if err := c.cleanup(ctx); err != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -709,7 +709,7 @@ func (c *Container) export(path string) error {
mountPoint = containerMount
defer func() {
if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
- logrus.Errorf("error unmounting container %q: %v", c.ID(), err)
+ logrus.Errorf("Unmounting container %q: %v", c.ID(), err)
}
}()
}
@@ -778,7 +778,7 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr
defer func() {
if retErr != nil {
if err := c.cleanup(ctx); err != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -859,7 +859,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
}
if len(ctrErrors) > 0 {
- logrus.Errorf("error starting some container dependencies")
+ logrus.Errorf("Starting some container dependencies")
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
@@ -923,12 +923,11 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
}
// Check the status
- conf := depCtr.Config()
state, err := depCtr.State()
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
- if state != define.ContainerStateRunning && !conf.IsInfra {
+ if state != define.ContainerStateRunning && !depCtr.config.IsInfra {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@@ -1003,7 +1002,7 @@ func (c *Container) cniHosts() string {
for _, status := range c.getNetworkStatus() {
for _, netInt := range status.Interfaces {
for _, netAddress := range netInt.Networks {
- hosts += fmt.Sprintf("%s\t%s %s\n", netAddress.Subnet.IP.String(), c.Hostname(), c.Config().Name)
+ hosts += fmt.Sprintf("%s\t%s %s\n", netAddress.Subnet.IP.String(), c.Hostname(), c.config.Name)
}
}
}
@@ -1047,7 +1046,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
// upstream in any OCI runtime.
// TODO: Remove once runc supports cgroupsv2
if strings.Contains(err.Error(), "this version of runc doesn't work on cgroups v2") {
- logrus.Errorf("oci runtime %q does not support CGroups V2: use system migrate to mitigate", c.ociRuntime.Name())
+ logrus.Errorf("Oci runtime %q does not support CGroups V2: use system migrate to mitigate", c.ociRuntime.Name())
}
return err
}
@@ -1057,7 +1056,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
// Remove any exec sessions leftover from a potential prior run.
if len(c.state.ExecSessions) > 0 {
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
- logrus.Errorf("Error removing container %s exec sessions from DB: %v", c.ID(), err)
+ logrus.Errorf("Removing container %s exec sessions from DB: %v", c.ID(), err)
}
c.state.ExecSessions = make(map[string]*ExecSession)
}
@@ -1164,7 +1163,7 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) {
defer func() {
if retErr != nil {
if err := c.cleanup(ctx); err != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1211,7 +1210,7 @@ func (c *Container) start() error {
payload += daemon.SdNotifyReady
}
if sent, err := daemon.SdNotify(false, payload); err != nil {
- logrus.Errorf("Error notifying systemd of Conmon PID: %s", err.Error())
+ logrus.Errorf("Notifying systemd of Conmon PID: %s", err.Error())
} else if sent {
logrus.Debugf("Notify sent successfully")
}
@@ -1290,7 +1289,7 @@ func (c *Container) stop(timeout uint) error {
return stopErr
default:
if stopErr != nil {
- logrus.Errorf("Error syncing container %s status: %v", c.ID(), err)
+ logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return stopErr
}
return err
@@ -1328,7 +1327,7 @@ func (c *Container) stop(timeout uint) error {
c.state.FinishedTime = time.Now()
c.state.State = define.ContainerStateStopped
if err := c.save(); err != nil {
- logrus.Errorf("Error saving container %s status: %v", c.ID(), err)
+ logrus.Errorf("Saving container %s status: %v", c.ID(), err)
}
return errors.Wrapf(define.ErrConmonDead, "container %s conmon process missing, cannot retrieve exit code", c.ID())
@@ -1432,7 +1431,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retEr
defer func() {
if retErr != nil {
if err := c.cleanup(ctx); err != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1483,7 +1482,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
defer func() {
if deferredErr != nil {
if err := c.unmountSHM(c.config.ShmDir); err != nil {
- logrus.Errorf("Error unmounting SHM for container %s after mount error: %v", c.ID(), err)
+ logrus.Errorf("Unmounting SHM for container %s after mount error: %v", c.ID(), err)
}
}
}()
@@ -1526,7 +1525,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
defer func() {
if deferredErr != nil {
if err := c.unmount(false); err != nil {
- logrus.Errorf("Error unmounting container %s after mount error: %v", c.ID(), err)
+ logrus.Errorf("Unmounting container %s after mount error: %v", c.ID(), err)
}
}
}()
@@ -1554,7 +1553,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
}
vol.lock.Lock()
if err := vol.unmount(false); err != nil {
- logrus.Errorf("Error unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err)
+ logrus.Errorf("Unmounting volume %s after error mounting container %s: %v", vol.Name(), c.ID(), err)
}
vol.lock.Unlock()
}()
@@ -1669,7 +1668,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
if err := copier.Put(volMount, "", copyOpts, reader); err != nil {
err2 := <-errChan
if err2 != nil {
- logrus.Errorf("Error streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2)
+ logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2)
}
return nil, errors.Wrapf(err, "error copying up to volume %s", vol.Name())
}
@@ -1705,7 +1704,7 @@ func (c *Container) cleanupStorage() error {
for _, containerMount := range c.config.Mounts {
if err := c.unmountSHM(containerMount); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
cleanupErr = err
}
@@ -1730,7 +1729,7 @@ func (c *Container) cleanupStorage() error {
logrus.Errorf("Storage for container %s has been removed", c.ID())
} else {
if cleanupErr != nil {
- logrus.Errorf("Error cleaning up container %s storage: %v", c.ID(), cleanupErr)
+ logrus.Errorf("Cleaning up container %s storage: %v", c.ID(), cleanupErr)
}
cleanupErr = err
}
@@ -1741,7 +1740,7 @@ func (c *Container) cleanupStorage() error {
vol, err := c.runtime.state.Volume(v.Name)
if err != nil {
if cleanupErr != nil {
- logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
@@ -1754,7 +1753,7 @@ func (c *Container) cleanupStorage() error {
vol.lock.Lock()
if err := vol.unmount(false); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID())
}
@@ -1768,7 +1767,7 @@ func (c *Container) cleanupStorage() error {
if c.valid {
if err := c.save(); err != nil {
if cleanupErr != nil {
- logrus.Errorf("Error unmounting container %s: %v", c.ID(), cleanupErr)
+ logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
}
cleanupErr = err
}
@@ -1785,7 +1784,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// Remove healthcheck unit/timer file if it execs
if c.config.HealthCheckConfig != nil {
if err := c.removeTimer(); err != nil {
- logrus.Errorf("Error removing timer for container %s healthcheck: %v", c.ID(), err)
+ logrus.Errorf("Removing timer for container %s healthcheck: %v", c.ID(), err)
}
}
@@ -1800,7 +1799,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// exists.
if err := c.cleanupRuntime(ctx); err != nil {
if lastError != nil {
- logrus.Errorf("Error removing container %s from OCI runtime: %v", c.ID(), err)
+ logrus.Errorf("Removing container %s from OCI runtime: %v", c.ID(), err)
} else {
lastError = err
}
@@ -1809,7 +1808,7 @@ func (c *Container) cleanup(ctx context.Context) error {
// Unmount storage
if err := c.cleanupStorage(); err != nil {
if lastError != nil {
- logrus.Errorf("Error unmounting container %s storage: %v", c.ID(), err)
+ logrus.Errorf("Unmounting container %s storage: %v", c.ID(), err)
} else {
lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
}
@@ -1823,14 +1822,14 @@ func (c *Container) cleanup(ctx context.Context) error {
lastError = err
continue
}
- logrus.Errorf("error unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
+ logrus.Errorf("Unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
}
if err := img.Unmount(false); err != nil {
if lastError == nil {
lastError = err
continue
}
- logrus.Errorf("error unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
+ logrus.Errorf("Unmounting image volume %q:%q :%v", v.Source, v.Dest, err)
}
}
@@ -1874,7 +1873,7 @@ func (c *Container) postDeleteHooks(ctx context.Context) error {
var stderr, stdout bytes.Buffer
hookErr, err := exec.Run(ctx, &hook, state, &stdout, &stderr, exec.DefaultPostKillTimeout)
if err != nil {
- logrus.Warnf("container %s: poststop hook %d: %v", c.ID(), i, err)
+ logrus.Warnf("Container %s: poststop hook %d: %v", c.ID(), i, err)
if hookErr != err {
logrus.Debugf("container %s: poststop hook %d (hook error): %v", c.ID(), i, hookErr)
}
@@ -2005,12 +2004,12 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
}
return nil, err
}
- ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ ociHooks, err := manager.Hooks(config, c.config.Spec.Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
if len(ociHooks) > 0 || config.Hooks != nil {
- logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
+ logrus.Warnf("Implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
}
for i, hook := range ociHooks {
allHooks[i] = hook
@@ -2022,7 +2021,7 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
return nil, err
}
- allHooks, err = manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+ allHooks, err = manager.Hooks(config, c.config.Spec.Annotations, len(c.config.UserVolumes) > 0)
if err != nil {
return nil, err
}
@@ -2030,7 +2029,7 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
hookErr, err := exec.RuntimeConfigFilter(ctx, allHooks["precreate"], config, exec.DefaultPostKillTimeout)
if err != nil {
- logrus.Warnf("container %s: precreate hook: %v", c.ID(), err)
+ logrus.Warnf("Container %s: precreate hook: %v", c.ID(), err)
if hookErr != nil && hookErr != err {
logrus.Debugf("container %s: precreate hook (hook error): %v", c.ID(), hookErr)
}
@@ -2106,7 +2105,7 @@ func (c *Container) canWithPrevious() error {
// JSON files for later export
func (c *Container) prepareCheckpointExport() error {
// save live config
- if _, err := metadata.WriteJSONFile(c.Config(), c.bundlePath(), metadata.ConfigDumpFile); err != nil {
+ if _, err := metadata.WriteJSONFile(c.config, c.bundlePath(), metadata.ConfigDumpFile); err != nil {
return err
}
diff --git a/libpod/container_internal_linux.go b/libpod/container_internal_linux.go
index 0a663200a..867ecc2ad 100644
--- a/libpod/container_internal_linux.go
+++ b/libpod/container_internal_linux.go
@@ -70,7 +70,7 @@ func (c *Container) unmountSHM(mount string) error {
return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
}
// If it's just an EINVAL or ENOENT, debug logs only
- logrus.Debugf("container %s failed to unmount %s : %v", c.ID(), mount, err)
+ logrus.Debugf("Container %s failed to unmount %s : %v", c.ID(), mount, err)
}
return nil
}
@@ -143,7 +143,7 @@ func (c *Container) prepare() error {
}
if mountStorageErr != nil {
if createErr != nil {
- logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+ logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
}
createErr = mountStorageErr
}
@@ -154,7 +154,7 @@ func (c *Container) prepare() error {
if err := c.cleanupStorage(); err != nil {
// createErr is guaranteed non-nil, so print
// unconditionally
- logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+ logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID())
}
}
@@ -163,7 +163,7 @@ func (c *Container) prepare() error {
// isn't ready it will do nothing.
if createErr != nil {
if err := c.cleanupNetwork(); err != nil {
- logrus.Errorf("Error preparing container %s: %v", c.ID(), createErr)
+ logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID())
}
}
@@ -258,7 +258,7 @@ func (c *Container) cleanupNetwork() error {
// Stop the container's network namespace (if it has one)
if err := c.runtime.teardownNetNS(c); err != nil {
- logrus.Errorf("unable to cleanup network for container %s: %q", c.ID(), err)
+ logrus.Errorf("Unable to cleanup network for container %s: %q", c.ID(), err)
}
c.state.NetNS = nil
@@ -599,7 +599,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if isGIDAvailable {
g.AddProcessAdditionalGid(uint32(gid))
} else {
- logrus.Warnf("additional gid=%d is not present in the user namespace, skip setting it", gid)
+ logrus.Warnf("Additional gid=%d is not present in the user namespace, skip setting it", gid)
}
}
}
@@ -640,7 +640,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
if err != nil {
if os.IsNotExist(err) {
// The kernel-provided files only exist if user namespaces are supported
- logrus.Debugf("user or group ID mappings not available: %s", err)
+ logrus.Debugf("User or group ID mappings not available: %s", err)
} else {
return nil, err
}
@@ -781,7 +781,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
}
if rootPropagation != "" {
- logrus.Debugf("set root propagation to %q", rootPropagation)
+ logrus.Debugf("Set root propagation to %q", rootPropagation)
if err := g.SetLinuxRootPropagation(rootPropagation); err != nil {
return nil, err
}
@@ -838,7 +838,7 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
}
notifyDir := filepath.Join(c.bundlePath(), "notify")
- logrus.Debugf("checking notify %q dir", notifyDir)
+ logrus.Debugf("Checking notify %q dir", notifyDir)
if err := os.MkdirAll(notifyDir, 0755); err != nil {
if !os.IsExist(err) {
return errors.Wrapf(err, "unable to create notify %q dir", notifyDir)
@@ -847,7 +847,7 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
if err := label.Relabel(notifyDir, c.MountLabel(), true); err != nil {
return errors.Wrapf(err, "relabel failed %q", notifyDir)
}
- logrus.Debugf("add bindmount notify %q dir", notifyDir)
+ logrus.Debugf("Add bindmount notify %q dir", notifyDir)
if _, ok := c.state.BindMounts["/run/notify"]; !ok {
c.state.BindMounts["/run/notify"] = notifyDir
}
@@ -1199,7 +1199,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
for _, del := range cleanup {
file := filepath.Join(c.bundlePath(), del)
if err := os.Remove(file); err != nil {
- logrus.Debugf("unable to remove file %s", file)
+ logrus.Debugf("Unable to remove file %s", file)
}
}
}
@@ -1299,7 +1299,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
var netStatus map[string]types.StatusBlock
_, err := metadata.ReadJSONFile(&netStatus, c.bundlePath(), metadata.NetworkStatusFile)
if err != nil {
- logrus.Infof("failed to unmarshal network status, cannot restore the same ip/mac: %v", err)
+ logrus.Infof("Failed to unmarshal network status, cannot restore the same ip/mac: %v", err)
}
// If the restored container should get a new name, the IP address of
// the container will not be restored. This assumes that if a new name is
@@ -1310,7 +1310,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
if err == nil && options.Name == "" && (!options.IgnoreStaticIP || !options.IgnoreStaticMAC) {
// The file with the network.status does exist. Let's restore the
// container with the same networks settings as during checkpointing.
- aliases, err := c.runtime.state.GetAllNetworkAliases(c)
+ aliases, err := c.GetAllNetworkAliases()
if err != nil {
return err
}
@@ -1349,7 +1349,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
defer func() {
if retErr != nil {
if err := c.cleanup(ctx); err != nil {
- logrus.Errorf("error cleaning up container %s: %v", c.ID(), err)
+ logrus.Errorf("Cleaning up container %s: %v", c.ID(), err)
}
}
}()
@@ -1903,11 +1903,11 @@ func (c *Container) generateResolvConf() (string, error) {
for _, nsIP := range status.DNSServerIPs {
networkNameServers = append(networkNameServers, nsIP.String())
}
- logrus.Debugf("adding nameserver(s) from network status of '%q'", status.DNSServerIPs)
+ logrus.Debugf("Adding nameserver(s) from network status of '%q'", status.DNSServerIPs)
}
if status.DNSSearchDomains != nil {
networkSearchDomains = append(networkSearchDomains, status.DNSSearchDomains...)
- logrus.Debugf("adding search domain(s) from network status of '%q'", status.DNSSearchDomains)
+ logrus.Debugf("Adding search domain(s) from network status of '%q'", status.DNSSearchDomains)
}
}
@@ -1956,7 +1956,7 @@ func (c *Container) generateResolvConf() (string, error) {
if c.config.NetMode.IsSlirp4netns() {
slirp4netnsDNS, err := GetSlirp4netnsDNS(c.slirp4netnsSubnet)
if err != nil {
- logrus.Warn("failed to determine Slirp4netns DNS: ", err.Error())
+ logrus.Warn("Failed to determine Slirp4netns DNS: ", err.Error())
} else {
nameservers = append([]string{slirp4netnsDNS.String()}, nameservers...)
}
@@ -2058,7 +2058,7 @@ func (c *Container) getHosts() string {
// When using slirp4netns, the interface gets a static IP
slirp4netnsIP, err := GetSlirp4netnsIP(c.slirp4netnsSubnet)
if err != nil {
- logrus.Warnf("failed to determine slirp4netnsIP: %v", err.Error())
+ logrus.Warnf("Failed to determine slirp4netnsIP: %v", err.Error())
} else {
hosts += fmt.Sprintf("# used by slirp4netns\n%s\t%s %s\n", slirp4netnsIP.String(), c.Hostname(), c.config.Name)
}
@@ -2109,12 +2109,12 @@ func (c *Container) getHosts() string {
} else if c.config.NetMode.IsSlirp4netns() {
gatewayIP, err := GetSlirp4netnsGateway(c.slirp4netnsSubnet)
if err != nil {
- logrus.Warn("failed to determine gatewayIP: ", err.Error())
+ logrus.Warn("Failed to determine gatewayIP: ", err.Error())
} else {
hosts += fmt.Sprintf("%s host.containers.internal\n", gatewayIP.String())
}
} else {
- logrus.Debug("network configuration does not support host.containers.internal address")
+ logrus.Debug("Network configuration does not support host.containers.internal address")
}
}
diff --git a/libpod/container_log.go b/libpod/container_log.go
index 89dd5e8b0..a65b2a44f 100644
--- a/libpod/container_log.go
+++ b/libpod/container_log.go
@@ -83,7 +83,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
}
nll, err := logs.NewLogLine(line.Text)
if err != nil {
- logrus.Errorf("Error getting new log line: %v", err)
+ logrus.Errorf("Getting new log line: %v", err)
continue
}
if nll.Partial() {
@@ -108,7 +108,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
state, err := c.State()
if err != nil || state != define.ContainerStateRunning {
if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
- logrus.Errorf("Error getting container state: %v", err)
+ logrus.Errorf("Getting container state: %v", err)
}
go func() {
// Make sure to wait at least for the poll duration
@@ -116,7 +116,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
time.Sleep(watch.POLL_DURATION)
tailError := t.StopAtEOF()
if tailError != nil && tailError.Error() != "tail: stop at eof" {
- logrus.Errorf("Error stopping logger: %v", tailError)
+ logrus.Errorf("Stopping logger: %v", tailError)
}
}()
return nil
@@ -132,7 +132,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
}
go func() {
if err := c.runtime.Events(ctx, eventOptions); err != nil {
- logrus.Errorf("Error waiting for container to exit: %v", err)
+ logrus.Errorf("Waiting for container to exit: %v", err)
}
}()
// Now wait for the died event and signal to finish
@@ -143,7 +143,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
time.Sleep(watch.POLL_DURATION)
tailError := t.StopAtEOF()
if tailError != nil && fmt.Sprintf("%v", tailError) != "tail: stop at eof" {
- logrus.Errorf("Error stopping logger: %v", tailError)
+ logrus.Errorf("Stopping logger: %v", tailError)
}
}()
}
diff --git a/libpod/container_path_resolution.go b/libpod/container_path_resolution.go
index ec7306ca1..bb2ef1a73 100644
--- a/libpod/container_path_resolution.go
+++ b/libpod/container_path_resolution.go
@@ -112,7 +112,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string
func findVolume(c *Container, containerPath string) (*Volume, error) {
runtime := c.Runtime()
cleanedContainerPath := filepath.Clean(containerPath)
- for _, vol := range c.Config().NamedVolumes {
+ for _, vol := range c.config.NamedVolumes {
if cleanedContainerPath == filepath.Clean(vol.Dest) {
return runtime.GetVolume(vol.Name)
}
@@ -124,7 +124,7 @@ func findVolume(c *Container, containerPath string) (*Volume, error) {
// Volume's destination.
func isPathOnVolume(c *Container, containerPath string) bool {
cleanedContainerPath := filepath.Clean(containerPath)
- for _, vol := range c.Config().NamedVolumes {
+ for _, vol := range c.config.NamedVolumes {
if cleanedContainerPath == filepath.Clean(vol.Dest) {
return true
}
@@ -141,7 +141,7 @@ func isPathOnVolume(c *Container, containerPath string) bool {
// path of a Mount. Returns a matching Mount or nil.
func findBindMount(c *Container, containerPath string) *specs.Mount {
cleanedPath := filepath.Clean(containerPath)
- for _, m := range c.Config().Spec.Mounts {
+ for _, m := range c.config.Spec.Mounts {
if m.Type != "bind" {
continue
}
@@ -157,7 +157,7 @@ func findBindMount(c *Container, containerPath string) *specs.Mount {
// Mount's destination.
func isPathOnBindMount(c *Container, containerPath string) bool {
cleanedContainerPath := filepath.Clean(containerPath)
- for _, m := range c.Config().Spec.Mounts {
+ for _, m := range c.config.Spec.Mounts {
if cleanedContainerPath == filepath.Clean(m.Destination) {
return true
}
diff --git a/libpod/define/container_inspect.go b/libpod/define/container_inspect.go
index 90703a807..7decb18a8 100644
--- a/libpod/define/container_inspect.go
+++ b/libpod/define/container_inspect.go
@@ -202,10 +202,16 @@ type InspectContainerState struct {
Error string `json:"Error"` // TODO
StartedAt time.Time `json:"StartedAt"`
FinishedAt time.Time `json:"FinishedAt"`
- Healthcheck HealthCheckResults `json:"Healthcheck,omitempty"`
+ Health HealthCheckResults `json:"Health,omitempty"`
Checkpointed bool `json:"Checkpointed,omitempty"`
}
+// Healthcheck returns the HealthCheckResults. This is used for old podman compat
+// to make the "Healthcheck" key available in the go template.
+func (s *InspectContainerState) Healthcheck() HealthCheckResults {
+ return s.Health
+}
+
// HealthCheckResults describes the results/logs from a healthcheck
type HealthCheckResults struct {
// Status healthy or unhealthy
diff --git a/libpod/events.go b/libpod/events.go
index 22c51aeec..342af02d2 100644
--- a/libpod/events.go
+++ b/libpod/events.go
@@ -33,7 +33,7 @@ func (c *Container) newContainerEvent(status events.Status) {
}
if err := c.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write pod event: %q", err)
+ logrus.Errorf("Unable to write pod event: %q", err)
}
}
@@ -46,7 +46,7 @@ func (c *Container) newContainerExitedEvent(exitCode int32) {
e.Type = events.Container
e.ContainerExitCode = int(exitCode)
if err := c.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write container exited event: %q", err)
+ logrus.Errorf("Unable to write container exited event: %q", err)
}
}
@@ -61,7 +61,7 @@ func (c *Container) newExecDiedEvent(sessionID string, exitCode int) {
e.Attributes = make(map[string]string)
e.Attributes["execID"] = sessionID
if err := c.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write exec died event: %q", err)
+ logrus.Errorf("Unable to write exec died event: %q", err)
}
}
@@ -73,7 +73,7 @@ func (c *Container) newNetworkEvent(status events.Status, netName string) {
e.Type = events.Network
e.Network = netName
if err := c.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write pod event: %q", err)
+ logrus.Errorf("Unable to write pod event: %q", err)
}
}
@@ -84,7 +84,7 @@ func (p *Pod) newPodEvent(status events.Status) {
e.Name = p.Name()
e.Type = events.Pod
if err := p.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write pod event: %q", err)
+ logrus.Errorf("Unable to write pod event: %q", err)
}
}
@@ -94,7 +94,7 @@ func (r *Runtime) newSystemEvent(status events.Status) {
e.Type = events.System
if err := r.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write system event: %q", err)
+ logrus.Errorf("Unable to write system event: %q", err)
}
}
@@ -104,7 +104,7 @@ func (v *Volume) newVolumeEvent(status events.Status) {
e.Name = v.Name()
e.Type = events.Volume
if err := v.runtime.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write volume event: %q", err)
+ logrus.Errorf("Unable to write volume event: %q", err)
}
}
diff --git a/libpod/events/journal_linux.go b/libpod/events/journal_linux.go
index 3e16d8679..72e03355a 100644
--- a/libpod/events/journal_linux.go
+++ b/libpod/events/journal_linux.go
@@ -195,7 +195,7 @@ func newEventFromJournalEntry(entry *sdjournal.JournalEntry) (*Event, error) { /
if code, ok := entry.Fields["PODMAN_EXIT_CODE"]; ok {
intCode, err := strconv.Atoi(code)
if err != nil {
- logrus.Errorf("Error parsing event exit code %s", code)
+ logrus.Errorf("Parsing event exit code %s", code)
} else {
newEvent.ContainerExitCode = intCode
}
diff --git a/libpod/info.go b/libpod/info.go
index 36dc8bc2a..a2fd18491 100644
--- a/libpod/info.go
+++ b/libpod/info.go
@@ -186,7 +186,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
conmonInfo, ociruntimeInfo, err := r.defaultOCIRuntime.RuntimeInfo()
if err != nil {
- logrus.Errorf("Error getting info on OCI runtime %s: %v", r.defaultOCIRuntime.Name(), err)
+ logrus.Errorf("Getting info on OCI runtime %s: %v", r.defaultOCIRuntime.Name(), err)
} else {
info.Conmon = conmonInfo
info.OCIRuntime = ociruntimeInfo
diff --git a/libpod/kube.go b/libpod/kube.go
index 9b96dd99d..57d99f3ef 100644
--- a/libpod/kube.go
+++ b/libpod/kube.go
@@ -1,9 +1,11 @@
package libpod
import (
+ "context"
"fmt"
"math/rand"
"os"
+ "reflect"
"sort"
"strconv"
"strings"
@@ -27,14 +29,14 @@ import (
// GenerateForKube takes a slice of libpod containers and generates
// one v1.Pod description that includes just a single container.
-func GenerateForKube(ctrs []*Container) (*v1.Pod, error) {
+func GenerateForKube(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
// Generate the v1.Pod yaml description
- return simplePodWithV1Containers(ctrs)
+ return simplePodWithV1Containers(ctx, ctrs)
}
// GenerateForKube takes a slice of libpod containers and generates
// one v1.Pod description
-func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
+func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, error) {
// Generate the v1.Pod yaml description
var (
ports []v1.ContainerPort //nolint
@@ -78,7 +80,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
servicePorts = containerPortsToServicePorts(ports)
hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host))
}
- pod, err := p.podWithContainers(allContainers, ports, hostNetwork)
+ pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork)
if err != nil {
return nil, servicePorts, err
}
@@ -88,7 +90,7 @@ func (p *Pod) GenerateForKube() (*v1.Pod, []v1.ServicePort, error) {
// so set it at here
for _, ctr := range allContainers {
if !ctr.IsInfra() {
- switch ctr.Config().RestartPolicy {
+ switch ctr.config.RestartPolicy {
case define.RestartPolicyAlways:
pod.Spec.RestartPolicy = v1.RestartPolicyAlways
case define.RestartPolicyOnFailure:
@@ -218,7 +220,7 @@ func containersToServicePorts(containers []v1.Container) []v1.ServicePort {
return sps
}
-func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) {
+func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork bool) (*v1.Pod, error) {
deDupPodVolumes := make(map[string]*v1.Volume)
first := true
podContainers := make([]v1.Container, 0, len(containers))
@@ -239,7 +241,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
isInit := ctr.IsInitCtr()
- ctr, volumes, _, err := containerToV1Container(ctr)
+ ctr, volumes, _, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -251,7 +253,9 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
// We add the original port declarations from the libpod infra container
// to the first kubernetes container description because otherwise we loose
// the original container/port bindings.
- if first && len(ports) > 0 {
+ // Add the port configuration to the first regular container or the first
+ // init container if only init containers have been created in the pod.
+ if first && len(ports) > 0 && (!isInit || len(containers) == 2) {
ctr.Ports = ports
first = false
}
@@ -267,7 +271,7 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPor
deDupPodVolumes[vol.Name] = &vol
}
} else {
- _, _, infraDNS, err := containerToV1Container(ctr)
+ _, _, infraDNS, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -337,7 +341,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta
// simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated
// for a single container. we "insert" that container description in a pod.
-func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
+func simplePodWithV1Containers(ctx context.Context, ctrs []*Container) (*v1.Pod, error) {
kubeCtrs := make([]v1.Container, 0, len(ctrs))
kubeInitCtrs := []v1.Container{}
kubeVolumes := make([]v1.Volume, 0)
@@ -355,7 +359,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
if !ctr.HostNetwork() {
hostNetwork = false
}
- kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctr)
+ kubeCtr, kubeVols, ctrDNS, err := containerToV1Container(ctx, ctr)
if err != nil {
return nil, err
}
@@ -411,7 +415,7 @@ func simplePodWithV1Containers(ctrs []*Container) (*v1.Pod, error) {
// containerToV1Container converts information we know about a libpod container
// to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) {
+func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []v1.Volume, *v1.PodDNSConfig, error) {
kubeContainer := v1.Container{}
kubeVolumes := []v1.Volume{}
kubeSec, err := generateKubeSecurityContext(c)
@@ -422,7 +426,7 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNS
// NOTE: a privileged container mounts all of /dev/*.
if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
- kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
+ kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.config.Spec.Linux.Devices)
return kubeContainer, kubeVolumes, nil, errors.Wrapf(define.ErrNotImplemented, "linux devices")
}
@@ -463,6 +467,17 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, *v1.PodDNS
_, image := c.Image()
kubeContainer.Image = image
kubeContainer.Stdin = c.Stdin()
+ img, _, err := c.runtime.libimageRuntime.LookupImage(image, nil)
+ if err != nil {
+ return kubeContainer, kubeVolumes, nil, err
+ }
+ imgData, err := img.Inspect(ctx, false)
+ if err != nil {
+ return kubeContainer, kubeVolumes, nil, err
+ }
+ if reflect.DeepEqual(imgData.Config.Cmd, kubeContainer.Command) {
+ kubeContainer.Command = nil
+ }
kubeContainer.WorkingDir = c.WorkingDir()
kubeContainer.Ports = ports
diff --git a/libpod/lock/file/file_lock.go b/libpod/lock/file/file_lock.go
index 2643c9211..4685872b6 100644
--- a/libpod/lock/file/file_lock.go
+++ b/libpod/lock/file/file_lock.go
@@ -139,7 +139,7 @@ func (locks *FileLocks) DeallocateAllLocks() error {
err := os.Remove(p)
if err != nil {
lastErr = err
- logrus.Errorf("deallocating lock %s", p)
+ logrus.Errorf("Deallocating lock %s", p)
}
}
return lastErr
diff --git a/libpod/lock/shm/shm_lock_nocgo.go b/libpod/lock/shm/shm_lock_nocgo.go
index ea1488c90..627344d9c 100644
--- a/libpod/lock/shm/shm_lock_nocgo.go
+++ b/libpod/lock/shm/shm_lock_nocgo.go
@@ -16,7 +16,7 @@ type SHMLocks struct {
// numLocks must not be 0, and may be rounded up to a multiple of the bitmap
// size used by the underlying implementation.
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return &SHMLocks{}, nil
}
@@ -24,13 +24,13 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
// POSIX semaphores. numLocks must match the number of locks the shared memory
// segment was created with.
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return &SHMLocks{}, nil
}
// GetMaxLocks returns the maximum number of locks in the SHM
func (locks *SHMLocks) GetMaxLocks() uint32 {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return 0
}
@@ -40,7 +40,7 @@ func (locks *SHMLocks) GetMaxLocks() uint32 {
// fail to release, causing a program freeze.
// Close() is only intended to be used while testing the locks.
func (locks *SHMLocks) Close() error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
@@ -50,7 +50,7 @@ func (locks *SHMLocks) Close() error {
// Allocations past the maximum number of locks given when the SHM segment was
// created will result in an error, and no semaphore will be allocated.
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return 0, nil
}
@@ -59,7 +59,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
// If the semaphore is already in use or the index is invalid an error will be
// returned.
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
@@ -67,14 +67,14 @@ func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
// reallocated to another container or pod.
// The given semaphore must be already allocated, or an error will be returned.
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
// DeallocateAllSemaphores frees all semaphores so they can be reallocated to
// other containers and pods.
func (locks *SHMLocks) DeallocateAllSemaphores() error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
@@ -86,7 +86,7 @@ func (locks *SHMLocks) DeallocateAllSemaphores() error {
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
@@ -97,6 +97,6 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
// but before the caller has queried the database to determine this, will
// succeed.
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
- logrus.Error("locks are not supported without cgo")
+ logrus.Error("Locks are not supported without cgo")
return nil
}
diff --git a/libpod/logs/log.go b/libpod/logs/log.go
index a584de0ee..19a121fe9 100644
--- a/libpod/logs/log.go
+++ b/libpod/logs/log.go
@@ -267,6 +267,6 @@ func (l *LogLine) Write(stdout io.Writer, stderr io.Writer, logOpts *LogOptions)
}
default:
// Warn the user if the device type does not match. Most likely the file is corrupted.
- logrus.Warnf("unknown Device type '%s' in log file from Container %s", l.Device, l.CID)
+ logrus.Warnf("Unknown Device type '%s' in log file from Container %s", l.Device, l.CID)
}
}
diff --git a/libpod/network/cni/cni_conversion.go b/libpod/network/cni/cni_conversion.go
index 7a73b874a..93d871767 100644
--- a/libpod/network/cni/cni_conversion.go
+++ b/libpod/network/cni/cni_conversion.go
@@ -104,7 +104,7 @@ func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath str
default:
// A warning would be good but users would get this warning everytime so keep this at info level.
- logrus.Infof("unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information",
+ logrus.Infof("Unsupported CNI config type %s in %s, this network can still be used but inspect or list cannot show all information",
firstPlugin.Network.Type, confPath)
}
diff --git a/libpod/network/cni/cni_exec.go b/libpod/network/cni/cni_exec.go
index c4d7f49f7..ae857bcfb 100644
--- a/libpod/network/cni/cni_exec.go
+++ b/libpod/network/cni/cni_exec.go
@@ -30,6 +30,7 @@ import (
"github.com/containernetworking/cni/pkg/invoke"
"github.com/containernetworking/cni/pkg/version"
+ "github.com/containers/podman/v3/pkg/rootless"
)
type cniExec struct {
@@ -67,6 +68,17 @@ func (e *cniExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData [
c.Stdout = stdout
c.Stderr = stderr
+ // The dnsname plugin tries to use XDG_RUNTIME_DIR to store files.
+ // podman run will have XDG_RUNTIME_DIR set and thus the cni plugin can use
+ // it. The problem is that XDG_RUNTIME_DIR is unset for the conmon process
+ // for rootful users. This causes issues since the cleanup process is spawned
+ // by conmon and thus not have XDG_RUNTIME_DIR set to same value as podman run.
+ // Because of it dnsname will not find the config files and cannot correctly cleanup.
+ // To fix this we should also unset XDG_RUNTIME_DIR for the cni plugins as rootful.
+ if !rootless.IsRootless() {
+ c.Env = append(c.Env, "XDG_RUNTIME_DIR=")
+ }
+
err := c.Run()
if err != nil {
return nil, annotatePluginError(err, pluginPath, stdout.Bytes(), stderr.Bytes())
diff --git a/libpod/network/cni/config.go b/libpod/network/cni/config.go
index 2a6ad8eb3..3df155637 100644
--- a/libpod/network/cni/config.go
+++ b/libpod/network/cni/config.go
@@ -162,7 +162,7 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error {
err = netlink.LinkDel(link)
// only log the error, it is not fatal
if err != nil {
- logrus.Infof("failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err)
+ logrus.Infof("Failed to remove network interface %s: %v", network.libpodNet.NetworkInterface, err)
}
}
}
@@ -170,7 +170,11 @@ func (n *cniNetwork) NetworkRemove(nameOrID string) error {
file := network.filename
delete(n.networks, network.libpodNet.Name)
- return os.Remove(file)
+ // make sure to not error for ErrNotExist
+ if err := os.Remove(file); err != nil && !errors.Is(err, os.ErrNotExist) {
+ return err
+ }
+ return nil
}
// NetworkList will return all known Networks. Optionally you can
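
The removal path above tolerates an already-missing config file. A small stand-alone illustration of that pattern; the file name is hypothetical:

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    // removeIfPresent mirrors the ErrNotExist check in NetworkRemove above:
    // a missing file is not an error, anything else is.
    func removeIfPresent(path string) error {
        if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
            return err
        }
        return nil
    }

    func main() {
        // Hypothetical path, used only for illustration.
        if err := removeIfPresent("/tmp/mynet.conflist"); err != nil {
            fmt.Println("remove failed:", err)
        }
    }
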
diff --git a/libpod/network/cni/config_test.go b/libpod/network/cni/config_test.go
index a0a0ea1af..288cf4626 100644
--- a/libpod/network/cni/config_test.go
+++ b/libpod/network/cni/config_test.go
@@ -1021,6 +1021,27 @@ var _ = Describe("Config", func() {
Expect(err.Error()).To(ContainSubstring("subnet 10.10.0.0/24 is already used on the host or by another config"))
})
+ It("remove network should not error when config file does not exists on disk", func() {
+ name := "mynet"
+ network := types.Network{Name: name}
+ _, err := libpodNet.NetworkCreate(network)
+ Expect(err).To(BeNil())
+
+ path := filepath.Join(cniConfDir, name+".conflist")
+ Expect(path).To(BeARegularFile())
+
+ err = os.Remove(path)
+ Expect(err).To(BeNil())
+ Expect(path).ToNot(BeARegularFile())
+
+ err = libpodNet.NetworkRemove(name)
+ Expect(err).To(BeNil())
+
+ nets, err := libpodNet.NetworkList()
+ Expect(err).To(BeNil())
+ Expect(nets).To(HaveLen(1))
+ Expect(nets).ToNot(ContainElement(HaveNetworkName(name)))
+ })
})
Context("network load valid existing ones", func() {
diff --git a/libpod/network/cni/run.go b/libpod/network/cni/run.go
index 834e7c867..bd873f89b 100644
--- a/libpod/network/cni/run.go
+++ b/libpod/network/cni/run.go
@@ -186,9 +186,6 @@ outer:
}
return errors.Errorf("requested static ip %s not in any subnet on network %s", ip.String(), network.libpodNet.Name)
}
- if len(netOpts.Aliases) > 0 && !network.libpodNet.DNSEnabled {
- return errors.New("cannot set aliases on a network without dns enabled")
- }
return nil
}
@@ -273,7 +270,7 @@ func (n *cniNetwork) teardown(namespacePath string, options types.TeardownOption
if err == nil {
rt = newRt
} else {
- logrus.Warnf("failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
+ logrus.Warnf("Failed to load cached network config: %v, falling back to loading network %s from disk", err, name)
network := n.networks[name]
if network == nil {
multiErr = multierror.Append(multiErr, errors.Wrapf(define.ErrNoSuchNetwork, "network %s", name))
diff --git a/libpod/network/cni/run_test.go b/libpod/network/cni/run_test.go
index f6da22a76..965203c2a 100644
--- a/libpod/network/cni/run_test.go
+++ b/libpod/network/cni/run_test.go
@@ -966,6 +966,26 @@ var _ = Describe("run CNI", func() {
})
})
+ It("setup with aliases but dns disabled should work", func() {
+ runTest(func() {
+ defNet := types.DefaultNetworkName
+ intName := "eth0"
+ setupOpts := types.SetupOptions{
+ NetworkOptions: types.NetworkOptions{
+ ContainerID: stringid.GenerateNonCryptoID(),
+ Networks: map[string]types.PerNetworkOptions{
+ defNet: {
+ InterfaceName: intName,
+ Aliases: []string{"somealias"},
+ },
+ },
+ },
+ }
+ _, err := libpodNet.Setup(netNSContainer.Path(), setupOpts)
+ Expect(err).ToNot(HaveOccurred())
+ })
+ })
+
})
Context("invalid network setup test", func() {
@@ -1052,27 +1072,6 @@ var _ = Describe("run CNI", func() {
})
})
- It("setup with aliases but dns disabled", func() {
- runTest(func() {
- defNet := types.DefaultNetworkName
- intName := "eth0"
- setupOpts := types.SetupOptions{
- NetworkOptions: types.NetworkOptions{
- ContainerID: stringid.GenerateNonCryptoID(),
- Networks: map[string]types.PerNetworkOptions{
- defNet: {
- InterfaceName: intName,
- Aliases: []string{"somealias"},
- },
- },
- },
- }
- _, err := libpodNet.Setup(netNSContainer.Path(), setupOpts)
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("cannot set aliases on a network without dns enabled"))
- })
- })
-
It("setup without networks", func() {
runTest(func() {
setupOpts := types.SetupOptions{
@@ -1256,7 +1255,7 @@ var _ = Describe("run CNI", func() {
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("network somenet: network not found"))
logString := logBuffer.String()
- Expect(logString).To(ContainSubstring("failed to load cached network config"))
+ Expect(logString).To(ContainSubstring("Failed to load cached network config"))
})
})
@@ -1283,7 +1282,7 @@ var _ = Describe("run CNI", func() {
err = libpodNet.Teardown(netNSContainer.Path(), teardownOpts)
Expect(err).To(BeNil())
logString := logBuffer.String()
- Expect(logString).To(ContainSubstring("failed to load cached network config"))
+ Expect(logString).To(ContainSubstring("Failed to load cached network config"))
})
})
})
diff --git a/libpod/network/types/network.go b/libpod/network/types/network.go
index 68a32d499..2fe4f3da2 100644
--- a/libpod/network/types/network.go
+++ b/libpod/network/types/network.go
@@ -151,7 +151,9 @@ type PerNetworkOptions struct {
// StaticIPv4 for this container. Optional.
StaticIPs []net.IP `json:"static_ips,omitempty"`
// Aliases contains a list of names which the dns server should resolve
- // to this container. Can only be set when DNSEnabled is true on the Network.
+ // to this container. Should only be set when DNSEnabled is true on the Network.
+ // If aliases are set but there is no dns support for this network, the
+ // network interface implementation should ignore them and NOT error.
// Optional.
Aliases []string `json:"aliases,omitempty"`
// StaticMac for this container. Optional.
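
The comment above asks implementations to ignore aliases rather than error when a network has no DNS support. A hedged sketch of such a guard; the type and field names are simplified stand-ins, not the actual libpod API:

    package main

    import "fmt"

    // network is a simplified stand-in for the real Network type; only the
    // DNSEnabled field matters for this illustration.
    type network struct {
        DNSEnabled bool
    }

    // effectiveAliases drops requested aliases instead of erroring when the
    // network has no DNS support, as the contract above asks implementations to do.
    func effectiveAliases(net network, requested []string) []string {
        if !net.DNSEnabled {
            return nil
        }
        return requested
    }

    func main() {
        fmt.Println(effectiveAliases(network{DNSEnabled: false}, []string{"db", "cache"}))
        fmt.Println(effectiveAliases(network{DNSEnabled: true}, []string{"db", "cache"}))
    }
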
diff --git a/libpod/networking_linux.go b/libpod/networking_linux.go
index 96b6fb298..e792a410c 100644
--- a/libpod/networking_linux.go
+++ b/libpod/networking_linux.go
@@ -48,6 +48,41 @@ const (
persistentCNIDir = "/var/lib/cni"
)
+// GetAllNetworkAliases returns all configured aliases for this container.
+// It also adds the container's short ID as an alias to match docker.
+func (c *Container) GetAllNetworkAliases() (map[string][]string, error) {
+ allAliases, err := c.runtime.state.GetAllNetworkAliases(c)
+ if err != nil {
+ return nil, err
+ }
+
+ // get all the attached networks; we cannot use GetAllNetworkAliases()
+ // since it returns nil if there are no aliases
+ nets, _, err := c.networks()
+ if err != nil {
+ return nil, err
+ }
+
+ // add container short ID as alias to match docker
+ for _, net := range nets {
+ allAliases[net] = append(allAliases[net], c.config.ID[:12])
+ }
+ return allAliases, nil
+}
+
+// GetNetworkAliases returns configured aliases for this network.
+// It also adds the container's short ID as an alias to match docker.
+func (c *Container) GetNetworkAliases(netName string) ([]string, error) {
+ aliases, err := c.runtime.state.GetNetworkAliases(c, netName)
+ if err != nil {
+ return nil, err
+ }
+
+ // add container short ID as alias to match docker
+ aliases = append(aliases, c.config.ID[:12])
+ return aliases, nil
+}
+
func (c *Container) getNetworkOptions() (types.NetworkOptions, error) {
opts := types.NetworkOptions{
ContainerID: c.config.ID,
@@ -61,7 +96,7 @@ func (c *Container) getNetworkOptions() (types.NetworkOptions, error) {
if err != nil {
return opts, err
}
- aliases, err := c.runtime.state.GetAllNetworkAliases(c)
+ aliases, err := c.GetAllNetworkAliases()
if err != nil {
return opts, err
}
@@ -320,14 +355,14 @@ func (r *RootlessCNI) Cleanup(runtime *Runtime) error {
}
}
if err != nil {
- logrus.Errorf("failed to kill slirp4netns process: %s", err)
+ logrus.Errorf("Failed to kill slirp4netns process: %s", err)
}
err = os.RemoveAll(r.dir)
if err != nil {
logrus.Error(err)
}
} else if err != nil && !os.IsNotExist(err) {
- logrus.Errorf("could not read rootless cni directory, skipping cleanup: %s", err)
+ logrus.Errorf("Could not read rootless cni directory, skipping cleanup: %s", err)
}
}
return nil
@@ -458,7 +493,7 @@ func (r *Runtime) GetRootlessCNINetNs(new bool) (*RootlessCNI, error) {
defer func() {
if err := cmd.Process.Release(); err != nil {
- logrus.Errorf("unable to release command process: %q", err)
+ logrus.Errorf("Unable to release command process: %q", err)
}
}()
@@ -635,10 +670,10 @@ func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.St
defer func() {
if retErr != nil {
if err := netns.UnmountNS(ctrNS); err != nil {
- logrus.Errorf("Error unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
+ logrus.Errorf("Unmounting partially created network namespace for container %s: %v", ctr.ID(), err)
}
if err := ctrNS.Close(); err != nil {
- logrus.Errorf("Error closing partially created network namespace for container %s: %v", ctr.ID(), err)
+ logrus.Errorf("Closing partially created network namespace for container %s: %v", ctr.ID(), err)
}
}
}()
@@ -872,7 +907,7 @@ func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.Statu
}
}
- aliases, err := ctr.runtime.state.GetAllNetworkAliases(ctr)
+ aliases, err := ctr.GetAllNetworkAliases()
if err != nil {
return nil, err
}
@@ -975,6 +1010,11 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
for _, net := range networks {
cniNet := new(define.InspectAdditionalNetwork)
cniNet.NetworkID = net
+ aliases, err := c.GetNetworkAliases(net)
+ if err != nil {
+ return nil, err
+ }
+ cniNet.Aliases = aliases
settings.Networks[net] = cniNet
}
}
@@ -1009,7 +1049,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
return nil, err
}
- aliases, err := c.runtime.state.GetNetworkAliases(c, name)
+ aliases, err := c.GetNetworkAliases(name)
if err != nil {
return nil, err
}
@@ -1222,6 +1262,14 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e
// get network status before we connect
networkStatus := c.getNetworkStatus()
+ network, err := c.runtime.network.NetworkInspect(netName)
+ if err != nil {
+ return err
+ }
+ if !network.DNSEnabled && len(aliases) > 0 {
+ return errors.Wrapf(define.ErrInvalidArg, "cannot set network aliases for network %q because dns is disabled", netName)
+ }
+
if err := c.runtime.state.NetworkConnect(c, netName, aliases); err != nil {
return err
}
@@ -1253,6 +1301,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, aliases []string) e
if !exists {
return errors.Errorf("no network interface name for container %s on network %s", c.config.ID, netName)
}
+ aliases = append(aliases, c.config.ID[:12])
opts.Networks = map[string]types.PerNetworkOptions{
netName: {
Aliases: aliases,
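
The alias plumbing in this file consistently appends the container's 12-character short ID so DNS resolution matches Docker. A tiny stand-alone sketch; the container ID is made up:

    package main

    import "fmt"

    // withShortID appends the 12-character short ID as an extra DNS alias,
    // mirroring what GetNetworkAliases and NetworkConnect do above.
    func withShortID(aliases []string, containerID string) []string {
        return append(aliases, containerID[:12])
    }

    func main() {
        id := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
        fmt.Println(withShortID([]string{"web"}, id))
    }
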
diff --git a/libpod/networking_slirp4netns.go b/libpod/networking_slirp4netns.go
index a5c035757..46cda89a9 100644
--- a/libpod/networking_slirp4netns.go
+++ b/libpod/networking_slirp4netns.go
@@ -210,7 +210,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
var err error
path, err = exec.LookPath("slirp4netns")
if err != nil {
- logrus.Errorf("could not find slirp4netns, the network namespace won't be configured: %v", err)
+ logrus.Errorf("Could not find slirp4netns, the network namespace won't be configured: %v", err)
return nil
}
}
@@ -222,7 +222,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
- havePortMapping := len(ctr.Config().PortMappings) > 0
+ havePortMapping := len(ctr.config.PortMappings) > 0
logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("slirp4netns-%s.log", ctr.config.ID))
ctrNetworkSlipOpts := []string{}
@@ -303,7 +303,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container) error {
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
if err := cmd.Process.Release(); err != nil {
- logrus.Errorf("unable to release command process: %q", err)
+ logrus.Errorf("Unable to release command process: %q", err)
}
}()
@@ -421,7 +421,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
if status.Exited() {
// Seek at the beginning of the file and read all its content
if _, err := logFile.Seek(0, 0); err != nil {
- logrus.Errorf("could not seek log file: %q", err)
+ logrus.Errorf("Could not seek log file: %q", err)
}
logContent, err := ioutil.ReadAll(logFile)
if err != nil {
@@ -506,7 +506,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
defer func() {
servicereaper.AddPID(cmd.Process.Pid)
if err := cmd.Process.Release(); err != nil {
- logrus.Errorf("unable to release rootlessport process: %q", err)
+ logrus.Errorf("Unable to release rootlessport process: %q", err)
}
}()
if err := waitForSync(syncR, cmd, logFile, 3*time.Second); err != nil {
@@ -559,7 +559,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
}
defer func() {
if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close connection: %q", err)
+ logrus.Errorf("Unable to close connection: %q", err)
}
}()
hostIP := i.HostIP
diff --git a/libpod/oci_attach_linux.go b/libpod/oci_attach_linux.go
index de435b58a..9ae46eeda 100644
--- a/libpod/oci_attach_linux.go
+++ b/libpod/oci_attach_linux.go
@@ -67,7 +67,7 @@ func (c *Container) attach(streams *define.AttachStreams, keys string, resize <-
}
defer func() {
if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close socket: %q", err)
+ logrus.Errorf("Unable to close socket: %q", err)
}
}()
@@ -142,7 +142,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
if newSize != nil {
err = c.ociRuntime.ExecAttachResize(c, sessionID, *newSize)
if err != nil {
- logrus.Warn("resize failed", err)
+ logrus.Warnf("Resize failed: %v", err)
}
}
@@ -153,7 +153,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
}
defer func() {
if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close socket: %q", err)
+ logrus.Errorf("Unable to close socket: %q", err)
}
}()
@@ -210,7 +210,7 @@ func setupStdioChannels(streams *define.AttachStreams, conn *net.UnixConn, detac
_, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
if err == nil {
if connErr := conn.CloseWrite(); connErr != nil {
- logrus.Errorf("unable to close conn: %q", connErr)
+ logrus.Errorf("Unable to close conn: %q", connErr)
}
}
}
diff --git a/libpod/oci_conmon_exec_linux.go b/libpod/oci_conmon_exec_linux.go
index 5a7677b04..822377bfe 100644
--- a/libpod/oci_conmon_exec_linux.go
+++ b/libpod/oci_conmon_exec_linux.go
@@ -528,7 +528,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
if newSize != nil {
err = c.ociRuntime.ExecAttachResize(c, sessionID, *newSize)
if err != nil {
- logrus.Warn("resize failed", err)
+ logrus.Warnf("Resize failed: %v", err)
}
}
@@ -540,7 +540,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
}
defer func() {
if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close socket: %q", err)
+ logrus.Errorf("Unable to close socket: %q", err)
}
}()
diff --git a/libpod/oci_conmon_linux.go b/libpod/oci_conmon_linux.go
index 831e89223..f82fc4ce6 100644
--- a/libpod/oci_conmon_linux.go
+++ b/libpod/oci_conmon_linux.go
@@ -215,7 +215,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
}
defer func() {
if err := unix.Setns(int(fd.Fd()), unix.CLONE_NEWNS); err != nil {
- logrus.Errorf("unable to clone new namespace: %q", err)
+ logrus.Errorf("Unable to clone new namespace: %q", err)
}
}()
@@ -524,7 +524,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
conn = newConn
defer func() {
if err := conn.Close(); err != nil {
- logrus.Errorf("unable to close container %s attach socket: %q", ctr.ID(), err)
+ logrus.Errorf("Unable to close container %s attach socket: %q", ctr.ID(), err)
}
}()
@@ -936,7 +936,7 @@ func waitPidStop(pid int, timeout time.Duration) error {
close(done)
return
}
- logrus.Errorf("Error pinging PID %d with signal 0: %v", pid, err)
+ logrus.Errorf("Pinging PID %d with signal 0: %v", pid, err)
}
time.Sleep(100 * time.Millisecond)
}
@@ -1150,7 +1150,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
if ctr.config.NetMode.IsSlirp4netns() || rootless.IsRootless() {
if ctr.config.PostConfigureNetNS {
- havePortMapping := len(ctr.Config().PortMappings) > 0
+ havePortMapping := len(ctr.config.PortMappings) > 0
if havePortMapping {
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
if err != nil {
@@ -1199,7 +1199,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
pid, err := readConmonPipeData(parentSyncPipe, ociLog)
if err != nil {
if err2 := r.DeleteContainer(ctr); err2 != nil {
- logrus.Errorf("Error removing container %s from runtime after creation failed", ctr.ID())
+ logrus.Errorf("Removing container %s from runtime after creation failed", ctr.ID())
}
return err
}
@@ -1207,7 +1207,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
conmonPID, err := readConmonPidFile(ctr.config.ConmonPidFile)
if err != nil {
- logrus.Warnf("error reading conmon pid file for container %s: %v", ctr.ID(), err)
+ logrus.Warnf("Error reading conmon pid file for container %s: %v", ctr.ID(), err)
} else if conmonPID > 0 {
// conmon not having a pid file is a valid state, so don't set it if we don't have it
logrus.Infof("Got Conmon PID as %d", conmonPID)
@@ -1220,7 +1220,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
default:
if sent, err := daemon.SdNotify(false, fmt.Sprintf("MAINPID=%d", conmonPID)); err != nil {
- logrus.Errorf("Error notifying systemd of Conmon PID: %v", err)
+ logrus.Errorf("Notifying systemd of Conmon PID: %v", err)
} else if sent {
logrus.Debugf("Notify MAINPID sent successfully")
}
@@ -1346,7 +1346,7 @@ func startCommandGivenSelinux(cmd *exec.Cmd, ctr *Container) error {
defer func() {
if err := os.Setenv("NOTIFY_SOCKET", ctr.notifySocket); err != nil {
- logrus.Errorf("Error resetting NOTIFY_SOCKET=%s", ctr.notifySocket)
+ logrus.Errorf("Resetting NOTIFY_SOCKET=%s", ctr.notifySocket)
}
}()
}
@@ -1385,7 +1385,7 @@ func startCommandGivenSelinux(cmd *exec.Cmd, ctr *Container) error {
// Ignore error returned from SetProcessLabel("") call,
// can't recover.
if labelErr := label.SetProcessLabel(""); labelErr != nil {
- logrus.Errorf("unable to set process label: %q", err)
+ logrus.Errorf("Unable to set process label: %q", err)
}
runtime.UnlockOSThread()
return err
@@ -1608,7 +1608,7 @@ func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid
numW, err2 := http.Write(buf[1:numR])
if err2 != nil {
if err != nil {
- logrus.Errorf("Error reading container %s STDOUT: %v", cid, err)
+ logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
} else if numW+1 != numR {
@@ -1618,7 +1618,7 @@ func httpAttachTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter, cid
// there isn't a delay on the terminal side.
if err2 := http.Flush(); err2 != nil {
if err != nil {
- logrus.Errorf("Error reading container %s STDOUT: %v", cid, err)
+ logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
}
@@ -1670,7 +1670,7 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
numH, err2 := http.Write(headerBuf)
if err2 != nil {
if err != nil {
- logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return err2
@@ -1680,7 +1680,7 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
// of the protocol.
if numH != 8 {
if err != nil {
- logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return io.ErrShortWrite
@@ -1689,13 +1689,13 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
numW, err2 := http.Write(buf[1:numR])
if err2 != nil {
if err != nil {
- logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return err2
} else if numW+1 != numR {
if err != nil {
- logrus.Errorf("Error reading container %s standard streams: %v", cid, err)
+ logrus.Errorf("Reading container %s standard streams: %v", cid, err)
}
return io.ErrShortWrite
@@ -1704,7 +1704,7 @@ func httpAttachNonTerminalCopy(container *net.UnixConn, http *bufio.ReadWriter,
// there isn't a delay on the terminal side.
if err2 := http.Flush(); err2 != nil {
if err != nil {
- logrus.Errorf("Error reading container %s STDOUT: %v", cid, err)
+ logrus.Errorf("Reading container %s STDOUT: %v", cid, err)
}
return err2
}
diff --git a/libpod/oci_util.go b/libpod/oci_util.go
index 7db267915..c1afc0d20 100644
--- a/libpod/oci_util.go
+++ b/libpod/oci_util.go
@@ -72,7 +72,7 @@ func bindPorts(ports []types.OCICNIPortMapping) ([]*os.File, error) {
// note that this does not affect the fd, see the godoc for server.File()
err = server.Close()
if err != nil {
- logrus.Warnf("failed to close connection: %v", err)
+ logrus.Warnf("Failed to close connection: %v", err)
}
case "tcp":
@@ -106,13 +106,13 @@ func bindPorts(ports []types.OCICNIPortMapping) ([]*os.File, error) {
// note that this does not affect the fd, see the godoc for server.File()
err = server.Close()
if err != nil {
- logrus.Warnf("failed to close connection: %v", err)
+ logrus.Warnf("Failed to close connection: %v", err)
}
case "sctp":
if !notifySCTP {
notifySCTP = true
- logrus.Warnf("port reservation for SCTP is not supported")
+ logrus.Warnf("Port reservation for SCTP is not supported")
}
default:
return nil, fmt.Errorf("unknown protocol %s", i.Protocol)
diff --git a/libpod/pod.go b/libpod/pod.go
index e4516b354..c0b0a974b 100644
--- a/libpod/pod.go
+++ b/libpod/pod.go
@@ -164,8 +164,7 @@ func (p *Pod) PidMode() string {
if err != nil {
return ""
}
- conf := infra.Config()
- ctrSpec := conf.Spec
+ ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == specs.PIDNamespace {
@@ -186,8 +185,7 @@ func (p *Pod) UserNSMode() string {
if err != nil {
return ""
}
- conf := infra.Config()
- ctrSpec := conf.Spec
+ ctrSpec := infra.config.Spec
if ctrSpec != nil && ctrSpec.Linux != nil {
for _, ns := range ctrSpec.Linux.Namespaces {
if ns.Type == specs.UserNamespace {
diff --git a/libpod/pod_api.go b/libpod/pod_api.go
index ff818edc2..971309f77 100644
--- a/libpod/pod_api.go
+++ b/libpod/pod_api.go
@@ -34,7 +34,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
}
// If the container is a once init container, we need to remove it
// after it runs
- if initCon.Config().InitContainerType == define.OneShotInitContainer {
+ if initCon.config.InitContainerType == define.OneShotInitContainer {
icLock := initCon.lock
icLock.Lock()
if err := p.runtime.removeContainer(ctx, initCon, false, false, true); err != nil {
@@ -43,7 +43,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
}
// Removing a container this way requires an explicit call to clean up the db
if err := p.runtime.state.RemoveContainerFromPod(p, initCon); err != nil {
- logrus.Errorf("Error removing container %s from database: %v", initCon.ID(), err)
+ logrus.Errorf("Removing container %s from database: %v", initCon.ID(), err)
}
icLock.Unlock()
}
@@ -590,16 +590,16 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
return nil, err
}
infraConfig = new(define.InspectPodInfraConfig)
- infraConfig.HostNetwork = !infra.Config().ContainerNetworkConfig.UseImageHosts
- infraConfig.StaticIP = infra.Config().ContainerNetworkConfig.StaticIP
- infraConfig.NoManageResolvConf = infra.Config().UseImageResolvConf
- infraConfig.NoManageHosts = infra.Config().UseImageHosts
+ infraConfig.HostNetwork = !infra.config.ContainerNetworkConfig.UseImageHosts
+ infraConfig.StaticIP = infra.config.ContainerNetworkConfig.StaticIP
+ infraConfig.NoManageResolvConf = infra.config.UseImageResolvConf
+ infraConfig.NoManageHosts = infra.config.UseImageHosts
infraConfig.CPUPeriod = p.CPUPeriod()
infraConfig.CPUQuota = p.CPUQuota()
infraConfig.CPUSetCPUs = p.ResourceLim().CPU.Cpus
infraConfig.PidNS = p.PidMode()
infraConfig.UserNS = p.UserNSMode()
- namedVolumes, mounts := infra.sortUserVolumes(infra.Config().Spec)
+ namedVolumes, mounts := infra.sortUserVolumes(infra.config.Spec)
inspectMounts, err = infra.GetInspectMounts(namedVolumes, infra.config.ImageVolumes, mounts)
if err != nil {
return nil, err
@@ -611,30 +611,30 @@ func (p *Pod) Inspect() (*define.InspectPodData, error) {
return nil, err
}
- if len(infra.Config().ContainerNetworkConfig.DNSServer) > 0 {
- infraConfig.DNSServer = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSServer))
- for _, entry := range infra.Config().ContainerNetworkConfig.DNSServer {
+ if len(infra.config.ContainerNetworkConfig.DNSServer) > 0 {
+ infraConfig.DNSServer = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSServer))
+ for _, entry := range infra.config.ContainerNetworkConfig.DNSServer {
infraConfig.DNSServer = append(infraConfig.DNSServer, entry.String())
}
}
- if len(infra.Config().ContainerNetworkConfig.DNSSearch) > 0 {
- infraConfig.DNSSearch = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSSearch))
- infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.Config().ContainerNetworkConfig.DNSSearch...)
+ if len(infra.config.ContainerNetworkConfig.DNSSearch) > 0 {
+ infraConfig.DNSSearch = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSSearch))
+ infraConfig.DNSSearch = append(infraConfig.DNSSearch, infra.config.ContainerNetworkConfig.DNSSearch...)
}
- if len(infra.Config().ContainerNetworkConfig.DNSOption) > 0 {
- infraConfig.DNSOption = make([]string, 0, len(infra.Config().ContainerNetworkConfig.DNSOption))
- infraConfig.DNSOption = append(infraConfig.DNSOption, infra.Config().ContainerNetworkConfig.DNSOption...)
+ if len(infra.config.ContainerNetworkConfig.DNSOption) > 0 {
+ infraConfig.DNSOption = make([]string, 0, len(infra.config.ContainerNetworkConfig.DNSOption))
+ infraConfig.DNSOption = append(infraConfig.DNSOption, infra.config.ContainerNetworkConfig.DNSOption...)
}
- if len(infra.Config().HostAdd) > 0 {
- infraConfig.HostAdd = make([]string, 0, len(infra.Config().HostAdd))
- infraConfig.HostAdd = append(infraConfig.HostAdd, infra.Config().HostAdd...)
+ if len(infra.config.HostAdd) > 0 {
+ infraConfig.HostAdd = make([]string, 0, len(infra.config.HostAdd))
+ infraConfig.HostAdd = append(infraConfig.HostAdd, infra.config.HostAdd...)
}
- if len(infra.Config().ContainerNetworkConfig.Networks) > 0 {
- infraConfig.Networks = make([]string, 0, len(infra.Config().ContainerNetworkConfig.Networks))
- infraConfig.Networks = append(infraConfig.Networks, infra.Config().ContainerNetworkConfig.Networks...)
+ if len(infra.config.ContainerNetworkConfig.Networks) > 0 {
+ infraConfig.Networks = make([]string, 0, len(infra.config.ContainerNetworkConfig.Networks))
+ infraConfig.Networks = append(infraConfig.Networks, infra.config.ContainerNetworkConfig.Networks...)
}
- infraConfig.NetworkOptions = infra.Config().ContainerNetworkConfig.NetworkOptions
- infraConfig.PortBindings = makeInspectPortBindings(infra.Config().ContainerNetworkConfig.PortMappings, nil)
+ infraConfig.NetworkOptions = infra.config.ContainerNetworkConfig.NetworkOptions
+ infraConfig.PortBindings = makeInspectPortBindings(infra.config.ContainerNetworkConfig.PortMappings, nil)
}
inspectData := define.InspectPodData{
diff --git a/libpod/pod_internal.go b/libpod/pod_internal.go
index 079b631a0..d903b8719 100644
--- a/libpod/pod_internal.go
+++ b/libpod/pod_internal.go
@@ -71,7 +71,7 @@ func (p *Pod) refresh() error {
case config.SystemdCgroupsManager:
cgroupPath, err := systemdSliceFromPath(p.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", p.ID()))
if err != nil {
- logrus.Errorf("Error creating CGroup for pod %s: %v", p.ID(), err)
+ logrus.Errorf("Creating CGroup for pod %s: %v", p.ID(), err)
}
p.state.CgroupPath = cgroupPath
case config.CgroupfsCgroupsManager:
diff --git a/libpod/reset.go b/libpod/reset.go
index 8e753e845..96fa44c2f 100644
--- a/libpod/reset.go
+++ b/libpod/reset.go
@@ -27,7 +27,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if errors.Cause(err) == define.ErrNoSuchPod {
continue
}
- logrus.Errorf("Error removing Pod %s: %v", p.ID(), err)
+ logrus.Errorf("Removing Pod %s: %v", p.ID(), err)
}
}
@@ -42,13 +42,13 @@ func (r *Runtime) Reset(ctx context.Context) error {
if errors.Cause(err) == define.ErrNoSuchCtr {
continue
}
- logrus.Errorf("Error removing container %s: %v", c.ID(), err)
+ logrus.Errorf("Removing container %s: %v", c.ID(), err)
}
}
}
if err := r.stopPauseProcess(); err != nil {
- logrus.Errorf("Error stopping pause process: %v", err)
+ logrus.Errorf("Stopping pause process: %v", err)
}
rmiOptions := &libimage.RemoveImagesOptions{Filters: []string{"readonly=false"}}
@@ -65,7 +65,7 @@ func (r *Runtime) Reset(ctx context.Context) error {
if errors.Cause(err) == define.ErrNoSuchVolume {
continue
}
- logrus.Errorf("Error removing volume %s: %v", v.config.Name, err)
+ logrus.Errorf("Removing volume %s: %v", v.config.Name, err)
}
}
diff --git a/libpod/runtime.go b/libpod/runtime.go
index a2279e56d..27885bf5c 100644
--- a/libpod/runtime.go
+++ b/libpod/runtime.go
@@ -211,7 +211,7 @@ func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...R
os.Exit(1)
return nil
}); err != nil && errors.Cause(err) != shutdown.ErrHandlerExists {
- logrus.Errorf("Error registering shutdown handler for libpod: %v", err)
+ logrus.Errorf("Registering shutdown handler for libpod: %v", err)
}
if err := shutdown.Start(); err != nil {
@@ -344,7 +344,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
logrus.Warn(msg)
}
} else {
- logrus.Warn(msg)
+ logrus.Warnf("%s: %v", msg, err)
}
}
}
@@ -388,7 +388,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
if _, err := store.Shutdown(false); err != nil {
- logrus.Errorf("Error removing store for partially-created runtime: %s", err)
+ logrus.Errorf("Removing store for partially-created runtime: %s", err)
}
}
}()
@@ -436,7 +436,7 @@ func makeRuntime(ctx context.Context, runtime *Runtime) (retErr error) {
// This will allow us to ship configs including optional
// runtimes that might not be installed (crun, kata).
// Only an infof so default configs don't cause errors.
- logrus.Debugf("configured OCI runtime %s initialization failed: %v", name, err)
+ logrus.Debugf("Configured OCI runtime %s initialization failed: %v", name, err)
continue
}
@@ -706,19 +706,32 @@ func (r *Runtime) TmpDir() (string, error) {
return r.config.Engine.TmpDir, nil
}
-// GetConfig returns a copy of the configuration used by the runtime
-func (r *Runtime) GetConfig() (*config.Config, error) {
+// GetConfigNoCopy returns the configuration used by the runtime.
+// Note that the returned value is not a copy and must hence
+// only be used for reading.
+func (r *Runtime) GetConfigNoCopy() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
+ return r.config, nil
+}
+
+// GetConfig returns a copy of the configuration used by the runtime.
+// Please use GetConfigNoCopy() if you only need to read from,
+// but not write to, the returned config.
+func (r *Runtime) GetConfig() (*config.Config, error) {
+ rtConfig, err := r.GetConfigNoCopy()
+ if err != nil {
+ return nil, err
+ }
config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
- if err := JSONDeepCopy(r.config, config); err != nil {
+ if err := JSONDeepCopy(rtConfig, config); err != nil {
return nil, errors.Wrapf(err, "error copying config")
}
@@ -767,7 +780,7 @@ func (r *Runtime) libimageEvents() {
Type: events.Image,
}
if err := r.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write image event: %q", err)
+ logrus.Errorf("Unable to write image event: %q", err)
}
}
@@ -807,11 +820,11 @@ func (r *Runtime) Shutdown(force bool) error {
if force {
ctrs, err := r.state.AllContainers()
if err != nil {
- logrus.Errorf("Error retrieving containers from database: %v", err)
+ logrus.Errorf("Retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil {
- logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
+ logrus.Errorf("Stopping container %s: %v", ctr.ID(), err)
}
}
}
@@ -833,7 +846,7 @@ func (r *Runtime) Shutdown(force bool) error {
}
if err := r.state.Close(); err != nil {
if lastError != nil {
- logrus.Errorf("%v", lastError)
+ logrus.Error(lastError)
}
lastError = err
}
@@ -879,17 +892,17 @@ func (r *Runtime) refresh(alivePath string) error {
// until this has run.
for _, ctr := range ctrs {
if err := ctr.refresh(); err != nil {
- logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
+ logrus.Errorf("Refreshing container %s: %v", ctr.ID(), err)
}
}
for _, pod := range pods {
if err := pod.refresh(); err != nil {
- logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
+ logrus.Errorf("Refreshing pod %s: %v", pod.ID(), err)
}
}
for _, vol := range vols {
if err := vol.refresh(); err != nil {
- logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
+ logrus.Errorf("Refreshing volume %s: %v", vol.Name(), err)
}
}
@@ -1099,7 +1112,7 @@ func (r *Runtime) reloadContainersConf() error {
return err
}
r.config = config
- logrus.Infof("applied new containers configuration: %v", config)
+ logrus.Infof("Applied new containers configuration: %v", config)
return nil
}
@@ -1110,7 +1123,7 @@ func (r *Runtime) reloadStorageConf() error {
return err
}
storage.ReloadConfigurationFile(configFile, &r.storageConfig)
- logrus.Infof("applied new storage configuration: %v", r.storageConfig)
+ logrus.Infof("Applied new storage configuration: %v", r.storageConfig)
return nil
}
diff --git a/libpod/runtime_cstorage.go b/libpod/runtime_cstorage.go
index cd2f226af..58bd67e6d 100644
--- a/libpod/runtime_cstorage.go
+++ b/libpod/runtime_cstorage.go
@@ -106,18 +106,18 @@ func (r *Runtime) removeStorageContainer(idOrName string, force bool) error {
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error looking up container %q mounts", idOrName)
+ logrus.Warnf("Checking if container %q is mounted, attempting to delete: %v", idOrName, err)
}
if timesMounted > 0 {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %q is mounted and cannot be removed without using force", idOrName)
}
} else if _, err := r.store.Unmount(ctr.ID, true); err != nil {
- if errors.Cause(err) == storage.ErrContainerUnknown {
+ if errors.Is(err, storage.ErrContainerUnknown) {
// Container again gone, no error
logrus.Infof("Storage for container %s already removed", ctr.ID)
return nil
}
- return errors.Wrapf(err, "error unmounting container %q", idOrName)
+ logrus.Warnf("Unmounting container %q while attempting to delete storage: %v", idOrName, err)
}
if err := r.store.DeleteContainer(ctr.ID); err != nil {
diff --git a/libpod/runtime_ctr.go b/libpod/runtime_ctr.go
index d4f67a115..7cda9aa8b 100644
--- a/libpod/runtime_ctr.go
+++ b/libpod/runtime_ctr.go
@@ -59,7 +59,7 @@ func (r *Runtime) PrepareVolumeOnCreateContainer(ctx context.Context, ctr *Conta
defer func() {
if err := ctr.cleanupStorage(); err != nil {
- logrus.Errorf("error cleaning up container storage %s: %v", ctr.ID(), err)
+ logrus.Errorf("Cleaning up container storage %s: %v", ctr.ID(), err)
}
}()
@@ -69,7 +69,7 @@ func (r *Runtime) PrepareVolumeOnCreateContainer(ctx context.Context, ctr *Conta
ctr.state.Mounted = true
ctr.state.Mountpoint = mountPoint
if err = ctr.save(); err != nil {
- logrus.Errorf("Error saving container %s state: %v", ctr.ID(), err)
+ logrus.Errorf("Saving container %s state: %v", ctr.ID(), err)
}
}
@@ -193,10 +193,7 @@ func (r *Runtime) initContainerVariables(rSpec *spec.Spec, config *ContainerConf
ctr.config.LogPath = ""
}
- ctr.config.Spec = new(spec.Spec)
- if err := JSONDeepCopy(rSpec, ctr.config.Spec); err != nil {
- return nil, errors.Wrapf(err, "error copying runtime spec while creating container")
- }
+ ctr.config.Spec = rSpec
ctr.config.CreatedTime = time.Now()
ctr.state.BindMounts = make(map[string]string)
@@ -234,13 +231,6 @@ func (r *Runtime) newContainer(ctx context.Context, rSpec *spec.Spec, options ..
}
func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Container, retErr error) {
- // Validate the container
- if err := ctr.validate(); err != nil {
- return nil, err
- }
- if ctr.config.IsInfra {
- ctr.config.StopTimeout = 10
- }
// normalize the networks to names
// ocicni only knows about cni names so we have to make
// sure we do not use ids internally
@@ -265,11 +255,26 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
if err != nil {
return nil, err
}
+ network, err := r.network.NetworkInspect(netName)
+ if err != nil {
+ return nil, err
+ }
+ if !network.DNSEnabled {
+ return nil, errors.Wrapf(define.ErrInvalidArg, "cannot set network aliases for network %q because dns is disabled", netName)
+ }
netAliases[netName] = aliases
}
ctr.config.NetworkAliases = netAliases
}
+ // Validate the container
+ if err := ctr.validate(); err != nil {
+ return nil, err
+ }
+ if ctr.config.IsInfra {
+ ctr.config.StopTimeout = 10
+ }
+
// Inhibit shutdown until creation succeeds
shutdown.Inhibit()
defer shutdown.Uninhibit()
@@ -286,7 +291,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
defer func() {
if retErr != nil {
if err := ctr.lock.Free(); err != nil {
- logrus.Errorf("Error freeing lock for container after creation failed: %v", err)
+ logrus.Errorf("Freeing lock for container after creation failed: %v", err)
}
}
}()
@@ -409,7 +414,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai
defer func() {
if retErr != nil {
if err := ctr.teardownStorage(); err != nil {
- logrus.Errorf("Error removing partially-created container root filesystem: %s", err)
+ logrus.Errorf("Removing partially-created container root filesystem: %s", err)
}
}
}()
@@ -696,7 +701,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("cleanup storage: %v", err)
+ logrus.Errorf("Cleanup storage: %v", err)
}
}
@@ -709,7 +714,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("Error removing container %s from database: %v", c.ID(), err)
+ logrus.Errorf("Removing container %s from database: %v", c.ID(), err)
}
}
}
@@ -718,7 +723,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = err
} else {
- logrus.Errorf("Error removing container %s from database: %v", c.ID(), err)
+ logrus.Errorf("Removing container %s from database: %v", c.ID(), err)
}
}
}
@@ -728,7 +733,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
if cleanupErr == nil {
cleanupErr = errors.Wrapf(err, "error freeing lock for container %s", c.ID())
} else {
- logrus.Errorf("free container lock: %v", err)
+ logrus.Errorf("Free container lock: %v", err)
}
}
@@ -747,7 +752,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force, remo
continue
}
if err := runtime.removeVolume(ctx, volume, false); err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- logrus.Errorf("cleanup volume (%s): %v", v, err)
+ logrus.Errorf("Cleanup volume (%s): %v", v, err)
}
}
}
@@ -888,7 +893,7 @@ func (r *Runtime) evictContainer(ctx context.Context, idOrName string, removeVol
continue
}
if err := r.removeVolume(ctx, volume, false); err != nil && err != define.ErrNoSuchVolume && err != define.ErrVolumeBeingUsed {
- logrus.Errorf("cleanup volume (%s): %v", v, err)
+ logrus.Errorf("Cleanup volume (%s): %v", v, err)
}
}
}
diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go
index a42f9a365..1915a5c4d 100644
--- a/libpod/runtime_img.go
+++ b/libpod/runtime_img.go
@@ -48,13 +48,34 @@ func (r *Runtime) RemoveContainersForImageCallback(ctx context.Context) libimage
}
}
+// IsExternalContainerCallback returns a callback that can be used in `libimage` to
+// figure out whether a given container is an external one. A container is
+// considered external if it is not present in libpod's database.
+func (r *Runtime) IsExternalContainerCallback(_ context.Context) libimage.IsExternalContainerFunc {
+ // NOTE: pruning external containers is subject to race conditions
+ // (e.g., when a container gets removed). To address this and similar
+ // races, pruning would have to happen inside c/storage. Containers would
+ // have to be labelled with "podman/libpod" along with callbacks similar
+ // to libimage.
+ return func(idOrName string) (bool, error) {
+ _, err := r.LookupContainer(idOrName)
+ if err == nil {
+ return false, nil
+ }
+ if errors.Is(err, define.ErrNoSuchCtr) {
+ return true, nil
+ }
+ return false, nil
+ }
+}
+
// newBuildEvent creates a new event based on completion of a built image
func (r *Runtime) newImageBuildCompleteEvent(idOrName string) {
e := events.NewEvent(events.Build)
e.Type = events.Image
e.Name = idOrName
if err := r.eventer.Write(e); err != nil {
- logrus.Errorf("unable to write build event: %q", err)
+ logrus.Errorf("Unable to write build event: %q", err)
}
}
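
A hedged sketch of the classification rule described above, using stand-ins for the container lookup and error value rather than the real libpod or libimage types:

    package main

    import (
        "errors"
        "fmt"
    )

    // errNoSuchCtr stands in for define.ErrNoSuchCtr; lookup stands in for the
    // database lookup. Neither is the actual libpod API.
    var errNoSuchCtr = errors.New("no such container")

    func lookup(id string) error {
        if id == "managed" {
            return nil
        }
        return errNoSuchCtr
    }

    // isExternal follows the rule in the comment above: present in the
    // database means managed by libpod, missing means external.
    func isExternal(id string) bool {
        err := lookup(id)
        if err == nil {
            return false
        }
        return errors.Is(err, errNoSuchCtr)
    }

    func main() {
        fmt.Println(isExternal("managed"), isExternal("buildah-ctr"))
    }
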
diff --git a/libpod/runtime_migrate.go b/libpod/runtime_migrate.go
index 3e63bc19e..087991e6f 100644
--- a/libpod/runtime_migrate.go
+++ b/libpod/runtime_migrate.go
@@ -56,7 +56,7 @@ func (r *Runtime) migrate(ctx context.Context) error {
return err
}
- logrus.Infof("stopping all containers")
+ logrus.Infof("Stopping all containers")
for _, ctr := range runningContainers {
fmt.Printf("stopped %s\n", ctr.ID())
if err := ctr.Stop(); err != nil {
@@ -77,7 +77,7 @@ func (r *Runtime) migrate(ctx context.Context) error {
// Reset pause process location
oldLocation := filepath.Join(ctr.state.RunDir, "conmon.pid")
if ctr.config.ConmonPidFile == oldLocation {
- logrus.Infof("changing conmon PID file for %s", ctr.ID())
+ logrus.Infof("Changing conmon PID file for %s", ctr.ID())
ctr.config.ConmonPidFile = filepath.Join(ctr.config.StaticDir, "conmon.pid")
needsWrite = true
}
diff --git a/libpod/runtime_pod_linux.go b/libpod/runtime_pod_linux.go
index 7571fdfff..5036dd680 100644
--- a/libpod/runtime_pod_linux.go
+++ b/libpod/runtime_pod_linux.go
@@ -66,7 +66,7 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option
defer func() {
if deferredErr != nil {
if err := pod.lock.Free(); err != nil {
- logrus.Errorf("Error freeing pod lock after failed creation: %v", err)
+ logrus.Errorf("Freeing pod lock after failed creation: %v", err)
}
}
}()
@@ -224,7 +224,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon")
conmonCgroup, err := cgroups.Load(conmonCgroupPath)
if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless {
- logrus.Errorf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
+ logrus.Errorf("Retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
// New resource limits
@@ -259,7 +259,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = err
} else {
- logrus.Errorf("Error removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
+ logrus.Errorf("Removing container %s from pod %s: %v", ctr.ID(), p.ID(), err)
}
}
}
@@ -275,7 +275,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
for volName := range ctrNamedVolumes {
volume, err := r.state.Volume(volName)
if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
- logrus.Errorf("Error retrieving volume %s: %v", volName, err)
+ logrus.Errorf("Retrieving volume %s: %v", volName, err)
continue
}
if !volume.Anonymous() {
@@ -285,7 +285,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if errors.Cause(err) == define.ErrNoSuchVolume || errors.Cause(err) == define.ErrVolumeRemoved {
continue
}
- logrus.Errorf("Error removing volume %s: %v", volName, err)
+ logrus.Errorf("Removing volume %s: %v", volName, err)
}
}
@@ -299,7 +299,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
} else {
- logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
}
case config.CgroupfsCgroupsManager:
@@ -321,7 +321,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s conmon cgroup", p.ID())
} else {
- logrus.Errorf("Error deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
+ logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err)
}
}
}
@@ -330,7 +330,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = errors.Wrapf(err, "error retrieving pod %s cgroup", p.ID())
} else {
- logrus.Errorf("Error retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
}
if err == nil {
@@ -338,7 +338,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = errors.Wrapf(err, "error removing pod %s cgroup", p.ID())
} else {
- logrus.Errorf("Error deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
+ logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err)
}
}
}
@@ -371,7 +371,7 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool)
if removalErr == nil {
removalErr = errors.Wrapf(err, "error freeing pod %s lock", p.ID())
} else {
- logrus.Errorf("Error freeing pod %s lock: %v", p.ID(), err)
+ logrus.Errorf("Freeing pod %s lock: %v", p.ID(), err)
}
}
diff --git a/libpod/runtime_volume_linux.go b/libpod/runtime_volume_linux.go
index d1ea7d4fd..def6ca411 100644
--- a/libpod/runtime_volume_linux.go
+++ b/libpod/runtime_volume_linux.go
@@ -140,7 +140,7 @@ func (r *Runtime) newVolume(ctx context.Context, options ...VolumeCreateOption)
defer func() {
if deferredErr != nil {
if err := volume.lock.Free(); err != nil {
- logrus.Errorf("Error freeing volume lock after failed creation: %v", err)
+ logrus.Errorf("Freeing volume lock after failed creation: %v", err)
}
}
}()
@@ -246,7 +246,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
// If force is set, evict the volume, even if errors
// occur. Otherwise we'll never be able to get rid of
// them.
- logrus.Errorf("Error unmounting volume %s: %v", v.Name(), err)
+ logrus.Errorf("Unmounting volume %s: %v", v.Name(), err)
} else {
return errors.Wrapf(err, "error unmounting volume %s", v.Name())
}
@@ -290,7 +290,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
// Remove the volume from the state
if err := r.state.RemoveVolume(v); err != nil {
if removalErr != nil {
- logrus.Errorf("Error removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
+ logrus.Errorf("Removing volume %s from plugin %s: %v", v.Name(), v.Driver(), removalErr)
}
return errors.Wrapf(err, "error removing volume %s", v.Name())
}
@@ -300,7 +300,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if removalErr == nil {
removalErr = errors.Wrapf(err, "error freeing lock for volume %s", v.Name())
} else {
- logrus.Errorf("Error freeing lock for volume %q: %v", v.Name(), err)
+ logrus.Errorf("Freeing lock for volume %q: %v", v.Name(), err)
}
}
@@ -310,7 +310,7 @@ func (r *Runtime) removeVolume(ctx context.Context, v *Volume, force bool) error
if removalErr == nil {
removalErr = errors.Wrapf(err, "error cleaning up volume storage for %q", v.Name())
} else {
- logrus.Errorf("Error cleaning up volume storage for volume %q: %v", v.Name(), err)
+ logrus.Errorf("Cleaning up volume storage for volume %q: %v", v.Name(), err)
}
}
diff --git a/libpod/shutdown/handler.go b/libpod/shutdown/handler.go
index 1e8a9ec3b..cca74c3c4 100644
--- a/libpod/shutdown/handler.go
+++ b/libpod/shutdown/handler.go
@@ -61,7 +61,7 @@ func Start() error {
}
logrus.Infof("Invoking shutdown handler %s", name)
if err := handler(sig); err != nil {
- logrus.Errorf("Error running shutdown handler %s: %v", name, err)
+ logrus.Errorf("Running shutdown handler %s: %v", name, err)
}
}
handlerLock.Unlock()
diff --git a/libpod/storage.go b/libpod/storage.go
index 4aa42dc8e..ad78fe191 100644
--- a/libpod/storage.go
+++ b/libpod/storage.go
@@ -118,22 +118,22 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
container, err := r.store.CreateContainer(containerID, names, imageID, "", string(mdata), &options)
if err != nil {
- logrus.Debugf("failed to create container %s(%s): %v", metadata.ContainerName, containerID, err)
+ logrus.Debugf("Failed to create container %s(%s): %v", metadata.ContainerName, containerID, err)
return ContainerInfo{}, err
}
- logrus.Debugf("created container %q", container.ID)
+ logrus.Debugf("Created container %q", container.ID)
// If anything fails after this point, we need to delete the incomplete
// container before returning.
defer func() {
if retErr != nil {
if err := r.store.DeleteContainer(container.ID); err != nil {
- logrus.Infof("%v deleting partially-created container %q", err, container.ID)
+ logrus.Infof("Error deleting partially-created container %q: %v", container.ID, err)
return
}
- logrus.Infof("deleted partially-created container %q", container.ID)
+ logrus.Infof("Deleted partially-created container %q", container.ID)
}
}()
@@ -155,13 +155,13 @@ func (r *storageService) CreateContainerStorage(ctx context.Context, systemConte
if err != nil {
return ContainerInfo{}, err
}
- logrus.Debugf("container %q has work directory %q", container.ID, containerDir)
+ logrus.Debugf("Container %q has work directory %q", container.ID, containerDir)
containerRunDir, err := r.store.ContainerRunDirectory(container.ID)
if err != nil {
return ContainerInfo{}, err
}
- logrus.Debugf("container %q has run directory %q", container.ID, containerRunDir)
+ logrus.Debugf("Container %q has run directory %q", container.ID, containerRunDir)
return ContainerInfo{
UIDMap: options.UIDMap,
@@ -184,7 +184,7 @@ func (r *storageService) DeleteContainer(idOrName string) error {
}
err = r.store.DeleteContainer(container.ID)
if err != nil {
- logrus.Debugf("failed to delete container %q: %v", container.ID, err)
+ logrus.Debugf("Failed to delete container %q: %v", container.ID, err)
return err
}
return nil
@@ -193,7 +193,7 @@ func (r *storageService) DeleteContainer(idOrName string) error {
func (r *storageService) SetContainerMetadata(idOrName string, metadata RuntimeContainerMetadata) error {
mdata, err := json.Marshal(&metadata)
if err != nil {
- logrus.Debugf("failed to encode metadata for %q: %v", idOrName, err)
+ logrus.Debugf("Failed to encode metadata for %q: %v", idOrName, err)
return err
}
return r.store.SetMetadata(idOrName, string(mdata))
@@ -225,10 +225,10 @@ func (r *storageService) MountContainerImage(idOrName string) (string, error) {
}
mountPoint, err := r.store.Mount(container.ID, metadata.MountLabel)
if err != nil {
- logrus.Debugf("failed to mount container %q: %v", container.ID, err)
+ logrus.Debugf("Failed to mount container %q: %v", container.ID, err)
return "", err
}
- logrus.Debugf("mounted container %q at %q", container.ID, mountPoint)
+ logrus.Debugf("Mounted container %q at %q", container.ID, mountPoint)
return mountPoint, nil
}
@@ -252,10 +252,10 @@ func (r *storageService) UnmountContainerImage(idOrName string, force bool) (boo
}
mounted, err := r.store.Unmount(container.ID, force)
if err != nil {
- logrus.Debugf("failed to unmount container %q: %v", container.ID, err)
+ logrus.Debugf("Failed to unmount container %q: %v", container.ID, err)
return false, err
}
- logrus.Debugf("unmounted container %q", container.ID)
+ logrus.Debugf("Unmounted container %q", container.ID)
return mounted, nil
}
diff --git a/libpod/util.go b/libpod/util.go
index d3f7da91e..8f8303ff2 100644
--- a/libpod/util.go
+++ b/libpod/util.go
@@ -240,14 +240,14 @@ func hijackWriteError(toWrite error, cid string, terminal bool, httpBuf *bufio.R
// We need a header.
header := makeHTTPAttachHeader(2, uint32(len(errString)))
if _, err := httpBuf.Write(header); err != nil {
- logrus.Errorf("Error writing header for container %s attach connection error: %v", cid, err)
+ logrus.Errorf("Writing header for container %s attach connection error: %v", cid, err)
}
}
if _, err := httpBuf.Write(errString); err != nil {
- logrus.Errorf("Error writing error to container %s HTTP attach connection: %v", cid, err)
+ logrus.Errorf("Writing error to container %s HTTP attach connection: %v", cid, err)
}
if err := httpBuf.Flush(); err != nil {
- logrus.Errorf("Error flushing HTTP buffer for container %s HTTP attach connection: %v", cid, err)
+ logrus.Errorf("Flushing HTTP buffer for container %s HTTP attach connection: %v", cid, err)
}
}
}
@@ -259,7 +259,7 @@ func hijackWriteErrorAndClose(toWrite error, cid string, terminal bool, httpCon
hijackWriteError(toWrite, cid, terminal, httpBuf)
if err := httpCon.Close(); err != nil {
- logrus.Errorf("Error closing container %s HTTP attach connection: %v", cid, err)
+ logrus.Errorf("Closing container %s HTTP attach connection: %v", cid, err)
}
}
diff --git a/libpod/util_linux.go b/libpod/util_linux.go
index 32b058d27..e2ea97185 100644
--- a/libpod/util_linux.go
+++ b/libpod/util_linux.go
@@ -119,7 +119,7 @@ func LabelVolumePath(path string) error {
func Unmount(mount string) {
if err := unix.Unmount(mount, unix.MNT_DETACH); err != nil {
if err != syscall.EINVAL {
- logrus.Warnf("failed to unmount %s : %v", mount, err)
+ logrus.Warnf("Failed to unmount %s : %v", mount, err)
} else {
logrus.Debugf("failed to unmount %s : %v", mount, err)
}